diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 78078f79358..c374bfeb0ef 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -19,7 +19,7 @@ pipeline { DEVELOPER_MODE=true } options { - timeout(time: 2, unit: 'HOURS') + timeout(time: 3, unit: 'HOURS') buildDiscarder(logRotator(numToKeepStr: '20', artifactNumToKeepStr: '20', daysToKeepStr: '30')) timestamps() ansiColor('xterm') @@ -39,6 +39,14 @@ pipeline { // disabled by default, but required for merge: // opt-in with 'ci:end-to-end' tag on PR booleanParam(name: 'end_to_end_tests_ci', defaultValue: false, description: 'Enable End-to-End tests') + + // disabled by default, but required for merge: + // opt-in with 'ci:extended-windows' tag on PR + booleanParam(name: 'extended_windows_ci', defaultValue: false, description: 'Enable Extended Windows tests') + + // disabled by default, but required for merge: + // opt-in with 'ci:extended-m1' tag on PR + booleanParam(name: 'extended_m1_ci', defaultValue: false, description: 'Enable M1 tests') } stages { stage('Checkout') { @@ -51,6 +59,10 @@ pipeline { setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.(asciidoc|md)' ], shouldMatchAll: true).toString()) setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) + setEnvVar('EXT_WINDOWS_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) + setEnvVar('EXT_M1_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) + // set the GO_VERSION env variable with the go version to be used in withMageEnv + setEnvVar('GO_VERSION', readFile(file: '.go-version')?.trim()) } } } @@ -79,7 +91,8 @@ pipeline { axes { axis { name 'PLATFORM' - values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'macos12 && x86_64' + // Orka workers are not healthy (memory and connectivity issues) + values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable' //, 'macos12 && x86_64' } } stages { @@ -147,7 +160,7 @@ pipeline { } } steps { - runK8s(k8sVersion: 'v1.23.0', kindVersion: 'v0.11.1', context: "K8s-${PLATFORM}") + runK8s(k8sVersion: 'v1.25.0-beta.0', kindVersion: 'v0.14.0', context: "K8s-${PLATFORM}") } } stage('Package') { @@ -219,7 +232,7 @@ pipeline { axes { axis { name 'K8S_VERSION' - values "v1.24.0", "v1.23.6", "v1.22.9", "v1.21.12" + values "v1.25.0","v1.24.3", "v1.23.6", "v1.22.9" } } stages { @@ -238,6 +251,153 @@ pipeline { } } } + stage('Sync K8s') { //This stage opens a PR to kibana Repository in order to sync k8s manifests + when { + // Only on main branch + // Enable if k8s related changes. + allOf { + branch 'main' // Only runs for branch main + expression { return env.K8S_CHANGES == "true" } // If k8s changes + } + } + failFast false + agent {label 'ubuntu-20.04 && immutable'} + options { skipDefaultCheckout() } + stages { + stage('OpenKibanaPR') { + steps { + withGhEnv(version: '2.4.0') { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + dir("${BASE_DIR}/deploy/kubernetes"){ + sh(label: '[File Creation] Create-Needed-Manifest', script: """ + WITHOUTCONFIG=true make generate-k8s + ./creator_k8s_manifest.sh . 
""") + sh(label: '[Clone] Kibana-Repository', script: """ + make ci-clone-kibana-repository + cp Makefile ./kibana + cd kibana + make ci-create-kubernetes-templates-pull-request """) + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + } + } + } + } + } + stage('extended windows') { + when { + // Always when running builds on branches/tags + // Enable if extended windows support related changes. + beforeAgent true + anyOf { + not { changeRequest() } + expression { return isExtendedWindowsEnabled() && env.ONLY_DOCS == "false"} + } + } + failFast false + matrix { + agent {label "${PLATFORM} && windows-immutable"} + options { skipDefaultCheckout() } + axes { + axis { + name 'PLATFORM' + values 'windows-8', 'windows-10', 'windows-11' + } + } + stages { + stage('build'){ + options { skipDefaultCheckout() } + steps { + withGithubNotify(context: "Build-${PLATFORM}") { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + withMageEnv(){ + dir("${BASE_DIR}"){ + cmd(label: 'Go build', script: 'mage build') + } + } + } + } + } + stage('Test') { + options { skipDefaultCheckout() } + steps { + withGithubNotify(context: "Test-${PLATFORM}") { + withMageEnv(){ + dir("${BASE_DIR}"){ + withEnv(["TEST_COVERAGE=${isCodeCoverageEnabled()}"]) { + cmd(label: 'Go unitTest', script: 'mage unitTest') + } + } + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + whenTrue(isCodeCoverageEnabled()) { + coverageReport(baseDir: "**/build", reportFiles: 'TEST-go-unit.html', coverageFiles: 'TEST-go-unit-cov.xml') + } + } + } + } + } + } + } + stage('m1') { + agent { label 'orka && darwin && aarch64' } + options { skipDefaultCheckout() } + when { + // Always when running builds on branches/tags + // Enable if extended M1 support related changes. 
+ beforeAgent true + anyOf { + not { changeRequest() } + expression { return isExtendedM1Enabled() && env.ONLY_DOCS == "false"} + } + } + stages { + stage('build'){ + steps { + withGithubNotify(context: "Build-darwin-aarch64") { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + withMageEnv(){ + dir("${BASE_DIR}"){ + cmd(label: 'Go build', script: 'mage build') + } + } + } + } + } + stage('Test') { + steps { + withGithubNotify(context: "Test-darwin-aarch64") { + withMageEnv(){ + dir("${BASE_DIR}"){ + withEnv(["TEST_COVERAGE=${isCodeCoverageEnabled()}"]) { + cmd(label: 'Go unitTest', script: 'mage unitTest') + } + } + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + whenTrue(isCodeCoverageEnabled()) { + coverageReport(baseDir: "**/build", reportFiles: 'TEST-go-unit.html', coverageFiles: 'TEST-go-unit-cov.xml') + } + } + } + } + } + } stage('e2e tests') { when { // Always when running builds on branches/tags @@ -250,7 +410,6 @@ pipeline { } } steps { - // TODO: what's the testMatrixFile to be used if any runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", @@ -374,3 +533,17 @@ def isE2eEnabled() { def isPackageEnabled() { return env.PACKAGING_CHANGES == "true" || env.GITHUB_COMMENT?.contains('package') || matchesPrLabel(label: 'ci:package') } + +/** +* Wrapper to know if the build should enable the windows extended support +*/ +def isExtendedWindowsEnabled() { + return env.EXT_WINDOWS_CHANGES == "true" || params.extended_windows_ci || env.GITHUB_COMMENT?.contains('extended windows') || matchesPrLabel(label: 'ci:extended-windows') +} + +/** +* Wrapper to know if the build should enable the M1 extended support +*/ +def isExtendedM1Enabled() { + return env.EXT_M1_CHANGES == "true" || params.extended_m1_ci || env.GITHUB_COMMENT?.contains('extended m1') || matchesPrLabel(label: 'ci:extended-m1') +} diff --git a/.ci/schedule-daily.groovy b/.ci/schedule-daily.groovy index 5c1d7134858..adc1ec0f02e 100644 --- a/.ci/schedule-daily.groovy +++ b/.ci/schedule-daily.groovy @@ -20,7 +20,7 @@ pipeline { stages { stage('Nighly beats builds') { steps { - runBuilds(quietPeriodFactor: 2000, branches: ['main', '8.', '8.', '7.']) + runBuilds(quietPeriodFactor: 2000, branches: ['main', '8.', '8.']) } } } diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 00000000000..d0f29a0fd25 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,17 @@ +name: Changelog +on: [pull_request] + +jobs: + fragments: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: check pr-has-fragment + run: | + GOBIN=$PWD/bin go install github.com/elastic/elastic-agent-changelog-tool@latest + ./bin/elastic-agent-changelog-tool pr-has-fragment --repo ${{ github.event.repository.name }} ${{github.event.number}} diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 8079fe1c673..62d4006737c 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -18,22 +18,22 @@ jobs: name: lint runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: 
actions/checkout@v3 # Uses Go version from the repository. - name: Read .go-version file id: goversion run: echo "::set-output name=version::$(cat .go-version)" - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: "${{ steps.goversion.outputs.version }}" - name: golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.45.2 + version: v1.47.2 # Give the job more time to execute. # Regarding `--whole-files`, the linter is supposed to support linting of changed a patch only but, diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml new file mode 100644 index 00000000000..bf3e5eed775 --- /dev/null +++ b/.github/workflows/macos.yml @@ -0,0 +1,25 @@ +name: macos + +on: + pull_request: + push: + branches: + - main + - 8.* + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v3 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go install github.com/magefile/mage + - name: Run build + run: mage build + - name: Run test + run: mage unitTest diff --git a/.github/workflows/qa-labels.yml b/.github/workflows/qa-labels.yml deleted file mode 100644 index bbbd4439847..00000000000 --- a/.github/workflows/qa-labels.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: Add QA labels to Elastic Agent issues -on: - # pull_request_target allows running actions on PRs from forks with a read/write GITHUB_TOKEN, but it will not allow - # running workflows defined in the PRs itself, only workflows already merged into the target branch. This avoids - # potential vulnerabilities that could allow someone to open a PR and retrieve secrets. - # It's important that this workflow never runs any checkout actions which could be used to circumvent this protection. - # See these links for more information: - # - https://github.blog/2020-08-03-github-actions-improvements-for-fork-and-pull-request-workflows/ - # - https://nathandavison.com/blog/github-actions-and-the-threat-of-malicious-pull-requests - pull_request_target: - types: - - closed - -jobs: - fetch_issues_to_label: - runs-on: ubuntu-latest - # Only run on PRs that were merged for the Elastic Agent teams - if: | - github.event.pull_request.merged_at && - ( - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent') || - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent-Data-Plane') || - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent-Control-Plane') - ) - outputs: - issue_ids: ${{ steps.issues_to_label.outputs.value }} - label_ids: ${{ steps.label_ids.outputs.value }} - steps: - - uses: octokit/graphql-action@v2.x - id: closing_issues - with: - query: | - query closingIssueNumbersQuery($prnumber: Int!) 
{ repository(owner: "elastic", name: "elastic-agent") { pullRequest(number: $prnumber) { closingIssuesReferences(first: 10) { nodes { id labels(first: 20) { nodes { id name } } } } } } } prnumber: ${{ github.event.number }} token: ${{ secrets.GITHUB_TOKEN }} - uses: sergeysova/jq-action@v2 id: issues_to_label with: # Map to the issues' node id cmd: echo $CLOSING_ISSUES | jq -c '.repository.pullRequest.closingIssuesReferences.nodes | map(.id)' multiline: true env: CLOSING_ISSUES: ${{ steps.closing_issues.outputs.data }} - uses: sergeysova/jq-action@v2 id: label_ids with: # Get list of version labels on pull request and map to label's node id, append 'QA:Ready For Testing' id ("LA_kwDOGgEmJc7mkkl9]") cmd: echo $PR_LABELS | jq -c 'map(select(.name | test("v[0-9]+\\.[0-9]+\\.[0-9]+")) | .node_id) + ["LA_kwDOGgEmJc7mkkl9]' multiline: true env: PR_LABELS: ${{ toJSON(github.event.pull_request.labels) }} - label_issues: needs: fetch_issues_to_label runs-on: ubuntu-latest # For each issue closed by the PR run this job if: | fromJSON(needs.fetch_issues_to_label.outputs.issue_ids).length > 0 && fromJSON(needs.fetch_issues_to_label.outputs.label_ids).length > 0 strategy: matrix: issueNodeId: ${{ fromJSON(needs.fetch_issues_to_label.outputs.issue_ids) }} labelId: ${{ fromJSON(needs.fetch_issues_to_label.outputs.label_ids) }} name: Label issue ${{ matrix.issueNodeId }} steps: - uses: octokit/graphql-action@v2.x id: add_labels_to_closed_issue with: query: | mutation add_label($issueid:ID!, $labelids:[String!]!) { addLabelsToLabelable(input: {labelableId: $issueid, labelIds: $labelids}) { clientMutationId } } issueid: ${{ matrix.issueNodeId }} labelids: ${{ matrix.labelId }} token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.go-version b/.go-version index ada2e4fce87..d6f3a382b34 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.17.10 +1.18.7 diff --git a/.golangci.yml b/.golangci.yml index 956b4b4b573..96e131c8ade 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,46 +12,37 @@ issues: # Set to 0 to disable. # Default: 50 max-issues-per-linter: 0 + exclude-rules: + # Exclude the ST1003 "package name contains '-'" issue because we have at least one package + # with '-' in its name. + - text: "ST1003:" + linters: + - stylecheck + # From mage we are printing to the console on purpose + - path: (.*magefile.go|.*dev-tools/mage/.*) + linters: + - forbidigo output: sort-results: true -# Uncomment and add a path if needed to exclude -# skip-dirs: -# - some/path -# skip-files: -# - ".*\\.my\\.go$" -# - lib/bad.go - # Find the whole list here https://golangci-lint.run/usage/linters/ linters: disable-all: true enable: - - deadcode # finds unused code - errcheck # checking for unchecked errors in go programs - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - - goconst # finds repeated strings that could be replaced by a constant - - dupl # tool for code clone detection - forbidigo # forbids identifiers matched by reg exps - # 'replace' is used in go.mod for many dependencies that come from libbeat. We should work to remove those, - # so we can re-enable this linter. - # - gomoddirectives # manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
- - gomodguard - gosimple # linter for Go source code that specializes in simplifying a code - misspell # finds commonly misspelled English words in comments - nakedret # finds naked returns in functions greater than a specified function length - - prealloc # finds slice declarations that could potentially be preallocated - nolintlint # reports ill-formed or insufficient nolint directives - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks - stylecheck # a replacement for golint - - unparam # reports unused function parameters - unused # checks Go code for unused constants, variables, functions and types - - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - ineffassign # detects when assignments to existing variables are not used - - structcheck # finds unused struct fields - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code - - varcheck # Finds unused global variables and constants - asciicheck # simple linter to check that your code does not contain non-ASCII identifiers - bodyclose # checks whether HTTP response body is closed successfully - durationcheck # check for two durations multiplied together @@ -63,14 +54,20 @@ linters: - noctx # noctx finds sending http request without context.Context - unconvert # Remove unnecessary type conversions - wastedassign # wastedassign finds wasted assignment statements. - # - godox # tool for detection of FIXME, TODO and other comment keywords + - gomodguard # check for blocked dependencies # all available settings of specific linters linters-settings: errcheck: # report about not checking of errors in type assertions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: true + check-type-assertions: false + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + check-blank: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (mapstr.M).Delete # Only returns ErrKeyNotFound, can safely be ignored. + - (mapstr.M).Put # Can only fail on type conversions, usually safe to ignore. errorlint: # Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats @@ -80,16 +77,6 @@ linters-settings: # Check for plain error comparisons comparison: true - goconst: - # minimal length of string constant, 3 by default - min-len: 3 - # minimal occurrences count to trigger, 3 by default - min-occurrences: 2 - - dupl: - # tokens count to trigger issue, 150 by default - threshold: 100 - forbidigo: # Forbid the following identifiers forbid: @@ -97,68 +84,59 @@ linters-settings: # Exclude godoc examples from forbidigo checks. Default is true. exclude_godoc_examples: true - gomoddirectives: - # Allow local `replace` directives. Default is false. - replace-local: false + goimports: + local-prefixes: github.com/elastic gomodguard: blocked: # List of blocked modules. modules: - - github.com/elastic/beats/v7: - reason: "There must be no Beats dependency, use elastic-agent-libs instead." - + # Blocked module. + - github.com/pkg/errors: + # Recommended modules that should be used instead. 
(Optional) + recommendations: + - errors + - fmt + reason: "This package is deprecated, use `fmt.Errorf` with `%w` instead" gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.17" - - misspell: - # Correct spellings using locale preferences for US or UK. - # Default is to use a neutral variety of English. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. - # locale: US - # ignore-words: - # - IdP + go: "1.18.7" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 max-func-lines: 0 - prealloc: - # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # True by default. - simple: true - range-loops: true # Report preallocation suggestions on range loops, true by default - for-loops: false # Report preallocation suggestions on for loops, false by default - nolintlint: # Enable to ensure that nolint directives are all used. Default is true. allow-unused: false # Disable to ensure that nolint directives don't have a leading space. Default is true. - allow-leading-space: true + allow-leading-space: false # Exclude following linters from requiring an explanation. Default is []. allow-no-explanation: [] # Enable to require an explanation of nonzero length after each nolint directive. Default is false. require-explanation: true # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. - require-specific: true + require-specific: false staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.17" + go: "1.18.7" + checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.17" - - unparam: - # Inspect exported functions, default is false. Set to true if no external program/library imports your code. - # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find external interfaces. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false + go: "1.18.7" + checks: ["all"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.17" + go: "1.18.7" + + gosec: + excludes: + - G306 # Expect WriteFile permissions to be 0600 or less + - G404 # Use of weak random number generator + - G401 # Detect the usage of DES, RC4, MD5 or SHA1: Used in non-crypto contexts. + - G501 # Import blocklist: crypto/md5: Used in non-crypto contexts. + - G505 # Import blocklist: crypto/sha1: Used in non-crypto contexts. diff --git a/.mergify.yml b/.mergify.yml index 3fe46362854..528df9b498b 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -220,3 +220,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.5 branch + conditions: + - merged + - label=backport-v8.5.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.5" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc deleted file mode 100644 index acdf4efc087..00000000000 --- a/CHANGELOG.next.asciidoc +++ /dev/null @@ -1,200 +0,0 @@ -// Use these for links to issue and pulls. Note issues and pulls redirect one to -// each other on Github, so don't worry too much on using the right prefix. 
-:issue-beats: https://github.com/elastic/beats/issues/ -:pull-beats: https://github.com/elastic/beats/pull/ - -:issue: https://github.com/elastic/elastic-agent/issues/ -:pull: https://github.com/elastic/elastic-agent/pull/ - -=== Elastic Agent version HEAD - -==== Breaking changes - -- Docker container is not run as root by default. {pull-beats}[21213] -- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull-beats}[24713] -- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull-beats}[25186] -- Remove the `--kibana-url` from `install` and `enroll` command. {pull-beats}[25529] -- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull-beats}[25723] -- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] -- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] -- Remove username/password for fleet-server authentication. {pull-beats}[29458] - -==== Bugfixes -- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull-beats}[20779] -- Thread safe sorted set {pull-beats}[21290] -- Copy Action store on upgrade {pull-beats}[21298] -- Include inputs in action store actions {pull-beats}[21298] -- Fix issue where inputs without processors defined would panic {pull-beats}[21628] -- Prevent reporting ecs version twice {pull-beats}[21616] -- Partial extracted beat result in failure to spawn beat {issue-beats}[21718] -- Use symlink path for reexecutions {pull-beats}[21835] -- Use ML_SYSTEM to detect if agent is running as a service {pull-beats}[21884] -- Use local temp instead of system one {pull-beats}[21883] -- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull-beats}[21932] -- Fix issue with named pipes on Windows 7 {pull-beats}[21931] -- Fix missing elastic_agent event data {pull-beats}[21994] -- Ensure shell wrapper path exists before writing wrapper on install {pull-beats}[22144] -- Fix deb/rpm packaging for Elastic Agent {pull-beats}[22153] -- Fix composable input processor promotion to fix duplicates {pull-beats}[22344] -- Fix sysv init files for deb/rpm installation {pull-beats}[22543] -- Fix shell wrapper for deb/rpm packaging {pull-beats}[23038] -- Fixed parsing of npipe URI {pull-beats}[22978] -- Select default agent policy if no enrollment token provided. {pull-beats}[23973] -- Remove artifacts on transient download errors {pull-beats}[23235] -- Support for linux/arm64 {pull-beats}[23479] -- Skip top level files when unziping archive during upgrade {pull-beats}[23456] -- Do not take ownership of Endpoint log path {pull-beats}[23444] -- Fixed fetching DBus service PID {pull-beats}[23496] -- Fix issue of missing log messages from filebeat monitor {pull-beats}[23514] -- Increase checkin grace period to 30 seconds {pull-beats}[23568] -- Fix libbeat from reporting back degraded on config update {pull-beats}[23537] -- Rewrite check if agent is running with admin rights on Windows {pull-beats}[23970] -- Fix issues with dynamic inputs and conditions {pull-beats}[23886] -- Fix bad substitution of API key. {pull-beats}[24036] -- Fix docker enrollment issue related to Fleet Server change. {pull-beats}[24155] -- Improve log on failure of Endpoint Security installation. {pull-beats}[24429] -- Verify communication to Kibana before updating Fleet client. 
{pull-beats}[24489] -- Fix nil pointer when null is generated as list item. {issue-beats}[23734] -- Add support for filestream input. {pull-beats}[24820] -- Add check for URL set when cert and cert key. {pull-beats}[24904] -- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull-beats}[24981] -- Respect host configuration for exposed processes endpoint {pull-beats}[25114] -- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull-beats}[25137] -- Fixed: limit for retries to Kibana configurable {issue-beats}[25063] -- Fix issue with status and inspect inside of container {pull-beats}[25204] -- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull-beats}[25149] -- Reduce log level for listener cleanup to debug {pull-beats} -- Passing in policy id to container command works {pull-beats}[25352] -- Reduce log level for listener cleanup to debug {pull-beats}[25274] -- Delay the restart of application when a status report of failure is given {pull-beats}[25339] -- Don't log when upgrade capability doesn't apply {pull-beats}[25386] -- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue-beats}[25371] -- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue-beats}[24453] -- Fix AckBatch to do nothing when no actions passed {pull-beats}[25562] -- Add error log entry when listener creation fails {issue-beats}[23482] -- Handle case where policy doesn't contain Fleet connection information {pull-beats}[25707] -- Fix fleet-server.yml spec to not overwrite existing keys {pull-beats}[25741] -- Agent sends wrong log level to Endpoint {issue-beats}[25583] -- Fix startup with failing configuration {pull-beats}[26057] -- Change timestamp in elatic-agent-json.log to use UTC {issue-beats}[25391] -- Fix add support for Logstash output. {pull-beats}[24305] -- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull-beats}[26583] -- Fix issue where proxy enrollment options broke enrollment command. {pull-beats}[26749] -- Remove symlink.prev from previously failed upgrade {pull-beats}[26785] -- Fix apm-server supported outputs not being in sync with supported output types. {pull-beats}[26885] -- Set permissions during installation {pull-beats}[26665] -- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] -- Fix issue with atomic extract running in K8s {pull-beats}[27396] -- Fix issue with install directory in state path in K8s {pull-beats}[27396] -- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] -- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue-beats}[27670] {pull-beats}[27671] -- Add validation for certificate flags to ensure they are absolute paths. {pull-beats}[27779] -- Migrate state on upgrade {pull-beats}[27825] -- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue-beats}[25449] -- Ignore ErrNotExists when fixing permissions. {issue-beats}[27836] {pull-beats}[27846] -- Snapshot artifact lookup will use agent.download proxy settings. {issue-beats}[27903] {pull-beats}[27904] -- Fix lazy acker to only add new actions to the batch. {pull-beats}[27981] -- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull-beats}[28260] -- Fix agent configuration overwritten by default fleet config. 
{pull-beats}[29297] -- Allow agent containers to use basic auth to create a service token. {pull-beats}[29651] -- Fix issue where a failing artifact verification does not remove the bad artifact. {pull-beats}[30281] -- Reduce Elastic Agent shut down time by stopping processes concurrently {pull-beats}[29650] -- Move `context cancelled` error from fleet gateway into debug level. {pull}187[187] -- Update library containerd to 1.5.10. {pull}186[186] -- Add fleet-server to output of elastic-agent inspect output command (and diagnostic bundle). {pull}243[243] -- Update API calls that the agent makes to Kibana when running the container command. {pull}253[253] -- diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262] -- Collects stdout and stderr of applications run as a process and logs them. {issue}[88] -- Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] -- diagnostics collect file mod times are set. {pull}570[570] -- Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] -- Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] -- Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] -- Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] -- Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] - -==== New features - -- Prepare packaging for endpoint and asc files {pull-beats}[20186] -- Improved version CLI {pull-beats}[20359] -- Enroll CLI now restarts running daemon {pull-beats}[20359] -- Add restart CLI cmd {pull-beats}[20359] -- Add new `synthetics/*` inputs to run Heartbeat {pull-beats}[20387] -- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue-beats}[20312] {pull-beats}[20713] -- Add `docker` composable dynamic provider. {pull-beats}[20842] -- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull-beats}[20839] -- Add support for EQL based condition on inputs {pull-beats}[20994] -- Send `fleet.host.id` to Endpoint Security {pull-beats}[21042] -- Add `install` and `uninstall` subcommands {pull-beats}[21206] -- Use new form of fleet API paths {pull-beats}[21478] -- Add `kubernetes` composable dynamic provider. 
{pull-beats}[21480] -- Send updating state {pull-beats}[21461] -- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull-beats}[21543] -- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull-beats}[21425] -- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull-beats}[21599] -- Update `install` command to perform enroll before starting Elastic Agent {pull-beats}[21772] -- Update `fleet.kibana.path` from a POLICY_CHANGE {pull-beats}[21804] -- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull-beats}[21694] -- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull-beats}[22352] -- Ship `endpoint-security` logs to elasticsearch {pull-beats}[22526] -- Log level reloadable from fleet {pull-beats}[22690] -- Push log level downstream {pull-beats}[22815] -- Add metrics collection for Agent {pull-beats}[22793] -- Add support for Fleet Server {pull-beats}[23736] -- Add support for enrollment with local bootstrap of Fleet Server {pull-beats}[23865] -- Add TLS support for Fleet Server {pull-beats}[24142] -- Add support for Fleet Server running under Elastic Agent {pull-beats}[24220] -- Add CA support to Elastic Agent docker image {pull-beats}[24486] -- Add k8s secrets provider for Agent {pull-beats}[24789] -- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull-beats}[24817] -- Add status subcommand {pull-beats}[24856] -- Add leader_election provider for k8s {pull-beats}[24267] -- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull-beats}[25083] -- Keep http and logging config during enroll {pull-beats}[25132] -- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull-beats}[25150] -- Use `filestream` input for internal log collection. {pull-beats}[25660] -- Enable agent to send custom headers to kibana/ES {pull-beats}[26275] -- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue-beats}[21121] {pull-beats}[26394] {pull-beats}[26548] -- Add proxy support to artifact downloader and communication with fleet server. {pull-beats}[25219] -- Add proxy support to enroll command. {pull-beats}[26514] -- Enable configuring monitoring namespace {issue-beats}[26439] -- Communicate with Fleet Server over HTTP2. {pull-beats}[26474] -- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue-beats}[26758] {pull-beats}[26828] -- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull-beats}[26801] -- Increase Agent's mem limits in k8s. {pull-beats}[27153] -- Add new --enroll-delay option for install and enroll commands. {pull-beats}[27118] -- Add link to troubleshooting guide on fatal exits. {issue-beats}[26367] {pull-beats}[27236] -- Agent now adapts the beats queue size based on output settings. {issue-beats}[26638] {pull-beats}[27429] -- Support ephemeral containers in Kubernetes dynamic provider. {issue-beats}[#27020] {pull-beats}[27707] -- Add complete k8s metadata through composable provider. {pull-beats}[27691] -- Add diagnostics command to gather beat metadata. {pull-beats}[28265] -- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull-beats}[28461] -- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull-beats}[28096] -- Enable pprof endpoints for beats processes. 
Allow pprof endpoints for elastic-agent if enabled. {pull-beats}[28983] -- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull-beats}[28798] -- Allow pprof endpoints for elastic-agent or beats if enabled. {pull-beats}[28983] {pull-beats}[29155] -- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull-beats}[29128] -- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull-beats}[23139] -- Add results of inspect output command into archive produced by diagnostics collect. {pull-beats}[29902] -- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull-beats}[30087] -- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull-beats}[30289] -- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull-beats}[30462] -- Add action_input_type for the .fleet-actions-results {pull-beats}[30562] -- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull-beats}[30471] -- Update ack response schema and processing, add retrier for acks {pull}200[200] -- Enhance error messages and logs for process start {pull}225[225] -- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue-beats}[29774] {pull}226[226] -- Add Elastic APM instrumentation {pull}180[180] -- Agent can be built for `darwin/arm64`. When it's built for both `darwin/arm64` and `darwin/adm64` a universal binary is also built and packaged. {pull}203[203] -- Add support for Cloudbeat. {pull}179[179] -- Fix download verification in snapshot builds. {issue}252[252] -- Add support for kubernetes cronjobs {pull}279[279] -- Increase the download artifact timeout to 10mins and add log download statistics. {pull}308[308] -- Save the agent configuration and the state encrypted on the disk. {issue}535[535] {pull}398[398] -- Bump node.js version for heartbeat/synthetics to 16.15.0 -- Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419] -- Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] -- Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] -- Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] -- Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}[427] {pull}[700] -- Allow upgrade actions to be retried on failure with action queue scheduling. 
{issue}778[778] {pull}1219[1219] diff --git a/Dockerfile b/Dockerfile index 709dcbc7bef..fd56ef5e2ff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.17.10 +ARG GO_VERSION=1.18.7 FROM circleci/golang:${GO_VERSION} diff --git a/Makefile b/Makefile index 37022ff7d7d..19eca744b78 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ export MAGE_IMPORT_PATH mage: ifndef MAGE_PRESENT @echo Installing mage $(MAGE_VERSION). - @go get -ldflags="-X $(MAGE_IMPORT_PATH)/mage.gitTag=$(MAGE_VERSION)" ${MAGE_IMPORT_PATH}@$(MAGE_VERSION) + @go install ${MAGE_IMPORT_PATH}@$(MAGE_VERSION) @-mage -clean endif @true diff --git a/NOTICE.txt b/NOTICE.txt index ad7c25aaad6..f23805c5d87 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -617,11 +617,11 @@ you may not use this file except in compliance with the Elastic License. -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.0.0-20220404145827-89887023c1ab +Version: v0.2.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.0.0-20220404145827-89887023c1ab/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.2.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/README.md b/README.md index 2c0dbe31f69..bd0ae71c5fc 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -# Elastic Agent developer docs +# Elastic Agent + +## Developer docs The source files for the general Elastic Agent documentation are currently stored in the [observability-docs](https://github.com/elastic/observability-docs) repo. The following docs are only focused on getting developers started building code for Elastic Agent. @@ -9,6 +11,7 @@ Prerequisites: - installed [mage](https://github.com/magefile/mage) - [Docker](https://docs.docker.com/get-docker/) - [X-pack](https://github.com/elastic/beats/tree/main/x-pack) to pre-exist in the parent folder of the local Git repository checkout +- [elastic-agent-changelog-tool](https://github.com/elastic/elastic-agent-changelog-tool) to add changelog fragments for changelog generation If you are on a Mac with M1 chip, don't forget to export some docker variable to be able to build for AMD ``` export DOCKER_BUILDKIT=0 export COMPOSE_DOCKER_CLI_BUILD=0 export DOCKER_DEFAULT_PLATFORM=linux/amd64 ``` @@ -101,3 +104,20 @@ kubectl apply -f elastic-agent-${ELASTIC_AGENT_MODE}-kubernetes.yaml ``` kubectl -n kube-system get pods -l app=elastic-agent ``` + +## Updating dependencies/PRs +Even though we prefer `mage` for our automation, we still have some +rules implemented in our `Makefile`, and CI uses the `Makefile` as well. +CI runs `make check-ci`, so make sure to run it locally before +submitting any PRs to get quicker feedback instead of waiting for a CI failure. + +### Generating the `NOTICE.txt` when updating/adding dependencies +To do so, just run `make notice`; this is also part of `make check-ci` +and is the same check our CI runs.
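A minimal sketch of that local workflow (assuming only a normal checkout with `make` available and the two targets described above):

```
# Regenerate NOTICE.txt after updating or adding dependencies
make notice

# Run the same checks CI runs, before opening the PR
make check-ci
```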
At some point we will migrate it to mage (see discussion on https://github.com/elastic/elastic-agent/pull/1108 and on https://github.com/elastic/elastic-agent/issues/1107). However, until we have the mage automation sorted out, it has been removed to avoid confusion. diff --git a/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml b/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml new file mode 100644 index 00000000000..19844fe2dfc --- /dev/null +++ b/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Fix a panic caused by a race condition when installing the Elastic Agent. +pr: https://github.com/elastic/elastic-agent/pull/823 diff --git a/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml b/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml new file mode 100644 index 00000000000..f7b6ce903d3 --- /dev/null +++ b/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml @@ -0,0 +1,3 @@ +kind: breaking-change +summary: Upgrade to Go 1.18. Certificates signed with SHA-1 are now rejected. See the Go 1.18 https://tip.golang.org/doc/go1.18#sha1[release notes] for details. +pr: https://github.com/elastic/elastic-agent/pull/832 diff --git a/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml b/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml new file mode 100644 index 00000000000..9110968e91f --- /dev/null +++ b/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Add `lumberjack` input type to the Filebeat spec. +pr: https://github.com/elastic/elastic-agent/pull/959 diff --git a/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml b/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml new file mode 100644 index 00000000000..04e84669955 --- /dev/null +++ b/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Add support for hints-based autodiscovery in the kubernetes provider.
+pr: https://github.com/elastic/elastic-agent/pull/698 diff --git a/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml b/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml new file mode 100644 index 00000000000..b5712f4c193 --- /dev/null +++ b/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Fix unintended reset of source URI when downloading components +pr: https://github.com/elastic/elastic-agent/pull/1252 diff --git a/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml b/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml new file mode 100644 index 00000000000..a94f5b66751 --- /dev/null +++ b/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml @@ -0,0 +1,4 @@ +kind: bug-fix +summary: Create separate status reporter for local only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. +issue: https://github.com/elastic/elastic-agent/issues/1157 +pr: https://github.com/elastic/elastic-agent/pull/1285 diff --git a/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml b/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml new file mode 100644 index 00000000000..15f81e7d5ad --- /dev/null +++ b/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Improve logging during upgrades. +pr: https://github.com/elastic/elastic-agent/pull/1287 diff --git a/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml b/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml new file mode 100644 index 00000000000..3e4ac3d91a5 --- /dev/null +++ b/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Add success log message after previous checkin failures +pr: https://github.com/elastic/elastic-agent/pull/1327 diff --git a/changelog/fragments/1664989867-fix-docker-provider-processors.yaml b/changelog/fragments/1664989867-fix-docker-provider-processors.yaml new file mode 100644 index 00000000000..c7c87152479 --- /dev/null +++ b/changelog/fragments/1664989867-fix-docker-provider-processors.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix docker provider add_fields processors + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. 
+#description: + +# Affected component; a word indicating the component this changeset affects. +component: providers + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: 1234 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +#issue: 1234 diff --git a/changelog/fragments/1665517984-improve-checkin-error-logging.yaml b/changelog/fragments/1665517984-improve-checkin-error-logging.yaml new file mode 100644 index 00000000000..7bf2777d9d5 --- /dev/null +++ b/changelog/fragments/1665517984-improve-checkin-error-logging.yaml @@ -0,0 +1,5 @@ +kind: enhancement +summary: Improve logging of Fleet check-in errors. +description: Improve logging of Fleet check-in errors and only report the local state as degraded after two consecutive failed check-ins. +pr: 1477 +issue: 1154 diff --git a/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml b/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml new file mode 100644 index 00000000000..a928c800d1e --- /dev/null +++ b/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: use-stack-version-npm-synthetics + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +description: Always npm i the stack_release version of @elastic/synthetics + +# Affected component; a word indicating the component this changeset affects. +component: synthetics-integration + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1528 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+#issue: 1234 diff --git a/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml b/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml new file mode 100644 index 00000000000..93d5999f1b0 --- /dev/null +++ b/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; a 80ish characters long description of the change. +summary: Fix admin permission check on localized windows + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1552 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. 
+issue: 857 diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 35745dcec31..98e216142b7 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -1,8 +1,14 @@ ALL=elastic-agent-standalone elastic-agent-managed BEAT_VERSION=$(shell head -n 1 ../../version/docs/version.asciidoc | cut -c 17- ) +BRANCH_VERSION=$(shell sed -n '2p' ../../version/docs/version.asciidoc | cut -c 14- ) -.PHONY: generate-k8s $(ALL) +#variables needed for ci-create-kubernetes-templates-pull-request +ELASTIC_AGENT_REPO=kibana +ELASTIC_AGENT_REPO_PATH=x-pack/plugins/fleet/server/services/ +FILE_REPO=elastic_agent_manifest.ts +ELASTIC_AGENT_BRANCH=update-k8s-templates-$(shell date "+%Y%m%d%H%M%S") +.PHONY: generate-k8s $(ALL) generate-k8s: $(ALL) test: generate-k8s @@ -15,9 +21,66 @@ clean: @for f in $(ALL); do rm -f "$$f-kubernetes.yaml"; done $(ALL): +ifdef WITHOUTCONFIG + @echo "Generating $@-kubernetes-without-configmap.yaml" + @rm -f $@-kubernetes-without-configmap.yaml + @for f in $(shell ls $@/*.yaml | grep -v daemonset-configmap); do \ + sed -e "s/%VERSION%/VERSION/g" -e "s/%BRANCH%/${BRANCH_VERSION}/g" $$f >> $@-kubernetes-without-configmap.yaml; \ + echo --- >> $@-kubernetes-without-configmap.yaml; \ + done +else @echo "Generating $@-kubernetes.yaml" @rm -f $@-kubernetes.yaml @for f in $(shell ls $@/*.yaml); do \ - sed "s/%VERSION%/${BEAT_VERSION}/g" $$f >> $@-kubernetes.yaml; \ + sed -e "s/%VERSION%/${BEAT_VERSION}/g" -e "s/%BRANCH%/${BRANCH_VERSION}/g" $$f >> $@-kubernetes.yaml; \ echo --- >> $@-kubernetes.yaml; \ done +endif + +CHDIR_SHELL := $(SHELL) +define chdir + $(eval _D=$(firstword $(1) $(@D))) + $(info $(MAKE): cd $(_D)) $(eval SHELL = cd $(_D); $(CHDIR_SHELL)) +endef + +## ci-clone-kibana-repository : Clone Kibana Repository and copy new files for the PR +.PHONY: ci-clone-kibana-repository +ci-clone-kibana-repository: + git clone git@github.com:elastic/kibana.git + cp $(FILE_REPO) $(ELASTIC_AGENT_REPO)/$(ELASTIC_AGENT_REPO_PATH) + +## ci-create-kubernetes-templates-pull-request : Create the pull request for the kubernetes templates +$(eval HASDIFF =$(shell sh -c "git status | grep $(FILE_REPO) | wc -l")) +.PHONY: ci-create-kubernetes-templates-pull-request +ci-create-kubernetes-templates-pull-request: +ifeq ($(HASDIFF),1) + echo "INFO: Create branch to update k8s templates" + git config user.name obscloudnativemonitoring + git config user.email obs-cloudnative-monitoring@elastic.co + git checkout -b $(ELASTIC_AGENT_BRANCH) + echo "INFO: add files if any" + git add $(ELASTIC_AGENT_REPO_PATH)$(FILE_REPO) + echo "INFO: commit changes if any" + git diff --staged --quiet || git commit -m "[Automated PR] Publish kubernetes templates for elastic-agent" + echo "INFO: show remote details" + git remote -v + +ifeq ($(DRY_RUN),TRUE) + echo "INFO: skip pushing branch" +else + echo "INFO: push branch" + @git push --set-upstream origin $(ELASTIC_AGENT_BRANCH) + echo "INFO: create pull request" + @gh pr create \ + --title "Update kubernetes templates for elastic-agent" \ + --body "Automated by ${BUILD_URL}" \ + --label automation \ + --base main \ + --head $(ELASTIC_AGENT_BRANCH) \ + --reviewer elastic/obs-cloudnative-monitoring +endif + +else + echo "No differences found with kibana git repository" +endif + diff --git a/deploy/kubernetes/creator_k8s_manifest.sh b/deploy/kubernetes/creator_k8s_manifest.sh new file mode 100755 index 00000000000..245f43dcb3d --- /dev/null +++ b/deploy/kubernetes/creator_k8s_manifest.sh @@ -0,0 +1,58 @@ +#!/bin/bash +#### +# Bash 
script that creates the manifest file needed by Kibana at https://github.com/elastic/kibana/blob/main/x-pack/plugins/fleet/server/services/elastic_agent_manifest.ts +# The script takes the path of the elastic-agent manifests as an argument +# E.g. ./creator_k8s_manifest.sh deploy/kubernetes +#### + + +STANDALONE=elastic-agent-standalone-kubernetes-without-configmap.yaml +MANAGED=elastic-agent-managed-kubernetes-without-configmap.yaml +OUTPUT_FILE=elastic_agent_manifest.ts + +#Check if arguments were provided +((!$#)) && echo "No arguments provided! Please provide the path of the elastic-agent manifests" && exit 1 +MANIFEST_PATH=$1 + +#Check if file elastic-agent-standalone-kubernetes-without-configmap.yaml exists +if [ ! -f "$MANIFEST_PATH/$STANDALONE" ]; then + echo "$MANIFEST_PATH/$STANDALONE does not exist" + exit 1 +fi + +#Check if file elastic-agent-managed-kubernetes-without-configmap.yaml exists +if [ ! -f "$MANIFEST_PATH/$MANAGED" ]; then + echo "$MANIFEST_PATH/$MANAGED does not exist" + exit 1 +fi + +#Start creation of the output file +cat << EOF > $OUTPUT_FILE +/* +* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +* or more contributor license agreements. Licensed under the Elastic License +* 2.0; you may not use this file except in compliance with the Elastic License +* 2.0. +*/ + +export const elasticAgentStandaloneManifest = \`--- +EOF + +cat $MANIFEST_PATH/$STANDALONE >> $OUTPUT_FILE +echo "\`;" >> $OUTPUT_FILE + +cat << EOF >> $OUTPUT_FILE + +export const elasticAgentManagedManifest = \`--- +EOF + +cat $MANIFEST_PATH/$MANAGED >> $OUTPUT_FILE +echo -n "\`;" >> $OUTPUT_FILE + +#Replace all occurrences of elastic-agent-standalone +sed -i -e 's/elastic-agent-standalone/elastic-agent/g' $OUTPUT_FILE + +#Remove the ES_HOST entry from the file +sed -i -e '/# The Elasticsearch host to communicate with/d' $OUTPUT_FILE +sed -i -e '/ES_HOST/d' $OUTPUT_FILE +sed -i -e '/value: ""/d' $OUTPUT_FILE \ No newline at end of file diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 1e2403f47a2..3a41910c51a 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -15,9 +15,11 @@ spec: labels: app: elastic-agent spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes.
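Taken together, the Makefile targets and the creator script above form the whole sync flow. A possible local dry run, assuming the gh CLI, an SSH-authenticated GitHub remote, and a starting directory of deploy/kubernetes:

    # Build the without-configmap manifests, assemble elastic_agent_manifest.ts,
    # then walk the PR flow; DRY_RUN=TRUE stops before git push / gh pr create.
    WITHOUTCONFIG=true make generate-k8s
    ./creator_k8s_manifest.sh .
    make ci-clone-kibana-repository
    cd kibana && make -f ../Makefile ci-create-kubernetes-templates-pull-request DRY_RUN=TRUE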
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent @@ -43,7 +45,7 @@ spec: # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN - value: "" + value: "token-id" - name: KIBANA_HOST value: "http://kibana:5601" # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet @@ -81,21 +83,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true - name: etc-mid mountPath: /etc/machine-id readOnly: true @@ -112,26 +105,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. + - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd # Mount /etc/machine-id from the host to determine host ID # Needed for Elastic Security integration - name: etc-mid @@ -245,6 +227,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index c3c679efa36..e1b85082ac3 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -15,9 +15,11 @@ spec: labels: app: elastic-agent spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. 
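The doubled toleration is deliberate: older clusters taint control-plane nodes with node-role.kubernetes.io/master, while newer ones use node-role.kubernetes.io/control-plane (the rename landed around Kubernetes 1.24/1.25), so both keys must be tolerated during the transition. A quick way to check which taint a given cluster applies, assuming kubectl access:

    kubectl get nodes -o custom-columns='NAME:.metadata.name,TAINTS:.spec.taints[*].key'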
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent @@ -43,7 +45,7 @@ spec: # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN - value: "" + value: "token-id" - name: KIBANA_HOST value: "http://kibana:5601" # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet @@ -81,21 +83,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true - name: etc-mid mountPath: /etc/machine-id readOnly: true @@ -112,26 +105,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. + - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd # Mount /etc/machine-id from the host to determine host ID # Needed for Elastic Security integration - name: etc-mid diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml index 0d961215f4e..778a4ba5520 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml @@ -63,6 +63,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 0984f0dc8ac..373282a4c1b 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -25,6 +25,8 @@ data: providers.kubernetes: node: ${NODE_NAME} scope: node + #Uncomment to enable hints' support + #hints.enabled: true inputs: - name: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true @@ 
-624,6 +626,7 @@ data: # period: 10s # condition: ${kubernetes.labels.app} == 'redis' --- +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -640,19 +643,34 @@ spec: labels: app: elastic-agent-standalone spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. + # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent-standalone hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + # Uncomment if using the hints feature + #initContainers: + # - name: k8s-templates-downloader + # image: busybox:1.28 + # command: ['sh'] + # args: + # - -c + # - >- + # mkdir -p /etc/elastic-agent/inputs.d && + # wget -O - https://github.com/elastic/elastic-agent/archive/main.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # volumeMounts: + # - name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone image: docker.elastic.co/beats/elastic-agent:8.3.0 args: [ - "-c", "/etc/agent.yml", + "-c", "/etc/elastic-agent/agent.yml", "-e", ] env: @@ -662,7 +680,7 @@ spec: value: "elastic" # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD - value: "" + value: "changeme" # The Elasticsearch host to communicate with - name: ES_HOST value: "" @@ -674,6 +692,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: STATE_PATH + value: "/etc/elastic-agent" securityContext: runAsUser: 0 resources: @@ -684,9 +704,12 @@ spec: memory: 400Mi volumeMounts: - name: datastreams - mountPath: /etc/agent.yml + mountPath: /etc/elastic-agent/agent.yml readOnly: true subPath: agent.yml + # Uncomment if using the hints feature + #- name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d - name: proc mountPath: /hostfs/proc readOnly: true @@ -699,26 +722,20 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true volumes: - name: datastreams configMap: defaultMode: 0640 name: agent-node-datastreams + # Uncomment if using the hints feature + #- name: external-inputs + # emptyDir: {} - name: proc hostPath: path: /proc @@ -731,26 +748,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed.
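With hints.enabled switched on and the templates mounted under /etc/elastic-agent/inputs.d, workloads opt in through pod annotations. A hypothetical sketch, assuming the co.elastic.hints/* annotation prefix used by the hints feature and a pod named redis with a matching redis template in templates.d:

    kubectl annotate pod redis \
      co.elastic.hints/package=redis \
      co.elastic.hints/host='${kubernetes.pod.ip}:6379' \
      co.elastic.hints/period=10s

The ${kubernetes.hints.redis.*.enabled} conditions in the generated templates then evaluate to true for that pod's containers.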
+ - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -858,6 +864,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 7048bf22adb..1a52302826d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -25,6 +25,8 @@ data: providers.kubernetes: node: ${NODE_NAME} scope: node + #Uncomment to enable hints support + #hints.enabled: true inputs: - name: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 0bf131ec8ea..d40291d2ed1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,19 +15,34 @@ spec: labels: app: elastic-agent-standalone spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes.
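Relocating the config from /etc/agent.yml to /etc/elastic-agent/agent.yml pairs with the new STATE_PATH variable, so configuration and agent state share one writable directory. A quick sanity check against a running pod (a sketch, assuming the DaemonSet is deployed in kube-system):

    kubectl exec -n kube-system daemonset/elastic-agent-standalone -- ls /etc/elastic-agent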
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent-standalone hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + # Uncomment if using the hints feature + #initContainers: + # - name: k8s-templates-downloader + # image: busybox:1.28 + # command: ['sh'] + # args: + # - -c + # - >- + # mkdir -p /etc/elastic-agent/inputs.d && + # wget -O - https://github.com/elastic/elastic-agent/archive/%BRANCH%.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # volumeMounts: + # - name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone image: docker.elastic.co/beats/elastic-agent:%VERSION% args: [ - "-c", "/etc/agent.yml", + "-c", "/etc/elastic-agent/agent.yml", "-e", ] env: @@ -36,7 +52,7 @@ spec: value: "elastic" # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD - value: "" + value: "changeme" # The Elasticsearch host to communicate with - name: ES_HOST value: "" @@ -48,6 +64,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: STATE_PATH + value: "/etc/elastic-agent" securityContext: runAsUser: 0 resources: @@ -58,9 +76,12 @@ spec: memory: 400Mi volumeMounts: - name: datastreams - mountPath: /etc/agent.yml + mountPath: /etc/elastic-agent/agent.yml readOnly: true subPath: agent.yml + # Uncomment if using the hints feature + #- name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d - name: proc mountPath: /hostfs/proc readOnly: true @@ -73,26 +94,20 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true volumes: - name: datastreams configMap: defaultMode: 0640 name: agent-node-datastreams + # Uncomment if using the hints feature + #- name: external-inputs + # emptyDir: {} - name: proc hostPath: path: /proc @@ -105,23 +120,12 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed.
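In this source template the image tag and the archive URL carry %VERSION% and %BRANCH% placeholders; the generate-k8s target expands them via sed from version/docs/version.asciidoc, roughly equivalent to the following (values illustrative):

    sed -e "s/%VERSION%/8.3.0/g" -e "s/%BRANCH%/main/g" \
        elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml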
+ - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml index 8a644f3aadf..a0cd80b456a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml @@ -63,6 +63,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml new file mode 100644 index 00000000000..007060a5ac0 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml @@ -0,0 +1,96 @@ +inputs: + - name: filestream-activemq + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.activemq.audit.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-audit + - condition: ${kubernetes.hints.activemq.log.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-log + data_stream.namespace: default + - name: activemq/metrics-activemq + type: activemq/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.activemq.broker.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.broker + type: metrics + hosts: + - ${kubernetes.hints.activemq.broker.host|'localhost:8161'} + metricsets: + - broker + password: ${kubernetes.hints.activemq.broker.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.broker.period|'10s'} + tags: + - forwarded + - activemq-broker + username: ${kubernetes.hints.activemq.broker.username|'admin'} + - condition: ${kubernetes.hints.activemq.queue.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.queue + type: metrics + hosts: + - ${kubernetes.hints.activemq.queue.host|'localhost:8161'} + metricsets: + - queue + password: ${kubernetes.hints.activemq.queue.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.queue.period|'10s'} + tags: + - forwarded + - activemq-queue + username: 
${kubernetes.hints.activemq.queue.username|'admin'} + - condition: ${kubernetes.hints.activemq.topic.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.topic + type: metrics + hosts: + - ${kubernetes.hints.activemq.topic.host|'localhost:8161'} + metricsets: + - topic + password: ${kubernetes.hints.activemq.topic.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.topic.period|'10s'} + tags: + - forwarded + - activemq-topic + username: ${kubernetes.hints.activemq.topic.username|'admin'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml new file mode 100644 index 00000000000..a6e461a5363 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -0,0 +1,134 @@ +inputs: + - name: filestream-apache + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.access + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.error + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - apache-error + data_stream.namespace: default + - name: httpjson-apache + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true and ${kubernetes.hints.apache.enabled} == true + config_version: "2" + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: apache.access + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="access*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true and ${kubernetes.hints.apache.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: apache.error + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=apache:error OR sourcetype=apache_error | streamstats max(_indextime) AS max_indextime + - set: + target: 
url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - apache-error + data_stream.namespace: default + - name: apache/metrics-apache + type: apache/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.apache.status.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.status + type: metrics + hosts: + - ${kubernetes.hints.apache.status.host|'http://127.0.0.1'} + metricsets: + - status + period: ${kubernetes.hints.apache.status.period|'30s'} + server_status_path: /server-status + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml new file mode 100644 index 00000000000..bce4edf635c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ -0,0 +1,327 @@ +inputs: + - name: filestream-cassandra + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.log.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^([A-Z]) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cassandra.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cassandra-systemlogs + data_stream.namespace: default + - name: jolokia/metrics-cassandra + type: jolokia/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.metrics.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.metrics + type: metrics + hosts: + - ${kubernetes.hints.cassandra.metrics.host|'localhost:8778'} + jmx.mappings: + - attributes: + - attr: ReleaseVersion + field: system.version + - attr: ClusterName + field: system.cluster + - attr: LiveNodes + field: system.live_nodes + - attr: UnreachableNodes + field: system.unreachable_nodes + - attr: LeavingNodes + field: system.leaving_nodes + - attr: JoiningNodes + field: system.joining_nodes + - attr: MovingNodes + field: system.moving_nodes + mbean: org.apache.cassandra.db:type=StorageService + - attributes: + - attr: Datacenter + field: system.data_center + - attr: Rack + field: system.rack + mbean: org.apache.cassandra.db:type=EndpointSnitchInfo + - attributes: + - attr: Count + field: storage.total_hint_in_progress + mbean: org.apache.cassandra.metrics:name=TotalHintsInProgress,type=Storage + - attributes: + - attr: Count + field: storage.total_hints + mbean: org.apache.cassandra.metrics:name=TotalHints,type=Storage + - attributes: + - attr: Count + field: storage.exceptions + mbean: org.apache.cassandra.metrics:name=Exceptions,type=Storage + - attributes: + - attr: Count + field: storage.load + mbean: org.apache.cassandra.metrics:name=Load,type=Storage + - attributes: + - attr: OneMinuteRate + field: 
hits.succeeded_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsSucceeded + - attributes: + - attr: OneMinuteRate + field: hits.failed_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsFailed + - attributes: + - attr: OneMinuteRate + field: hits.timed_out_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsTimedOut + - attributes: + - attr: CollectionTime + field: gc.concurrent_mark_sweep.collection_time + - attr: CollectionCount + field: gc.concurrent_mark_sweep.collection_count + mbean: java.lang:type=GarbageCollector,name=ConcurrentMarkSweep + - attributes: + - attr: CollectionTime + field: gc.par_new.collection_time + - attr: CollectionCount + field: gc.par_new.collection_count + mbean: java.lang:type=GarbageCollector,name=ParNew + - attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.other_usage + mbean: java.lang:type=Memory + - attributes: + - attr: Value + field: task.complete + mbean: org.apache.cassandra.metrics:name=CompletedTasks,type=CommitLog + - attributes: + - attr: Value + field: task.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,type=CommitLog + - attributes: + - attr: Value + field: task.total_commitlog_size + mbean: org.apache.cassandra.metrics:name=TotalCommitLogSize,type=CommitLog + - attributes: + - attr: Count + field: client_request.write.timeouts + - attr: OneMinuteRate + field: client_request.write.timeoutsms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Timeouts,scope=Write + - attributes: + - attr: Count + field: client_request.write.unavailables + - attr: OneMinuteRate + field: client_request.write.unavailablesms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Unavailables,scope=Write + - attributes: + - attr: Count + field: client_request.write.count + - attr: OneMinuteRate + field: client_request.write.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=Write + - attributes: + - attr: Count + field: client_request.write.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=Write + - attributes: + - attr: Count + field: client_request.read.timeouts + - attr: OneMinuteRate + field: client_request.read.timeoutsms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Timeouts,scope=Read + - attributes: + - attr: Count + field: client_request.read.unavailables + - attr: OneMinuteRate + field: client_request.read.unavailablesms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Unavailables,scope=Read + - attributes: + - attr: Count + field: client_request.read.count + - attr: OneMinuteRate + field: client_request.read.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=Read + - attributes: + - attr: Count + field: client_request.read.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=Read + - attributes: + - attr: OneMinuteRate + field: client_request.range_slice.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=RangeSlice + - attributes: + - attr: Count + field: client_request.range_slice.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=RangeSlice + - attributes: + - attr: OneMinuteRate + field: client_request.caswrite.one_minute_rate + mbean: 
org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=CASWrite + - attributes: + - attr: OneMinuteRate + field: client_request.casread.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=CASRead + - attributes: + - attr: Value + field: client.connected_native_clients + mbean: org.apache.cassandra.metrics:type=Client,name=connectedNativeClients + - attributes: + - attr: Value + field: compaction.completed + mbean: org.apache.cassandra.metrics:name=CompletedTasks,type=Compaction + - attributes: + - attr: Value + field: compaction.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,type=Compaction + - attributes: + - attr: Value + field: table.live_ss_table_count + mbean: org.apache.cassandra.metrics:type=Table,name=LiveSSTableCount + - attributes: + - attr: Value + field: table.live_disk_space_used + mbean: org.apache.cassandra.metrics:type=Table,name=LiveDiskSpaceUsed + - attributes: + - attr: Value + field: table.all_memtables_heap_size + mbean: org.apache.cassandra.metrics:type=Table,name=AllMemtablesHeapSize + - attributes: + - attr: Value + field: table.all_memtables_off_heap_size + mbean: org.apache.cassandra.metrics:type=Table,name=AllMemtablesOffHeapSize + - attributes: + - attr: OneMinuteRate + field: cache.key_cache.requests.one_minute_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests + - attributes: + - attr: Value + field: cache.key_cache.capacity + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity + - attributes: + - attr: Value + field: cache.key_cache.one_minute_hit_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=OneMinuteHitRate + - attributes: + - attr: OneMinuteRate + field: cache.row_cache.requests.one_minute_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests + - attributes: + - attr: Value + field: cache.row_cache.capacity + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity + - attributes: + - attr: Value + field: cache.row_cache.one_minute_hit_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=OneMinuteHitRate + - attributes: + - attr: Value + field: thread_pools.counter_mutation_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=CounterMutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.counter_mutation_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=CounterMutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.mutation_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=MutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.mutation_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=MutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_repair_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=ReadRepairStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_repair_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=ReadRepairStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=ReadStage,type=ThreadPools 
+ - attributes: + - attr: Value + field: thread_pools.read_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=ReadStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.request_response_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=RequestResponseStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.request_response_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=RequestResponseStage,type=ThreadPools + - attributes: + - attr: Value + field: column_family.total_disk_space_used + mbean: org.apache.cassandra.metrics:name=TotalDiskSpaceUsed,type=ColumnFamily + - attributes: + - attr: Count + field: dropped_message.batch_remove + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.batch_store + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.counter_mutation + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION,name=Dropped + - attributes: + - attr: Count + field: dropped_message.hint + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT,name=Dropped + - attributes: + - attr: Count + field: dropped_message.mutation + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=Dropped + - attributes: + - attr: Count + field: dropped_message.paged_range + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=PAGED_RANGE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.range_slice + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.read + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=Dropped + - attributes: + - attr: Count + field: dropped_message.read_repair + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=Dropped + - attributes: + - attr: Count + field: dropped_message.request_response + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.trace + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=_TRACE,name=Dropped + metricsets: + - jmx + namespace: metrics + password: ${kubernetes.hints.cassandra.metrics.password|'admin'} + path: /jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.cassandra.metrics.period|'10s'} + username: ${kubernetes.hints.cassandra.metrics.username|'admin'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml new file mode 100644 index 00000000000..524cb6159f3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml @@ -0,0 +1,51 @@ +inputs: + - name: filestream-cef + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cef.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - rename: + fields: + - 
from: message + to: event.original + - decode_cef: + field: event.original + prospector: + scanner: + symlinks: true + tags: + - cef + - forwarded + data_stream.namespace: default + - name: udp-cef + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + host: localhost:9003 + processors: + - rename: + fields: + - from: message + to: event.original + - decode_cef: + field: event.original + tags: + - cef + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml new file mode 100644 index 00000000000..c8d49475fb3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml @@ -0,0 +1,62 @@ +inputs: + - name: filestream-checkpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.checkpoint.firewall.stream|'all'} + paths: null + processors: + - add_locale: null + - add_fields: + fields: + internal_zones: + - trust + target: _temp_ + - add_fields: + fields: + external_zones: + - untrust + target: _temp_ + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: tcp-checkpoint + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - forwarded + data_stream.namespace: default + - name: udp-checkpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml new file mode 100644 index 00000000000..3e55b02794d --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml @@ -0,0 +1,44 @@ +inputs: + - name: prometheus/metrics-cockroachdb + type: prometheus/metrics + use_output: default + streams: + - bearer_token_file: null + condition: ${kubernetes.hints.cockroachdb.status.enabled} == true or ${kubernetes.hints.cockroachdb.enabled} == true + data_stream: + dataset: cockroachdb.status + type: metrics + hosts: + - ${kubernetes.hints.cockroachdb.status.host|'localhost:8080'} + metrics_filters.exclude: null + metrics_filters.include: null + metrics_path: /_status/vars + metricsets: + - collector + password: null + period: ${kubernetes.hints.cockroachdb.status.period|'10s'} + ssl.certificate_authorities: null + use_types: true + username: null + data_stream.namespace: default + - name: filestream-cockroachdb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cockroachdb.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs 
+ type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml new file mode 100644 index 00000000000..95a2730c18b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml @@ -0,0 +1,79 @@ +inputs: + - name: filestream-crowdstrike + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.crowdstrike.falcon.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.falcon + type: logs + exclude_files: + - .gz$ + multiline.match: after + multiline.max_lines: 5000 + multiline.negate: true + multiline.pattern: ^{ + multiline.timeout: 10 + parsers: + - container: + format: auto + stream: ${kubernetes.hints.crowdstrike.falcon.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - crowdstrike-falcon + - condition: ${kubernetes.hints.crowdstrike.fdr.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.fdr + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.crowdstrike.fdr.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - crowdstrike-fdr + data_stream.namespace: default + - name: aws-s3-crowdstrike + type: aws-s3 + use_output: default + streams: + - condition: ${kubernetes.hints.crowdstrike.fdr.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.fdr + type: logs + queue_url: null + sqs.notification_parsing_script.source: | + function parse(n) { + var m = JSON.parse(n); + var evts = []; + var files = m.files; + var bucket = m.bucket; + if (!Array.isArray(files) || (files.length == 0) || bucket == null || bucket == "") { + return evts; + } + files.forEach(function(f){ + var evt = new S3EventV2(); + evt.SetS3BucketName(bucket); + evt.SetS3ObjectKey(f.path); + evts.push(evt); + }); + return evts; + } + tags: + - forwarded + - crowdstrike-fdr + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml new file mode 100644 index 00000000000..fc8f72c6206 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml @@ -0,0 +1,57 @@ +inputs: + - name: tcp-cyberarkpas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + host: localhost:9301 + processors: + - add_locale: null + tags: + - cyberarkpas-audit + - forwarded + tcp: null + data_stream.namespace: default + - name: udp-cyberarkpas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + host: localhost:9301 + 
processors: + - add_locale: null + tags: + - cyberarkpas-audit + - forwarded + udp: null + data_stream.namespace: default + - name: filestream-cyberarkpas + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true and ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cyberarkpas.audit.stream|'all'} + paths: null + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cyberarkpas-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml new file mode 100644 index 00000000000..49503b63346 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml @@ -0,0 +1,288 @@ +inputs: + - name: filestream-elasticsearch + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.elasticsearch.audit.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + - else: + - script: + id: elasticsearch_audit + lang: javascript + source: | + var requestRegex = new RegExp("request_body=\\\[(.*)\\\]$"); function process(event) { + var message = event.Get("message"); + if (message !== null) { + var matches = message.match(requestRegex); + if (matches && matches.length > 1) { + event.Put("_request", matches[1]); + } + } + } + if: + regexp: + message: ^{ + then: + - decode_json_fields: + fields: + - message + target: _json + - rename: + fields: + - from: _json.request.body + to: _request + ignore_missing: true + - drop_fields: + fields: + - _json + - detect_mime_type: + field: _request + target: http.request.mime_type + - drop_fields: + fields: + - _request + ignore_missing: true + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.deprecation.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.deprecation + type: logs + exclude_files: + - .gz$ + - _slowlog.log$ + - _access.log$ + multiline: + match: after + negate: true + pattern: ^(\[[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.deprecation.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.gc.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.gc + type: logs + exclude_files: + - .gz$ + exclude_lines: + - '^(OpenJDK|Java HotSpot).* Server VM ' + - '^CommandLine flags: ' + - '^Memory: ' + - ^{ + multiline: + match: after + negate: true + pattern: ^(\[?[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.gc.stream|'all'} + paths: + - 
/var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.server.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.server + type: logs + exclude_files: + - .gz$ + - _slowlog.log$ + - _access.log$ + - _deprecation.log$ + multiline: + match: after + negate: true + pattern: ^(\[[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.server.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.slowlog.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.slowlog + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^(\[?[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: elasticsearch/metrics-elasticsearch + type: elasticsearch/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.elasticsearch.ccr.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.ccr + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.ccr.host|'http://localhost:9200'} + metricsets: + - ccr + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.cluster_stats.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.cluster_stats + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.cluster_stats.host|'http://localhost:9200'} + metricsets: + - cluster_stats + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.enrich.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.enrich + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.enrich.host|'http://localhost:9200'} + metricsets: + - enrich + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index.host|'http://localhost:9200'} + metricsets: + - index + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index_recovery.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index_recovery + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index_recovery.host|'http://localhost:9200'} + metricsets: + - index_recovery + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index_summary.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + 
dataset: elasticsearch.stack_monitoring.index_summary + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index_summary.host|'http://localhost:9200'} + metricsets: + - index_summary + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.ml_job.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.ml_job + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.ml_job.host|'http://localhost:9200'} + metricsets: + - ml_job + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.node.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.node + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.node.host|'http://localhost:9200'} + metricsets: + - node + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.node_stats.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.node_stats.host|'http://localhost:9200'} + metricsets: + - node_stats + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.pending_tasks.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.pending_tasks + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.pending_tasks.host|'http://localhost:9200'} + metricsets: + - pending_tasks + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.shard.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.shard + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.shard.host|'http://localhost:9200'} + metricsets: + - shard + period: null + scope: node + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml new file mode 100644 index 00000000000..178a6098f99 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-endpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.endpoint.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml new file mode 100644 index 00000000000..44b8074cb5a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml @@ -0,0 +1,59 @@ +inputs: + - name: filestream-fireeye + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.fireeye.nx.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - 
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml
new file mode 100644
index 00000000000..44b8074cb5a
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml
@@ -0,0 +1,59 @@
+inputs:
+    - name: filestream-fireeye
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true
+          data_stream:
+            dataset: fireeye.nx
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.fireeye.nx.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - fireeye-nx
+      data_stream.namespace: default
+    - name: udp-fireeye
+      type: udp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true
+          data_stream:
+            dataset: fireeye.nx
+            type: logs
+          fields_under_root: true
+          host: localhost:9523
+          processors:
+            - add_locale: null
+          tags:
+            - fireeye-nx
+            - forwarded
+          udp: null
+      data_stream.namespace: default
+    - name: tcp-fireeye
+      type: tcp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true
+          data_stream:
+            dataset: fireeye.nx
+            type: logs
+          fields_under_root: true
+          host: localhost:9523
+          processors:
+            - add_locale: null
+          tags:
+            - fireeye-nx
+            - forwarded
+          tcp: null
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml
new file mode 100644
index 00000000000..cff5d5821aa
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml
@@ -0,0 +1,68 @@
+inputs:
+    - name: syslog-haproxy
+      type: syslog
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true
+          data_stream:
+            dataset: haproxy.log
+            type: logs
+          processors:
+            - add_locale: null
+          protocol.udp:
+            host: localhost:9001
+          tags:
+            - forwarded
+            - haproxy-log
+      data_stream.namespace: default
+    - name: haproxy/metrics-haproxy
+      type: haproxy/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.haproxy.info.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true
+          data_stream:
+            dataset: haproxy.info
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.haproxy.info.host|'tcp://127.0.0.1:14567'}
+          metricsets:
+            - info
+          password: ${kubernetes.hints.haproxy.info.password|'admin'}
+          period: ${kubernetes.hints.haproxy.info.period|'10s'}
+          username: ${kubernetes.hints.haproxy.info.username|'admin'}
+        - condition: ${kubernetes.hints.haproxy.stat.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true
+          data_stream:
+            dataset: haproxy.stat
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.haproxy.stat.host|'tcp://127.0.0.1:14567'}
+          metricsets:
+            - stat
+          password: ${kubernetes.hints.haproxy.stat.password|'admin'}
+          period: ${kubernetes.hints.haproxy.stat.period|'10s'}
+          username: ${kubernetes.hints.haproxy.stat.username|'admin'}
+      data_stream.namespace: default
+    - name: filestream-haproxy
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true
+          data_stream:
+            dataset: haproxy.log
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.haproxy.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - haproxy-log
+      data_stream.namespace: default
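Each `${kubernetes.hints.haproxy...|'fallback'}` expression above resolves from a pod annotation when one is present and otherwise uses the quoted fallback. A hedged sketch of annotations that would satisfy these conditions; the `co.elastic.hints/` keys follow the hints convention, and all names and values are illustrative.

# Hypothetical pod annotations driving the haproxy template above.
apiVersion: v1
kind: Pod
metadata:
  name: haproxy                        # illustrative
  annotations:
    co.elastic.hints/package: haproxy  # makes kubernetes.hints.haproxy.enabled true
    co.elastic.hints/host: 'tcp://${kubernetes.pod.ip}:14567'  # replaces the 127.0.0.1 fallback
    co.elastic.hints/info.period: 30s  # per-stream override of the |'10s' fallback
spec:
  containers:
    - name: haproxy
      image: haproxy:2.6               # illustrative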
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml
new file mode 100644
index 00000000000..19892110b74
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml
@@ -0,0 +1,73 @@
+inputs:
+    - name: filestream-hashicorp_vault
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.hashicorp_vault.audit.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true
+          data_stream:
+            dataset: hashicorp_vault.audit
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.hashicorp_vault.audit.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - hashicorp-vault-audit
+        - condition: ${kubernetes.hints.hashicorp_vault.log.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true
+          data_stream:
+            dataset: hashicorp_vault.log
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.hashicorp_vault.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - hashicorp-vault-log
+      data_stream.namespace: default
+    - name: tcp-hashicorp_vault
+      type: tcp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.hashicorp_vault.audit.enabled} == true and ${kubernetes.hints.hashicorp_vault.enabled} == true
+          data_stream:
+            dataset: hashicorp_vault.audit
+            type: logs
+          host: localhost:9007
+          max_message_size: 1 MiB
+          tags:
+            - hashicorp-vault-audit
+            - forwarded
+      data_stream.namespace: default
+    - name: prometheus/metrics-hashicorp_vault
+      type: prometheus/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.hashicorp_vault.metrics.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true
+          data_stream:
+            dataset: hashicorp_vault.metrics
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.hashicorp_vault.metrics.host|'http://localhost:8200'}
+          metrics_path: /v1/sys/metrics
+          metricsets:
+            - collector
+          period: ${kubernetes.hints.hashicorp_vault.metrics.period|'30s'}
+          query:
+            format: prometheus
+          rate_counters: true
+          use_types: true
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml
new file mode 100644
index 00000000000..28d8f782d69
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml
@@ -0,0 +1,42 @@
+inputs:
+    - name: filestream-hid_bravura_monitor
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.hid_bravura_monitor.log.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true
+          data_stream:
+            dataset: hid_bravura_monitor.log
+            type: logs
+          line_terminator: carriage_return_line_feed
+          parsers:
+            - multiline:
+                match: after
+                negate: true
+                pattern: ^[[:cntrl:]]
+                type: pattern
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_fields:
+                fields:
+                  event.timezone: UTC
+                  hid_bravura_monitor.environment: PRODUCTION
+                  hid_bravura_monitor.instancename: default
+                  hid_bravura_monitor.instancetype: Privilege-Identity-Password
+                  hid_bravura_monitor.node: 0.0.0.0
+                target: ""
+          prospector.scanner.exclude_files:
+            - .gz$
+          tags: null
+      data_stream.namespace: default
+    - name: winlog-hid_bravura_monitor
+      type: winlog
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.hid_bravura_monitor.winlog.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true
+          data_stream:
+            dataset: hid_bravura_monitor.winlog
+            type: logs
+          name: Hitachi-Hitachi ID Systems-Hitachi ID Suite/Operational
+          tags: null
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml
new file mode 100644
index 00000000000..44162f4ac6b
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml
@@ -0,0 +1,71 @@
+inputs:
+    - name: filestream-iis
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.iis.access.enabled} == true or ${kubernetes.hints.iis.enabled} == true
+          data_stream:
+            dataset: iis.access
+            type: logs
+          exclude_files:
+            - .gz$
+          exclude_lines:
+            - ^#
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.iis.access.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - iis-access
+        - condition: ${kubernetes.hints.iis.error.enabled} == true or ${kubernetes.hints.iis.enabled} == true
+          data_stream:
+            dataset: iis.error
+            type: logs
+          exclude_files:
+            - .gz$
+          exclude_lines:
+            - ^#
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.iis.error.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - iis-error
+      data_stream.namespace: default
+    - name: iis/metrics-iis
+      type: iis/metrics
+      use_output: default
+      streams:
+        - application_pool.name: null
+          condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true
+          data_stream:
+            dataset: iis.application_pool
+            type: metrics
+          metricsets:
+            - application_pool
+          period: ${kubernetes.hints.iis.application_pool.period|'10s'}
+        - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true
+          data_stream:
+            dataset: iis.webserver
+            type: metrics
+          metricsets:
+            - webserver
+          period: ${kubernetes.hints.iis.webserver.period|'10s'}
+        - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true
+          data_stream:
+            dataset: iis.website
+            type: metrics
+          metricsets:
+            - website
+          period: ${kubernetes.hints.iis.website.period|'10s'}
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml
new file mode 100644
index 00000000000..d260fead6a6
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml
@@ -0,0 +1,63 @@
+inputs:
+    - name: filestream-infoblox_nios
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true
+          data_stream:
+            dataset: infoblox_nios.log
+            type: logs
+          exclude_files:
+            - .gz$
+          fields:
+            _conf:
+              tz_offset: local
+          fields_under_root: true
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.infoblox_nios.log.stream|'all'}
+          paths: null
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - forwarded
+            - infoblox_nios-log
+      data_stream.namespace: default
+    - name: tcp-infoblox_nios
+      type: tcp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true
+          data_stream:
+            dataset: infoblox_nios.log
+            type: logs
+          fields:
+            _conf:
+              tz_offset: local
+          fields_under_root: true
+          host: localhost:9027
+          tags:
+            - forwarded
+            - infoblox_nios-log
+      data_stream.namespace: default
+    - name: udp-infoblox_nios
+      type: udp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true
+          data_stream:
+            dataset: infoblox_nios.log
+            type: logs
+          fields:
+            _conf:
+              tz_offset: local
+          fields_under_root: true
+          host: localhost:9028
+          tags:
+            - forwarded
+            - infoblox_nios-log
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml
new file mode 100644
index 00000000000..02d1d8330d3
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml
@@ -0,0 +1,54 @@
+inputs:
+    - name: udp-iptables
+      type: udp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true
+          data_stream:
+            dataset: iptables.log
+            type: logs
+          host: localhost:9001
+          processors:
+            - add_locale: null
+          tags:
+            - iptables-log
+            - forwarded
+      data_stream.namespace: default
+    - name: filestream-iptables
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.iptables.log.enabled} == true and ${kubernetes.hints.iptables.enabled} == true
+          data_stream:
+            dataset: iptables.log
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.iptables.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - iptables-log
+            - forwarded
+      data_stream.namespace: default
+    - name: journald-iptables
+      type: journald
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true
+          data_stream:
+            dataset: iptables.log
+            type: logs
+          include_matches:
+            - _TRANSPORT=kernel
+          tags:
+            - iptables-log
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml
new file mode 100644
index 00000000000..b79eebbcfb0
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml
@@ -0,0 +1,61 @@
+inputs:
+    - name: filestream-kafka
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true
+          data_stream:
+            dataset: kafka.log
+            type: logs
+          exclude_files:
+            - .gz$
+          multiline:
+            match: after
+            negate: true
+            pattern: ^\[
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.kafka.log.stream|'all'}
+          paths:
+            - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - kafka-log
+      data_stream.namespace: default
+    - name: kafka/metrics-kafka
+      type: kafka/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.kafka.broker.enabled} == true or ${kubernetes.hints.kafka.enabled} == true
+          data_stream:
+            dataset: kafka.broker
+            type: metrics
+          hosts:
+            - localhost:8778
+          metricsets:
+            - broker
+          period: ${kubernetes.hints.kafka.broker.period|'10s'}
+        - condition: ${kubernetes.hints.kafka.consumergroup.enabled} == true or ${kubernetes.hints.kafka.enabled} == true
+          data_stream:
+            dataset: kafka.consumergroup
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kafka.consumergroup.host|'localhost:9092'}
+          metricsets:
+            - consumergroup
+          period: ${kubernetes.hints.kafka.consumergroup.period|'10s'}
+        - condition: ${kubernetes.hints.kafka.partition.enabled} == true or ${kubernetes.hints.kafka.enabled} == true
+          data_stream:
+            dataset: kafka.partition
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kafka.partition.host|'localhost:9092'}
+          metricsets:
+            - partition
+          period: ${kubernetes.hints.kafka.partition.period|'10s'}
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml
new file mode 100644
index 00000000000..794d014d41c
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml
@@ -0,0 +1,23 @@
+inputs:
+    - name: filestream-keycloak
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.keycloak.log.enabled} == true or ${kubernetes.hints.keycloak.enabled} == true
+          data_stream:
+            dataset: keycloak.log
+            type: logs
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+            - add_fields:
+                fields:
+                  only_user_events: false
+                  tz_offset: local
+                target: _tmp
+          prospector.scanner.exclude_files:
+            - \.gz$
+          tags:
+            - keycloak-log
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml
new file mode 100644
index 00000000000..1c27b4830ab
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml
@@ -0,0 +1,112 @@
+inputs:
+    - name: filestream-kibana
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.kibana.audit.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.audit
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.kibana.audit.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+            - add_fields:
+                fields:
+                  ecs.version: 1.10.0
+                target: ""
+            - decode_json_fields:
+                fields:
+                  - message
+                target: kibana._audit_temp
+          prospector:
+            scanner:
+              symlinks: true
+        - condition: ${kubernetes.hints.kibana.log.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.log
+            type: logs
+          exclude_files:
+            - .gz$
+          json.add_error_key: true
+          json.keys_under_root: false
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.kibana.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_fields:
+                fields:
+                  ecs.version: 1.10.0
+                target: ""
+          prospector:
+            scanner:
+              symlinks: true
+      data_stream.namespace: default
+    - name: kibana/metrics-kibana
+      type: kibana/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.kibana.cluster_actions.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.stack_monitoring.cluster_actions
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kibana.cluster_actions.host|'http://localhost:5601'}
+          metricsets:
+            - cluster_actions
+          period: null
+        - condition: ${kubernetes.hints.kibana.cluster_rules.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.stack_monitoring.cluster_rules
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kibana.cluster_rules.host|'http://localhost:5601'}
+          metricsets:
+            - cluster_rules
+          period: null
+        - condition: ${kubernetes.hints.kibana.node_actions.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.stack_monitoring.node_actions
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kibana.node_actions.host|'http://localhost:5601'}
+          metricsets:
+            - node_actions
+          period: null
+        - condition: ${kubernetes.hints.kibana.node_rules.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.stack_monitoring.node_rules
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kibana.node_rules.host|'http://localhost:5601'}
+          metricsets:
+            - node_rules
+          period: null
+        - condition: ${kubernetes.hints.kibana.stats.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.stack_monitoring.stats
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kibana.stats.host|'http://localhost:5601'}
+          metricsets:
+            - stats
+          period: null
+        - condition: ${kubernetes.hints.kibana.status.enabled} == true or ${kubernetes.hints.kibana.enabled} == true
+          data_stream:
+            dataset: kibana.stack_monitoring.status
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.kibana.status.host|'http://localhost:5601'}
+          metricsets:
+            - status
+          period: null
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml
new file mode 100644
index 00000000000..b4627a13814
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml
@@ -0,0 +1,18 @@
+inputs:
+    - name: filestream-log
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.log.log.enabled} == true or ${kubernetes.hints.log.enabled} == true
+          data_stream:
+            dataset: log.log
+            type: logs
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.log.log.stream|'all'}
+          paths: null
+          prospector:
+            scanner:
+              symlinks: true
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml
new file mode 100644
index 00000000000..6ba62de3274
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml
@@ -0,0 +1,75 @@
+inputs:
+    - name: filestream-logstash
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.logstash.log.enabled} == true or ${kubernetes.hints.logstash.enabled} == true
+          data_stream:
+            dataset: logstash.log
+            type: logs
+          exclude_files:
+            - .gz$
+          multiline:
+            match: after
+            negate: true
+            pattern: ^((\[[0-9]{4}-[0-9]{2}-[0-9]{2}[^\]]+\])|({.+}))
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.logstash.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale.when.not.regexp.message: ^{
+            - add_fields:
+                fields:
+                  ecs.version: 1.10.0
+                target: ""
+          prospector:
+            scanner:
+              symlinks: true
+        - condition: ${kubernetes.hints.logstash.slowlog.enabled} == true or ${kubernetes.hints.logstash.enabled} == true
+          data_stream:
+            dataset: logstash.slowlog
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.logstash.slowlog.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale.when.not.regexp.message: ^{
+            - add_fields:
+                fields:
+                  ecs.version: 1.10.0
+                target: ""
+          prospector:
+            scanner:
+              symlinks: true
+      data_stream.namespace: default
+    - name: logstash/metrics-logstash
+      type: logstash/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true
+          data_stream:
+            dataset: logstash.stack_monitoring.node
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.logstash.node.host|'http://localhost:9600'}
+          metricsets:
+            - node
+          period: ${kubernetes.hints.logstash.node.period|'10s'}
+        - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true
+          data_stream:
+            dataset: logstash.stack_monitoring.node_stats
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.logstash.node_stats.host|'http://localhost:9600'}
+          metricsets:
+            - node_stats
+          period: ${kubernetes.hints.logstash.node_stats.period|'10s'}
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml
new file mode 100644
index 00000000000..de5c8932af1
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml
@@ -0,0 +1,22 @@
+inputs:
+    - name: filestream-mattermost
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mattermost.audit.enabled} == true or ${kubernetes.hints.mattermost.enabled} == true
+          data_stream:
+            dataset: mattermost.audit
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.mattermost.audit.stream|'all'}
+          paths: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - mattermost-audit
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml
new file mode 100644
index 00000000000..5ac70293051
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml
@@ -0,0 +1,127 @@
+inputs:
+    - name: winlog-microsoft_sqlserver
+      type: winlog
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.microsoft_sqlserver.audit.enabled} == true or ${kubernetes.hints.microsoft_sqlserver.enabled} == true
+          data_stream:
+            dataset: microsoft_sqlserver.audit
+            type: logs
+          event_id: 33205
+          ignore_older: 72h
+          name: Security
+      data_stream.namespace: default
+    - name: filestream-microsoft_sqlserver
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.microsoft_sqlserver.log.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true
+          data_stream:
+            dataset: microsoft_sqlserver.log
+            type: logs
+          exclude_files:
+            - .gz$
+          multiline:
+            match: after
+            negate: true
+            pattern: ^\d{4}-\d{2}-\d{2}
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.microsoft_sqlserver.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - mssql-logs
+      data_stream.namespace: default
+    - name: sql/metrics-microsoft_sqlserver
+      type: sql/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.microsoft_sqlserver.performance.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true
+          data_stream:
+            dataset: microsoft_sqlserver.performance
+            type: metrics
+          driver: mssql
+          dynamic_counter_name: Memory Grants Pending
+          hosts:
+            - sqlserver://${kubernetes.hints.microsoft_sqlserver.performance.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.performance.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.performance.host|'localhost'}:1433
+          metricsets:
+            - query
+          period: ${kubernetes.hints.microsoft_sqlserver.performance.period|'60s'}
+          raw_data.enabled: true
+          sql_queries:
+            - query: SELECT cntr_value As 'user_connections' FROM sys.dm_os_performance_counters WHERE counter_name= 'User Connections'
+              response_format: table
+            - query: SELECT cntr_value As 'active_temp_tables' FROM sys.dm_os_performance_counters WHERE counter_name = 'Active Temp Tables' AND object_name like '%General Statistics%'
+              response_format: table
+            - query: SELECT cntr_value As 'buffer_cache_hit_ratio' FROM sys.dm_os_performance_counters WHERE counter_name = 'Buffer cache hit ratio' AND object_name like '%Buffer Manager%'
+              response_format: table
+            - query: SELECT cntr_value As 'page_splits_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Page splits/sec'
+              response_format: table
+            - query: SELECT cntr_value As 'lock_waits_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Lock Waits/sec' AND instance_name = '_Total'
+              response_format: table
+            - query: SELECT cntr_value As 'compilations_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'SQL Compilations/sec'
+              response_format: table
+            - query: SELECT cntr_value As 'batch_requests_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec'
+              response_format: table
+            - query: SELECT cntr_value As 'buffer_checkpoint_pages_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Checkpoint pages/sec' AND object_name like '%Buffer Manager%'
+              response_format: table
+            - query: SELECT cntr_value As 'buffer_database_pages' FROM sys.dm_os_performance_counters WHERE counter_name = 'Database pages' AND object_name like '%Buffer Manager%'
+              response_format: table
+            - query: SELECT cntr_value As 'buffer_page_life_expectancy' FROM sys.dm_os_performance_counters WHERE counter_name = 'Page life expectancy' AND object_name like '%Buffer Manager%'
+              response_format: table
+            - query: SELECT cntr_value As 'buffer_target_pages' FROM sys.dm_os_performance_counters WHERE counter_name = 'Target pages' AND object_name like '%Buffer Manager%'
+              response_format: table
+            - query: SELECT cntr_value As 'connection_reset_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Connection Reset/sec' AND object_name like '%Buffer Manager%'
+              response_format: table
+            - query: SELECT cntr_value As 'logins_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Logins/sec' AND object_name like '%General Statistics%'
+              response_format: table
+            - query: SELECT cntr_value As 'logouts_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Logouts/sec' AND object_name like '%General Statistics%'
+              response_format: table
+            - query: SELECT cntr_value As 'transactions' FROM sys.dm_os_performance_counters WHERE counter_name = 'Transactions' AND object_name like '%General Statistics%'
+              response_format: table
+            - query: SELECT cntr_value As 're_compilations_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'SQL Re-Compilations/sec'
+              response_format: table
+            - query: SELECT counter_name As 'dynamic_counter.name', cntr_value As 'dynamic_counter.value' FROM sys.dm_os_performance_counters WHERE counter_name= 'Memory Grants Pending'
+              response_format: table
+        - condition: ${kubernetes.hints.microsoft_sqlserver.transaction_log.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true
+          data_stream:
+            dataset: microsoft_sqlserver.transaction_log
+            type: metrics
+          driver: mssql
+          hosts:
+            - sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|'localhost'}:1433
+          metricsets:
+            - query
+          period: ${kubernetes.hints.microsoft_sqlserver.transaction_log.period|'60s'}
+          raw_data.enabled: true
+          sql_queries:
+            - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=1;
+              response_format: table
+            - query: SELECT 'master' As database_name, database_id,total_log_size_mb,active_log_size_mb,log_backup_time,log_since_last_log_backup_mb,log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(1) master
+              response_format: table
+            - query: SELECT 'master' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage master
+              response_format: table
+            - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=2;
+              response_format: table
+            - query: SELECT 'tempdb' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(2) tempdb
+              response_format: table
+            - query: SELECT 'tempdb' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage tempdb
+              response_format: table
+            - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=3;
+              response_format: table
+            - query: SELECT 'model' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(3) model
+              response_format: table
+            - query: SELECT 'model' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage model
+              response_format: table
+            - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=4;
+              response_format: table
+            - query: SELECT 'msdb' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(4) msdb
+              response_format: table
+            - query: SELECT 'msdb' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage msdb
+              response_format: table
+      data_stream.namespace: default
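The sqlserver:// DSNs above are assembled from three separate hints (username, password, host), each with a placeholder fallback. A hedged sketch of annotations that would feed them; the per-stream hint keys are assumed from the hints convention, and the values are illustrative. Note that annotations are readable by anyone who can view the pod, so they are not a substitute for a secret store.

# Hypothetical annotations feeding the microsoft_sqlserver performance stream above.
metadata:
  annotations:
    co.elastic.hints/package: microsoft_sqlserver
    co.elastic.hints/performance.username: sa             # replaces 'domain\username'
    co.elastic.hints/performance.password: S3cr3t!        # illustrative only; annotations are plaintext
    co.elastic.hints/performance.host: mssql.default.svc  # replaces 'localhost' in the DSN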
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml
new file mode 100644
index 00000000000..23139e47852
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml
@@ -0,0 +1,381 @@
+inputs:
+    - name: httpjson-mimecast
+      type: httpjson
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mimecast.audit_events.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true
+          config_version: "2"
+          cursor:
+            next_date:
+              value: '[[.first_event.eventTime]]'
+          data_stream:
+            dataset: mimecast.audit_events
+            type: logs
+          interval: 5m
+          request.method: POST
+          request.transforms:
+            - set:
+                target: body.meta.pagination.pageSize
+                value: 500
+            - set:
+                default: '[{"endDateTime": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "startDateTime":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]'
+                target: body.data
+                value: '[{"endDateTime": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "startDateTime":"[[.cursor.next_date]]"}]'
+                value_type: json
+            - set:
+                target: header.x-mc-app-id
+                value: null
+            - set:
+                target: header.x-mc-date
+                value: '[[formatDate (now) "RFC1123"]]'
+            - set:
+                target: header.x-mc-req-id
+                value: '[[uuid]]'
+            - set:
+                fail_on_template_error: true
+                target: header.Authorization
+                value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/audit/get-audit-events:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]]
+          request.url: https://eu-api.mimecast.com/api/audit/get-audit-events
+          response.decode_as: application/json
+          response.pagination:
+            - set:
+                fail_on_template_error: true
+                target: body.meta.pagination.pageToken
+                value: '[[.last_response.body.meta.pagination.next]]'
+          response.split:
+            target: body.data
+          tags:
+            - forwarded
+            - mimecast-audit-events
+        - condition: ${kubernetes.hints.mimecast.dlp_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true
+          config_version: "2"
+          cursor: null
+          data_stream:
+            dataset: mimecast.dlp_logs
+            type: logs
+          interval: 5m
+          next_date:
+            value: '[[.first_event.eventTime]]'
+          request.method: POST
+          request.transforms:
+            - set:
+                default: '[{"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]'
+                target: body.data
+                value: '[{"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.eventTime]]"}]'
+                value_type: json
+            - set:
+                target: header.x-mc-app-id
+                value: null
+            - set:
+                target: header.x-mc-date
+                value: '[[formatDate (now) "RFC1123"]]'
+            - set:
+                target: header.x-mc-req-id
+                value: '[[uuid]]'
+            - set:
+                fail_on_template_error: true
+                target: header.Authorization
+                value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/dlp/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]]
+          request.url: https://eu-api.mimecast.com/api/dlp/get-logs
+          response.decode_as: application/json
+          response.pagination:
+            - set:
+                fail_on_template_error: true
+                target: body.meta.pagination.pageToken
+                value: '[[.last_response.body.meta.pagination.next]]'
+          response.split:
+            split:
+              target: body.dlpLogs
+            target: body.data
+          tags:
+            - forwarded
+            - mimecast-dlp-logs
+        - condition: ${kubernetes.hints.mimecast.siem_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true
+          config_version: "2"
+          cursor:
+            next_token:
+              value: '[[.last_response.header.Get "mc-siem-token"]]'
+          data_stream:
+            dataset: mimecast.siem_logs
+            type: logs
+          interval: 5m
+          request.method: POST
+          request.transforms:
+            - set:
+                default: '[{"type":"MTA","fileFormat":"json", "compress":true}]'
+                target: body.data
+                value: '[{"type":"MTA","fileFormat":"json", "compress":true, "token": "[[.cursor.next_token]]"}]'
+                value_type: json
+            - set:
+                target: header.x-mc-app-id
+                value: null
+            - set:
+                target: header.x-mc-date
+                value: '[[formatDate (now) "RFC1123"]]'
+            - set:
+                target: header.x-mc-req-id
+                value: '[[uuid]]'
+            - set:
+                fail_on_template_error: true
+                target: header.Authorization
+                value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/audit/get-siem-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]]
+            - set:
+                target: header.Accept
+                value: '*/*'
+          request.url: https://eu-api.mimecast.com/api/audit/get-siem-logs
+          response.decode_as: application/zip
+          response.pagination:
+            - set:
+                target: body.data
+                value: '[{"type":"MTA","fileFormat":"json", "compress":true, "token": "[[.last_response.header.Get "mc-siem-token"]]"}]'
+                value_type: json
+          response.split:
+            target: body.data
+            transforms:
+              - set:
+                  target: body.Content-Disposition
+                  value: '[[.last_response.header.Get "Content-Disposition"]]'
+          tags:
+            - forwarded
+            - mimecast-siem-logs
+        - condition: ${kubernetes.hints.mimecast.threat_intel_malware_customer.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true
+          config_version: "2"
+          cursor:
+            next_date:
+              value: '[[.first_event.created]]'
+          data_stream:
+            dataset: mimecast.threat_intel_malware_customer
+            type: logs
+          interval: 5m
+          request.method: POST
+          request.transforms:
+            - set:
+                default: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]'
+                target: body.data
+                value: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (.cursor.next_date) "2006-01-02T15:04:05+0700"]]"}]'
+                value_type: json
+            - set:
+                target: header.x-mc-app-id
+                value: null
+            - set:
+                target: header.x-mc-date
+                value: '[[formatDate (now) "RFC1123"]]'
+            - set:
+                target: header.x-mc-req-id
+                value: '[[uuid]]'
+            - set:
+                fail_on_template_error: true
+                target: header.Authorization
+                value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/threat-intel/get-feed:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]]
+          request.url: https://eu-api.mimecast.com/api/ttp/threat-intel/get-feed
+          response.decode_as: application/json
+          response.pagination:
+            - set:
+                target: body.data
+                value: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"token": "[[.last_response.header.Get "x-mc-threat-feed-next-token"]]","end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[.cursor.next_date]]"}]'
+                value_type: json
+          response.split:
+            target: body.objects
+            transforms:
+              - set:
+                  target: body.Content-Disposition
+                  value: '[[.last_response.header.Get "Content-Disposition"]]'
+          tags:
+            - forwarded
+            - mimecast-threat-intel-feed-malware-customer
+        - condition: ${kubernetes.hints.mimecast.threat_intel_malware_grid.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true
+          config_version: "2"
+          cursor:
+            next_date:
+              value: '[[.first_event.created]]'
+          data_stream:
+            dataset: mimecast.threat_intel_malware_grid
+            type: logs
+          interval: 5m
+          request.method: POST
+          request.transforms:
"2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (.cursor.next_date) "2006-01-02T15:04:05+0700"]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/threat-intel/get-feed:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/threat-intel/get-feed + response.decode_as: application/json + response.pagination: + - set: + target: body.data + value: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"token": "[[.last_response.header.Get "x-mc-threat-feed-next-token"]]","end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[.cursor.next_date]]"}]' + value_type: json + response.split: + target: body.objects + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-threat-intel-feed-malware-grid + - condition: ${kubernetes.hints.mimecast.ttp_ap_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.ttp_ap_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.date]]' + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false, "route": "all", "result":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false, "route": "all", "result":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/attachment/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/attachment/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.attachmentLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-ap + - condition: ${kubernetes.hints.mimecast.ttp_ip_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.ttp_ip_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.eventTime]]' + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false,"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false,"to": "[[formatDate (now) 
"2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/impersonation/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/impersonation/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.impersonationLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-ip + - condition: ${kubernetes.hints.mimecast.ttp_url_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.date]]' + data_stream: + dataset: mimecast.ttp_url_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false,"scanResult": "all","route":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false,"scanResult": "all","route":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/url/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/url/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.clickLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-url + data_stream.namespace: default + - name: filestream-mimecast + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml new file mode 100644 index 00000000000..cc9e109d5ed --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml @@ -0,0 +1,28 @@ +inputs: + - name: filestream-modsecurity + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.modsecurity.auditlog.enabled} == true or ${kubernetes.hints.modsecurity.enabled} == true + data_stream: + dataset: modsecurity.auditlog + type: logs + exclude_files: + - .gz$ + fields: + 
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml
new file mode 100644
index 00000000000..cc9e109d5ed
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml
@@ -0,0 +1,28 @@
+inputs:
+    - name: filestream-modsecurity
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.modsecurity.auditlog.enabled} == true or ${kubernetes.hints.modsecurity.enabled} == true
+          data_stream:
+            dataset: modsecurity.auditlog
+            type: logs
+          exclude_files:
+            - .gz$
+          fields:
+            tz_offset: null
+          fields_under_root: true
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.modsecurity.auditlog.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - modsec-audit
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml
new file mode 100644
index 00000000000..ece2d4439eb
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml
@@ -0,0 +1,73 @@
+inputs:
+    - name: filestream-mongodb
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mongodb.log.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true
+          data_stream:
+            dataset: mongodb.log
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.mongodb.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - mongodb-logs
+      data_stream.namespace: default
+    - name: mongodb/metrics-mongodb
+      type: mongodb/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mongodb.collstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true
+          data_stream:
+            dataset: mongodb.collstats
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mongodb.collstats.host|'localhost:27017'}
+          metricsets:
+            - collstats
+          period: ${kubernetes.hints.mongodb.collstats.period|'10s'}
+        - condition: ${kubernetes.hints.mongodb.dbstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true
+          data_stream:
+            dataset: mongodb.dbstats
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mongodb.dbstats.host|'localhost:27017'}
+          metricsets:
+            - dbstats
+          period: ${kubernetes.hints.mongodb.dbstats.period|'10s'}
+        - condition: ${kubernetes.hints.mongodb.metrics.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true
+          data_stream:
+            dataset: mongodb.metrics
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mongodb.metrics.host|'localhost:27017'}
+          metricsets:
+            - metrics
+          period: ${kubernetes.hints.mongodb.metrics.period|'10s'}
+        - condition: ${kubernetes.hints.mongodb.replstatus.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true
+          data_stream:
+            dataset: mongodb.replstatus
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mongodb.replstatus.host|'localhost:27017'}
+          metricsets:
+            - replstatus
+          period: ${kubernetes.hints.mongodb.replstatus.period|'10s'}
+        - condition: ${kubernetes.hints.mongodb.status.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true
+          data_stream:
+            dataset: mongodb.status
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mongodb.status.host|'localhost:27017'}
+          metricsets:
+            - status
+          period: ${kubernetes.hints.mongodb.status.period|'10s'}
+      data_stream.namespace: default
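Enabling the whole mongodb package would light up all five metric streams at once; the hints convention also allows selecting a subset explicitly. A hedged sketch, with illustrative values:

# Hypothetical annotations enabling only two of the mongodb metric streams above.
metadata:
  annotations:
    co.elastic.hints/package: mongodb
    co.elastic.hints/data_streams: status,replstatus  # only these conditions become true
    co.elastic.hints/status.period: 30s               # per-stream override of the |'10s' fallback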
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml
new file mode 100644
index 00000000000..234caeeb40c
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml
@@ -0,0 +1,82 @@
+inputs:
+    - name: filestream-mysql
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mysql.error.enabled} == true or ${kubernetes.hints.mysql.enabled} == true
+          data_stream:
+            dataset: mysql.error
+            type: logs
+          exclude_files:
+            - .gz$
+          multiline:
+            match: after
+            negate: true
+            pattern: ^([0-9]{4}-[0-9]{2}-[0-9]{2}|[0-9]{6})
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.mysql.error.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+        - condition: ${kubernetes.hints.mysql.slowlog.enabled} == true or ${kubernetes.hints.mysql.enabled} == true
+          data_stream:
+            dataset: mysql.slowlog
+            type: logs
+          exclude_files:
+            - .gz$
+          exclude_lines:
+            - '^[\/\w\.]+, Version: .* started with:.*'
+            - ^# Time:.*
+          multiline:
+            match: after
+            negate: true
+            pattern: '^(# User@Host: |# Time: )'
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.mysql.slowlog.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+      data_stream.namespace: default
+    - name: mysql/metrics-mysql
+      type: mysql/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true
+          data_stream:
+            dataset: mysql.galera_status
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mysql.galera_status.host|'tcp(127.0.0.1:3306)/'}
+          metricsets:
+            - galera_status
+          password: ${kubernetes.hints.mysql.galera_status.password|'test'}
+          period: ${kubernetes.hints.mysql.galera_status.period|'10s'}
+          username: ${kubernetes.hints.mysql.galera_status.username|'root'}
+        - condition: ${kubernetes.hints.mysql.performance.enabled} == true and ${kubernetes.hints.mysql.enabled} == true
+          data_stream:
+            dataset: mysql.performance
+            type: metrics
+          metricsets:
+            - performance
+        - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true
+          data_stream:
+            dataset: mysql.status
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.mysql.status.host|'tcp(127.0.0.1:3306)/'}
+          metricsets:
+            - status
+          password: ${kubernetes.hints.mysql.status.password|'test'}
+          period: ${kubernetes.hints.mysql.status.period|'10s'}
+          username: ${kubernetes.hints.mysql.status.username|'root'}
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml
new file mode 100644
index 00000000000..d943bb661ff
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml
@@ -0,0 +1,18 @@
+inputs:
+    - name: filestream-mysql_enterprise
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.mysql_enterprise.audit.enabled} == true or ${kubernetes.hints.mysql_enterprise.enabled} == true
+          data_stream:
+            dataset: mysql_enterprise.audit
+            type: logs
+          exclude_files:
+            - .gz$
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          tags:
+            - mysql_enterprise-audit
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml
new file mode 100644
index 00000000000..91525210374
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml
@@ -0,0 +1,82 @@
+inputs:
+    - name: filestream-nats
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.log
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.nats.log.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - nats-log
+      data_stream.namespace: default
+    - name: nats/metrics-nats
+      type: nats/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.nats.connection.enabled} == true and ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.connection
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nats.connection.host|'localhost:8222'}
+          metricsets:
+            - connection
+          period: ${kubernetes.hints.nats.connection.period|'10s'}
+        - condition: ${kubernetes.hints.nats.connections.enabled} == true or ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.connections
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nats.connections.host|'localhost:8222'}
+          metricsets:
+            - connections
+          period: ${kubernetes.hints.nats.connections.period|'10s'}
+        - condition: ${kubernetes.hints.nats.route.enabled} == true and ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.route
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nats.route.host|'localhost:8222'}
+          metricsets:
+            - route
+          period: ${kubernetes.hints.nats.route.period|'10s'}
+        - condition: ${kubernetes.hints.nats.routes.enabled} == true or ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.routes
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nats.routes.host|'localhost:8222'}
+          metricsets:
+            - routes
+          period: ${kubernetes.hints.nats.routes.period|'10s'}
+        - condition: ${kubernetes.hints.nats.stats.enabled} == true or ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.stats
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nats.stats.host|'localhost:8222'}
+          metricsets:
+            - stats
+          period: ${kubernetes.hints.nats.stats.period|'10s'}
+        - condition: ${kubernetes.hints.nats.subscriptions.enabled} == true or ${kubernetes.hints.nats.enabled} == true
+          data_stream:
+            dataset: nats.subscriptions
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nats.subscriptions.host|'localhost:8222'}
+          metricsets:
+            - subscriptions
+          period: ${kubernetes.hints.nats.subscriptions.period|'10s'}
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml
new file mode 100644
index 00000000000..d2bb80601df
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml
@@ -0,0 +1,47 @@
+inputs:
+    - name: netflow-netflow
+      type: netflow
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.netflow.log.enabled} == true or ${kubernetes.hints.netflow.enabled} == true
+          data_stream:
+            dataset: netflow.log
+            type: logs
+          detect_sequence_reset: true
+          expiration_timeout: 30m
+          host: localhost:2055
+          max_message_size: 10KiB
+          protocols:
+            - v1
+            - v5
+            - v6
+            - v7
+            - v8
+            - v9
+            - ipfix
+          queue_size: 8192
+          tags:
+            - netflow
+            - forwarded
+      data_stream.namespace: default
+    - name: filestream-netflow
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.netflow.container_logs.enabled} == true
+          data_stream:
+            dataset: kubernetes.container_logs
+            type: logs
+          exclude_files: []
+          exclude_lines: []
+          parsers:
+            - container:
+                format: auto
+                stream: all
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags: []
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml
new file mode 100644
index 00000000000..a9b6693e372
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml
@@ -0,0 +1,142 @@
+inputs:
+    - name: nginx/metrics-nginx
+      type: nginx/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or ${kubernetes.hints.nginx.enabled} == true
+          data_stream:
+            dataset: nginx.stubstatus
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.nginx.stubstatus.host|'http://127.0.0.1:80'}
+          metricsets:
+            - stubstatus
+          period: ${kubernetes.hints.nginx.stubstatus.period|'10s'}
+          server_status_path: /nginx_status
+      data_stream.namespace: default
+    - name: filestream-nginx
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true
+          data_stream:
+            dataset: nginx.access
+            type: logs
+          exclude_files:
+            - .gz$
+          ignore_older: 72h
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.nginx.access.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - nginx-access
+        - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true
+          data_stream:
+            dataset: nginx.error
+            type: logs
+          exclude_files:
+            - .gz$
+          ignore_older: 72h
+          multiline:
+            match: after
+            negate: true
+            pattern: '^\d{4}\/\d{2}\/\d{2} '
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.nginx.error.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          processors:
+            - add_locale: null
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - nginx-error
+      data_stream.namespace: default
+    - name: httpjson-nginx
+      type: httpjson
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.nginx.access.enabled} == true and ${kubernetes.hints.nginx.enabled} == true
+          config_version: 2
+          cursor:
+            index_earliest:
+              value: '[[.last_event.result.max_indextime]]'
+          data_stream:
+            dataset: nginx.access
+            type: logs
+          interval: 10s
+          request.method: POST
+          request.transforms:
+            - set:
+                target: url.params.search
+                value: search sourcetype=nginx:plus:access | streamstats max(_indextime) AS max_indextime
+            - set:
+                target: url.params.output_mode
+                value: json
+            - set:
+                default: '[[(now (parseDuration "-10s")).Unix]]'
+                target: url.params.index_earliest
+                value: '[[ .cursor.index_earliest ]]'
+            - set:
+                target: url.params.index_latest
+                value: '[[(now).Unix]]'
+            - set:
+                target: header.Content-Type
+                value: application/x-www-form-urlencoded
+          request.url: https://server.example.com:8089/services/search/jobs/export
+          response.decode_as: application/x-ndjson
+          response.split:
+            delimiter: |4+
+            target: body.result._raw
+            type: string
+          tags:
+            - forwarded
+            - nginx-access
+        - condition: ${kubernetes.hints.nginx.error.enabled} == true and ${kubernetes.hints.nginx.enabled} == true
+          config_version: 2
+          cursor:
+            index_earliest:
+              value: '[[.last_event.result.max_indextime]]'
+          data_stream:
+            dataset: nginx.error
+            type: logs
+          interval: 10s
+          request.method: POST
+          request.transforms:
+            - set:
+                target: url.params.search
+                value: search sourcetype=nginx:plus:error | streamstats max(_indextime) AS max_indextime
url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - nginx-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml new file mode 100644 index 00000000000..5f9ba9bc7e4 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml @@ -0,0 +1,53 @@ +inputs: + - name: filestream-nginx_ingress_controller + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx_ingress_controller.access.enabled} == true or ${kubernetes.hints.nginx_ingress_controller.enabled} == true + data_stream: + dataset: nginx_ingress_controller.access + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx_ingress_controller.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-ingress-controller-access + - condition: ${kubernetes.hints.nginx_ingress_controller.error.enabled} == true or ${kubernetes.hints.nginx_ingress_controller.enabled} == true + data_stream: + dataset: nginx_ingress_controller.error + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^[A-Z]{1}[0-9]{4} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx_ingress_controller.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - drop_event: + when: + not: + regexp: + message: '^[A-Z]{1}[0-9]{4} ' + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-ingress-controller-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml new file mode 100644 index 00000000000..8e846586d4b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-oracle + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.database_audit + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^Audit file + parsers: + - multiline: + match: after + negate: true + pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} + timeout: 10 + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - oracle-database_audit + data_stream.namespace: default + - name: sql/metrics-oracle + type: sql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.performance.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + 
dataset: oracle.performance + type: metrics + driver: oracle + hosts: + - ${kubernetes.hints.oracle.performance.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.performance.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT name, physical_reads, db_block_gets, consistent_gets, 1 - (physical_reads / (db_block_gets + consistent_gets)) "Hit_Ratio" FROM V$BUFFER_POOL_STATISTICS + response_format: table + - query: SELECT sum(a.value) total_cur, avg(a.value) avg_cur, max(a.value) max_cur, S.username, s.machine FROM v$sesstat a, v$statname b, v$session s WHERE a.statistic# = b.statistic# AND s.sid = a.sid GROUP BY s.username, s.machine + response_format: table + - query: SELECT total_cursors, current_cursors, sess_cur_cache_hits, parse_count_total, sess_cur_cache_hits / total_cursors as cachehits_totalcursors_ratio , sess_cur_cache_hits - parse_count_total as real_parses FROM ( SELECT sum ( decode ( name, 'opened cursors cumulative', value, 0)) total_cursors, sum ( decode ( name, 'opened cursors current',value,0)) current_cursors, sum ( decode ( name, 'session cursor cache hits',value,0)) sess_cur_cache_hits, sum ( decode ( name, 'parse count (total)',value,0)) parse_count_total FROM v$sysstat WHERE name IN ( 'opened cursors cumulative','opened cursors current','session cursor cache hits', 'parse count (total)' )) + response_format: table + - query: SELECT 'lock_requests' "Ratio" , AVG(gethitratio) FROM V$LIBRARYCACHE UNION SELECT 'pin_requests' "Ratio", AVG(pinhitratio) FROM V$LIBRARYCACHE UNION SELECT 'io_reloads' "Ratio", (SUM(reloads) / SUM(pins)) FROM V$LIBRARYCACHE + response_format: variables + - condition: ${kubernetes.hints.oracle.sysmetric.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.sysmetric + type: metrics + driver: oracle + dynamic_metric_name_filter: '%' + hosts: + - ${kubernetes.hints.oracle.sysmetric.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.sysmetric.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT METRIC_NAME, VALUE FROM V$SYSMETRIC WHERE GROUP_ID = 2 and METRIC_NAME LIKE '%' + response_format: variables + - condition: ${kubernetes.hints.oracle.tablespace.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.tablespace + type: metrics + driver: oracle + dynamic_metric_name_filter: "" + hosts: + - ${kubernetes.hints.oracle.tablespace.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.tablespace.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: WITH data_files AS (SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status FROM sys.dba_data_files UNION SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, status AS ONLINE_STATUS FROM sys.dba_temp_files), spaces AS (SELECT b.tablespace_name TB_NAME, tbs_size TB_SIZE_USED, a.free_space TB_SIZE_FREE FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+) = b.tablespace_name AND a.tablespace_name != 'TEMP'), temp_spaces AS (SELECT tablespace_name, tablespace_size, allocated_space, free_space FROM 
dba_temp_free_space WHERE tablespace_name = 'TEMP'), details AS (SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, sp.tb_size_used, sp.tb_size_free FROM data_files df, spaces sp WHERE df.tablespace_name = sp.tb_name UNION SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, tsp.tablespace_size - tsp.free_space AS TB_SIZE_USED, tsp.free_space AS TB_SIZE_FREE FROM data_files df, temp_spaces tsp WHERE df.tablespace_name = tsp.tablespace_name) SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status, tb_size_used, tb_size_free, SUM(bytes) over() AS TOTAL_BYTES FROM details + response_format: table + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml new file mode 100644 index 00000000000..93c07883f03 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml @@ -0,0 +1,94 @@ +inputs: + - name: tcp-panw + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + host: localhost:9001 + max_message_size: 50KiB + processors: + - add_locale: null + - syslog: + field: message + format: auto + timezone: Local + - add_fields: + fields: + internal_zones: + - trust + target: _conf + - add_fields: + fields: + external_zones: + - untrust + target: _conf + tags: + - panw-panos + - forwarded + data_stream.namespace: default + - name: udp-panw + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + host: localhost:9001 + max_message_size: 50KiB + processors: + - add_locale: null + - syslog: + field: message + format: auto + timezone: Local + - add_fields: + fields: + internal_zones: + - trust + target: _conf + - add_fields: + fields: + external_zones: + - untrust + target: _conf + tags: + - panw-panos + - forwarded + data_stream.namespace: default + - name: filestream-panw + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + exclude_files: + - .gz$ + fields: + _conf: + external_zones: + - untrust + internal_zones: + - trust + tz_offset: local + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.panw.panos.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - panw-panos + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml new file mode 100644 index 00000000000..ec6a58fd9b2 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml @@ -0,0 +1,90 @@ +inputs: + - name: httpjson-panw_cortex_xdr + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.panw_cortex_xdr.alerts.enabled} == true or ${kubernetes.hints.panw_cortex_xdr.enabled} == true + config_version: "2" + cursor: + 
next_ts: + value: '[[.last_event.detection_timestamp]]' + data_stream: + dataset: panw_cortex_xdr.alerts + type: logs + interval: 5m + request.method: POST + request.rate_limit: + limit: '[[.last_response.header.Get "X-Rate-Limit-Limit"]]' + remaining: '[[.last_response.header.Get "X-Rate-Limit-Remaining"]]' + reset: '[[(parseDate (.last_response.header.Get "X-Rate-Limit-Reset")).Unix]]' + request.timeout: 30s + request.transforms: + - set: + target: header.Authorization + value: null + - set: + target: header.x-xdr-auth-id + value: 1 + - set: + target: body.request_data.sort.field + value: creation_time + - set: + target: body.request_data.sort.keyword + value: asc + - append: + default: |- + { + "field": "creation_time", + "operator": "gte", + "value": [[ mul (add (now (parseDuration "-24h")).Unix) 1000 ]] + } + target: body.request_data.filters + value: |- + { + "field": "creation_time", + "operator": "gte", + "value": [[ .cursor.next_ts ]] + } + value_type: json + request.url: https://test.xdr.eu.paloaltonetworks.com/public_api/v1/alerts/get_alerts_multi_events + response.pagination: + - set: + fail_on_template_error: true + target: body.request_data.search_from + value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[mul .last_response.page 100]][[end]]' + value_type: int + - set: + fail_on_template_error: true + target: body.request_data.search_to + value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[add (mul .last_response.page 100) 100]][[end]]' + value_type: int + response.split: + split: + keep_parent: true + target: body.events + target: body.reply.alerts + tags: + - forwarded + - panw_cortex_xdr + data_stream.namespace: default + - name: filestream-panw_cortex_xdr + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.panw_cortex_xdr.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml new file mode 100644 index 00000000000..e4541f90639 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml @@ -0,0 +1,62 @@ +inputs: + - name: udp-pfsense + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.log.enabled} == true or ${kubernetes.hints.pfsense.enabled} == true + data_stream: + dataset: pfsense.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + tags: + - pfsense + - forwarded + data_stream.namespace: default + - name: tcp-pfsense + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.log.enabled} == true and ${kubernetes.hints.pfsense.enabled} == true + data_stream: + dataset: pfsense.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - pfsense + - forwarded + data_stream.namespace: default + - name: filestream-pfsense + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs 
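# Throughout these templates, ${kubernetes.hints.<package>.<dataset>.<setting>|'literal'}
# substitutes the hint value when one is provided and otherwise falls back to the quoted
# literal after the pipe; that is how the localhost host/period defaults in these files work.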
+ type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml new file mode 100644 index 00000000000..a9abf518a9a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml @@ -0,0 +1,68 @@ +inputs: + - name: filestream-postgresql + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.postgresql.log.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.postgresql.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - postgresql-log + data_stream.namespace: default + - name: postgresql/metrics-postgresql + type: postgresql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.postgresql.activity.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.activity + type: metrics + hosts: + - ${kubernetes.hints.postgresql.activity.host|'postgres://localhost:5432'} + metricsets: + - activity + period: ${kubernetes.hints.postgresql.activity.period|'10s'} + - condition: ${kubernetes.hints.postgresql.bgwriter.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.bgwriter + type: metrics + hosts: + - ${kubernetes.hints.postgresql.bgwriter.host|'postgres://localhost:5432'} + metricsets: + - bgwriter + period: ${kubernetes.hints.postgresql.bgwriter.period|'10s'} + - condition: ${kubernetes.hints.postgresql.database.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.database + type: metrics + hosts: + - ${kubernetes.hints.postgresql.database.host|'postgres://localhost:5432'} + metricsets: + - database + period: ${kubernetes.hints.postgresql.database.period|'10s'} + - condition: ${kubernetes.hints.postgresql.statement.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.statement + type: metrics + hosts: + - ${kubernetes.hints.postgresql.statement.host|'postgres://localhost:5432'} + metricsets: + - statement + period: ${kubernetes.hints.postgresql.statement.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml new file mode 100644 index 00000000000..2a7e630c9cf --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml @@ -0,0 +1,90 @@ +inputs: + - name: prometheus/metrics-prometheus + type: prometheus/metrics + use_output: default + streams: + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + condition: ${kubernetes.hints.prometheus.collector.enabled} == true or ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.collector + type: metrics + hosts: + - ${kubernetes.hints.prometheus.collector.host|'localhost:9090'} + 
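# The collector stream scrapes a Prometheus exposition endpoint: the service-account
# bearer token and CA paths above are the in-cluster defaults, and the 'user'/'secret'
# values below are placeholder fallbacks that apply only when no hint overrides them.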
metrics_filters.exclude: null + metrics_filters.include: null + metrics_path: /metrics + metricsets: + - collector + password: ${kubernetes.hints.prometheus.collector.password|'secret'} + period: ${kubernetes.hints.prometheus.collector.period|'10s'} + rate_counters: true + ssl.certificate_authorities: + - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + use_types: true + username: ${kubernetes.hints.prometheus.collector.username|'user'} + - condition: ${kubernetes.hints.prometheus.query.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.query + type: metrics + hosts: + - ${kubernetes.hints.prometheus.query.host|'localhost:9090'} + metricsets: + - query + period: ${kubernetes.hints.prometheus.query.period|'10s'} + queries: + - name: instant_vector + params: + query: sum(rate(prometheus_http_requests_total[1m])) + path: /api/v1/query + - name: range_vector + params: + end: "2019-12-21T00:00:00.000Z" + query: up + start: "2019-12-20T00:00:00.000Z" + step: 1h + path: /api/v1/query_range + - name: scalar + params: + query: "100" + path: /api/v1/query + - name: string + params: + query: some_value + path: /api/v1/query + - condition: ${kubernetes.hints.prometheus.remote_write.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.remote_write + type: metrics + host: localhost + metricsets: + - remote_write + port: 9201 + rate_counters: true + ssl.certificate: /etc/pki/server/cert.pem + ssl.enabled: null + ssl.key: null + types_patterns.exclude: null + types_patterns.include: null + use_types: true + data_stream.namespace: default + - name: filestream-prometheus + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.prometheus.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml new file mode 100644 index 00000000000..546faa79901 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml @@ -0,0 +1,60 @@ +inputs: + - name: udp-qnap_nas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true and ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default + - name: filestream-qnap_nas + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: tcp-qnap_nas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true or ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + 
dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml new file mode 100644 index 00000000000..942c4fa6911 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml @@ -0,0 +1,79 @@ +inputs: + - name: filestream-rabbitmq + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.rabbitmq.log.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '[0-9]{4}-[0-9]{2}-[0-9]{2}' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.rabbitmq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: rabbitmq/metrics-rabbitmq + type: rabbitmq/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.rabbitmq.connection.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.connection + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.connection.host|'localhost:15672'} + metricsets: + - connection + password: ${kubernetes.hints.rabbitmq.connection.password|''} + period: ${kubernetes.hints.rabbitmq.connection.period|'10s'} + username: ${kubernetes.hints.rabbitmq.connection.username|''} + - condition: ${kubernetes.hints.rabbitmq.exchange.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.exchange + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.exchange.host|'localhost:15672'} + metricsets: + - exchange + password: ${kubernetes.hints.rabbitmq.exchange.password|''} + period: ${kubernetes.hints.rabbitmq.exchange.period|'10s'} + username: ${kubernetes.hints.rabbitmq.exchange.username|''} + - condition: ${kubernetes.hints.rabbitmq.node.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.node + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.node.host|'localhost:15672'} + metricsets: + - node + node.collect: node + password: ${kubernetes.hints.rabbitmq.node.password|''} + period: ${kubernetes.hints.rabbitmq.node.period|'10s'} + username: ${kubernetes.hints.rabbitmq.node.username|''} + - condition: ${kubernetes.hints.rabbitmq.queue.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.queue + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.queue.host|'localhost:15672'} + metricsets: + - queue + password: ${kubernetes.hints.rabbitmq.queue.password|''} + period: ${kubernetes.hints.rabbitmq.queue.period|'10s'} + username: ${kubernetes.hints.rabbitmq.queue.username|''} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml new file mode 100644 index 00000000000..31731f6c1a5 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml @@ -0,0 +1,84 @@ +inputs: + - name: filestream-redis + type: filestream + use_output: default + streams: + - 
condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.log + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^\s+[\-`('.|_] + parsers: + - container: + format: auto + stream: ${kubernetes.hints.redis.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - redis-log + data_stream.namespace: default + - name: redis-redis + type: redis + use_output: default + streams: + - condition: ${kubernetes.hints.redis.slowlog.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.slowlog + type: logs + hosts: + - ${kubernetes.hints.redis.slowlog.host|'127.0.0.1:6379'} + password: ${kubernetes.hints.redis.slowlog.password|''} + data_stream.namespace: default + - name: redis/metrics-redis + type: redis/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.redis.info.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.info + type: metrics + hosts: + - ${kubernetes.hints.redis.info.host|'127.0.0.1:6379'} + idle_timeout: 20s + maxconn: 10 + metricsets: + - info + network: tcp + password: ${kubernetes.hints.redis.info.password|''} + period: ${kubernetes.hints.redis.info.period|'10s'} + - condition: ${kubernetes.hints.redis.key.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.key + type: metrics + hosts: + - ${kubernetes.hints.redis.key.host|'127.0.0.1:6379'} + idle_timeout: 20s + key.patterns: + - limit: 20 + pattern: '*' + maxconn: 10 + metricsets: + - key + network: tcp + password: ${kubernetes.hints.redis.key.password|''} + period: ${kubernetes.hints.redis.key.period|'10s'} + - condition: ${kubernetes.hints.redis.keyspace.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.keyspace + type: metrics + hosts: + - ${kubernetes.hints.redis.keyspace.host|'127.0.0.1:6379'} + idle_timeout: 20s + maxconn: 10 + metricsets: + - keyspace + network: tcp + password: ${kubernetes.hints.redis.keyspace.password|''} + period: ${kubernetes.hints.redis.keyspace.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml new file mode 100644 index 00000000000..d60bfeb744a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml @@ -0,0 +1,23 @@ +inputs: + - name: filestream-santa + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.santa.log.enabled} == true or ${kubernetes.hints.santa.enabled} == true + data_stream: + dataset: santa.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.santa.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - santa-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml new file mode 100644 index 00000000000..990a4372e8b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-security_detection_engine + type: filestream + use_output: default 
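# Pattern shared by every templates.d/<package>.yml in this change: one input per Beats
# input type, each stream gated on its per-data-stream hint
# (kubernetes.hints.<package>.<dataset>.enabled), in most files or-ed with the
# package-wide kubernetes.hints.<package>.enabled switch (a few streams require both).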
+ streams: + - condition: ${kubernetes.hints.security_detection_engine.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml new file mode 100644 index 00000000000..7c06b222d78 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -0,0 +1,217 @@ +inputs: + - name: httpjson-sentinel_one + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.activity.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_create_at: + value: '[[.last_event.createdAt]]' + data_stream: + dataset: sentinel_one.activity + type: logs + interval: 1m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: createdAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.createdAt__gte + value: '[[formatDate (parseDate .cursor.last_create_at)]]' + request.url: /web/api/v2.1/activities + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-activity + - condition: ${kubernetes.hints.sentinel_one.agent.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.updatedAt]]' + data_stream: + dataset: sentinel_one.agent + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/agents + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-agent + - condition: ${kubernetes.hints.sentinel_one.alert.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_create_at: + value: '[[.last_event.alertInfo.createdAt]]' + data_stream: + dataset: sentinel_one.alert + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: alertInfoCreatedAt + - set: + target: 
url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.createdAt__gte + value: '[[formatDate (parseDate .cursor.last_create_at)]]' + request.url: /web/api/v2.1/cloud-detection/alerts + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-alert + - condition: ${kubernetes.hints.sentinel_one.group.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.updatedAt]]' + data_stream: + dataset: sentinel_one.group + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/groups + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-group + - condition: ${kubernetes.hints.sentinel_one.threat.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.threatInfo.updatedAt]]' + data_stream: + dataset: sentinel_one.threat + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/threats + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-threat + data_stream.namespace: default + - name: filestream-sentinel_one + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml new file mode 100644 index 00000000000..80ed6df384a --- /dev/null +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml @@ -0,0 +1,53 @@ +inputs: + - name: filestream-snort + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snort.log.enabled} == true or ${kubernetes.hints.snort.enabled} == true + data_stream: + dataset: snort.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.snort.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + prospector: + scanner: + symlinks: true + tags: + - forwarded + - snort.log + data_stream.namespace: default + - name: udp-snort + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.snort.log.enabled} == true or ${kubernetes.hints.snort.enabled} == true + data_stream: + dataset: snort.log + type: logs + host: localhost:9514 + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + tags: + - forwarded + - snort.log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml new file mode 100644 index 00000000000..aef353751ec --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml @@ -0,0 +1,139 @@ +inputs: + - name: filestream-snyk + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: httpjson-snyk + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.audit.enabled} == true or ${kubernetes.hints.snyk.enabled} == true + config_version: 2 + cursor: + interval: + value: -24h + data_stream: + dataset: snyk.audit + type: logs + interval: 10s + request.body: + filters: null + request.method: POST + request.transforms: + - set: + target: header.Authorization + value: token + - set: + target: url.params.to + value: '[[ formatDate (now) "2006-01-02" ]]' + - set: + default: '[[ formatDate (now (parseDuration "-720h")) "2006-01-02" ]]' + target: url.params.from + value: '[[ formatDate (now (parseDuration .cursor.interval)) "2006-01-02" ]]' + request.url: https://snyk.io/api/v1/org//audit?page=1&sortOrder=ASC + response.pagination: + - set: + fail_on_template_error: true + target: url.params.page + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + response.request_body_on_pagination: true + tags: + - forwarded + - snyk-audit + - condition: ${kubernetes.hints.snyk.vulnerabilities.enabled} == true or ${kubernetes.hints.snyk.enabled} == true + config_version: 2 + cursor: + interval: + value: -24h + data_stream: + dataset: snyk.vulnerabilities + type: logs + interval: 24h + request.body: + filters: + exploitMaturity: + - mature + - proof-of-concept + - no-known-exploit + - no-data + fixable: false + identifier: null + ignored: false + isFixed: false + isPatchable: false + isPinnable: false + isUpgradable: false + languages: + - javascript + - ruby + - java + - scala 
+ - python + - golang + - php + - dotnet + - swift-objective-c + - elixir + - docker + - terraform + - kubernetes + - helm + - cloudformation + orgs: null + patched: false + priorityScore: + max: 1000 + min: 0 + projects: null + severity: + - critical + - high + - medium + - low + types: + - vuln + - license + - configuration + request.method: POST + request.timeout: 120s + request.transforms: + - set: + target: header.Authorization + value: token + - set: + target: url.params.to + value: '[[ formatDate (now) "2006-01-02" ]]' + - set: + default: '[[ formatDate (now (parseDuration "-24h")) "2006-01-02" ]]' + target: url.params.from + value: '[[ formatDate (now (parseDuration .cursor.interval)) "2006-01-02" ]]' + request.url: https://snyk.io/api/v1/reporting/issues/?page=1&perPage=10&sortBy=issueTitle&order=asc&groupBy=issue + response.pagination: + - set: + fail_on_template_error: true + target: url.params.page + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + response.request_body_on_pagination: true + response.split: + target: body.results + tags: + - forwarded + - snyk-vulnerabilities + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml new file mode 100644 index 00000000000..9fdee28a731 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml @@ -0,0 +1,56 @@ +inputs: + - name: filestream-stan + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.stan.log.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.stan.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - stan-log + data_stream.namespace: default + - name: stan/metrics-stan + type: stan/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.stan.channels.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.channels + type: metrics + hosts: + - ${kubernetes.hints.stan.channels.host|'localhost:8222'} + metricsets: + - channels + period: ${kubernetes.hints.stan.channels.period|'60s'} + - condition: ${kubernetes.hints.stan.stats.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.stats + type: metrics + hosts: + - ${kubernetes.hints.stan.stats.host|'localhost:8222'} + metricsets: + - stats + period: ${kubernetes.hints.stan.stats.period|'60s'} + - condition: ${kubernetes.hints.stan.subscriptions.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.subscriptions + type: metrics + hosts: + - ${kubernetes.hints.stan.subscriptions.host|'localhost:8222'} + metricsets: + - subscriptions + period: ${kubernetes.hints.stan.subscriptions.period|'60s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml new file mode 100644 index 00000000000..374d369783e --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml @@ -0,0 +1,24 @@ +inputs: + - name: filestream-suricata + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.suricata.eve.enabled} 
== true or ${kubernetes.hints.suricata.enabled} == true + data_stream: + dataset: suricata.eve + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.suricata.eve.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - suricata-eve + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml new file mode 100644 index 00000000000..8e3ca7ce297 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -0,0 +1,67 @@ +inputs: + - name: udp-symantec_endpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default + - name: filestream-symantec_endpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true and ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + exclude_files: + - .gz$ + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.symantec_endpoint.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default + - name: tcp-symantec_endpoint + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true and ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml new file mode 100644 index 00000000000..2f375b1a3f0 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml @@ -0,0 +1,148 @@ +inputs: + - name: synthetics/http-synthetics + type: synthetics/http + use_output: default + streams: + - __ui: null + check.request.method: null + condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: http + type: synthetics + enabled: true + max_redirects: null + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + response.include_body: null + response.include_headers: null + schedule: '@every 3m' + timeout: null + type: http + urls: null + data_stream.namespace: default + - name: synthetics/tcp-synthetics + type: synthetics/tcp + use_output: default + streams: + - __ui: null + condition: 
${kubernetes.hints.synthetics.tcp.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: tcp + type: synthetics + enabled: true + hosts: ${kubernetes.hints.synthetics.tcp.host|''} + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + proxy_use_local_resolver: false + schedule: '@every 3m' + timeout: null + type: tcp + data_stream.namespace: default + - name: synthetics/icmp-synthetics + type: synthetics/icmp + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.icmp.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: icmp + type: synthetics + enabled: true + hosts: ${kubernetes.hints.synthetics.icmp.host|''} + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + schedule: '@every 3m' + timeout: null + type: icmp + wait: 1s + data_stream.namespace: default + - name: synthetics/browser-synthetics + type: synthetics/browser + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.browser.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser + type: synthetics + enabled: true + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + schedule: '@every 3m' + throttling: null + timeout: null + type: browser + - condition: ${kubernetes.hints.synthetics.browser_network.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser.network + type: synthetics + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + - condition: ${kubernetes.hints.synthetics.browser_screenshot.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser.screenshot + type: synthetics + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + data_stream.namespace: default + - name: filestream-synthetics + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.synthetics.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml new file mode 100644 index 00000000000..34c8d0d984e --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml @@ -0,0 +1,32 @@ +inputs: + - name: filestream-tcp + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - 
name: tcp-tcp + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.generic.enabled} == true or ${kubernetes.hints.tcp.enabled} == true + data_stream: + dataset: tcp.generic + type: logs + host: localhost:8080 + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml new file mode 100644 index 00000000000..1355b57befa --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml @@ -0,0 +1,8296 @@ +inputs: + - name: udp-tomcat + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + host: localhost:9523 + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? 
"-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded? + if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. 
+ cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
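// (Same convention as linear_select above: the dissect processor leaves
// log.flags set on failure, so a null FLAG_FIELD here means the pattern matched.)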
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
+         value.substr(1, n-2)
+         : value;
+ }
+
+ function call(opts) {
+     var args = new Array(opts.args.length);
+     return function (evt) {
+         for (var i = 0; i < opts.args.length; i++)
+             if ((args[i] = opts.args[i](evt)) == null) return;
+         var result = opts.fn(args);
+         if (result != null) {
+             evt.Put(opts.dest, result);
+         }
+     };
+ }
+
+ function nop(evt) {
+ }
+
+ function appendErrorMsg(evt, msg) {
+     var value = evt.Get("error.message");
+     if (value == null) {
+         value = [msg];
+     } else if (value instanceof Array) {
+         value.push(msg);
+     } else {
+         value = [value, msg];
+     }
+     evt.Put("error.message", value);
+ }
+
+ function unimplemented(name) {
+     // No event is available at the call sites (see DIRCHK above), so just log.
+     console.warn("unimplemented feature: " + name);
+ }
+
+ function lookup(opts) {
+     return function (evt) {
+         var key = opts.key(evt);
+         if (key == null) return;
+         var value = opts.map.keyvaluepairs[key];
+         if (value === undefined) {
+             value = opts.map.default;
+         }
+         if (value !== undefined) {
+             evt.Put(opts.dest, value(evt));
+         }
+     };
+ }
+
+ function set(fields) {
+     return new processor.AddFields({
+         target: FIELDS_OBJECT,
+         fields: fields,
+     });
+ }
+
+ function setf(dst, src) {
+     return function (evt) {
+         var val = evt.Get(FIELDS_PREFIX + src);
+         if (val != null) evt.Put(FIELDS_PREFIX + dst, val);
+     };
+ }
+
+ function setc(dst, value) {
+     return function (evt) {
+         evt.Put(FIELDS_PREFIX + dst, value);
+     };
+ }
+
+ function set_field(opts) {
+     return function (evt) {
+         var val = opts.value(evt);
+         if (val != null) evt.Put(opts.dest, val);
+     };
+ }
+
+ function dump(label) {
+     return function (evt) {
+         console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t"));
+     };
+ }
+
+ function date_time_join_args(evt, arglist) {
+     var str = "";
+     for (var i = 0; i < arglist.length; i++) {
+         var fname = FIELDS_PREFIX + arglist[i];
+         var val = evt.Get(fname);
+         if (val != null) {
+             if (str !== "") str += " ";
+             str += val;
+         } else {
+             if (debug) console.warn("in date_time: input arg " + fname + " is not set");
+         }
+     }
+     return str;
+ }
+
+ function to2Digit(num) {
+     return num? (num < 10? "0" + num : num) : "00";
+ }
+
+ // Two-digit years 00-69 are interpreted as 2000-2069,
+ // and 70-99 as 1970-1999.
+ var twoDigitYearEpoch = 70;
+ var twoDigitYearCentury = 2000;
+
+ // Accept dates up to 2 days in the future; only used when no year is
+ // specified in a date. Two days should be enough to account for clock
+ // differences between systems and different tz offsets.
+ var maxFutureDelta = 2*24*60*60*1000;
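/* Worked example (editorial sketch, not part of the original patch): with the
   defaults above, set2DigitYear(69) yields 2069 while set2DigitYear(70) yields
   1970. For a date with no year at all -- e.g. "Dec 31 23:59:59" ingested on
   Jan 1 -- assuming the current year would place the event about 12 months in
   the future, well past maxFutureDelta (2 days), so DateContainer.toDate()
   below retries with the previous year instead. */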
+ // DateContainer stores date fields and then converts those fields into
+ // a Date. Necessary because building a Date using its set() methods gives
+ // different results depending on the order of components.
+ function DateContainer(tzOffset) {
+     this.offset = tzOffset === undefined? "Z" : tzOffset;
+ }
+
+ DateContainer.prototype = {
+     setYear: function(v) {this.year = v;},
+     setMonth: function(v) {this.month = v;},
+     setDay: function(v) {this.day = v;},
+     setHours: function(v) {this.hours = v;},
+     setMinutes: function(v) {this.minutes = v;},
+     setSeconds: function(v) {this.seconds = v;},
+
+     setUNIX: function(v) {this.unix = v;},
+
+     set2DigitYear: function(v) {
+         this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100;
+     },
+
+     toDate: function() {
+         if (this.unix !== undefined) {
+             return new Date(this.unix * 1000);
+         }
+         if (this.day === undefined || this.month === undefined) {
+             // Can't make a date from this.
+             return undefined;
+         }
+         if (this.year === undefined) {
+             // A date without a year. Set current year, or previous year
+             // if date would be in the future.
+             var now = new Date();
+             this.year = now.getFullYear();
+             var date = this.toDate();
+             if (date.getTime() - now.getTime() > maxFutureDelta) {
+                 date.setFullYear(now.getFullYear() - 1);
+             }
+             return date;
+         }
+         var MM = to2Digit(this.month);
+         var DD = to2Digit(this.day);
+         var hh = to2Digit(this.hours);
+         var mm = to2Digit(this.minutes);
+         var ss = to2Digit(this.seconds);
+         return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset);
+     }
+ }
+
+ function date_time_try_pattern(fmt, str, tzOffset) {
+     var date = new DateContainer(tzOffset);
+     var pos = date_time_try_pattern_at_pos(fmt, str, 0, date);
+     return pos !== undefined? date.toDate() : undefined;
+ }
+
+ function date_time_try_pattern_at_pos(fmt, str, pos, date) {
+     var len = str.length;
+     for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) {
+         pos = fmt[proc](str, pos, date);
+     }
+     return pos;
+ }
+
+ function date_time(opts) {
+     return function (evt) {
+         var tzOffset = opts.tz || tz_offset;
+         if (tzOffset === "event") {
+             tzOffset = evt.Get("event.timezone");
+         }
+         var str = date_time_join_args(evt, opts.args);
+         for (var i = 0; i < opts.fmts.length; i++) {
+             var date = date_time_try_pattern(opts.fmts[i], str, tzOffset);
+             if (date !== undefined) {
+                 evt.Put(FIELDS_PREFIX + opts.dest, date);
+                 return;
+             }
+         }
+         if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str);
+     };
+ }
+
+ // Seconds-per-unit for each duration format character.
+ var uA = 60 * 60 * 24;
+ var uD = 60 * 60 * 24;
+ var uF = 60 * 60;
+ var uG = 60 * 60 * 24 * 30;
+ var uH = 60 * 60;
+ var uI = 60 * 60;
+ var uJ = 60 * 60 * 24;
+ var uM = 60 * 60 * 24 * 30;
+ var uN = 60 * 60;
+ var uO = 1;
+ var uS = 1;
+ var uT = 60;
+ var uU = 60;
+ var uc = dc;
+
+ function duration(opts) {
+     return function(evt) {
+         var str = date_time_join_args(evt, opts.args);
+         for (var i = 0; i < opts.fmts.length; i++) {
+             var seconds = duration_try_pattern(opts.fmts[i], str);
+             if (seconds !== undefined) {
+                 evt.Put(FIELDS_PREFIX + opts.dest, seconds);
+                 return;
+             }
+         }
+         if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str);
+     };
+ }
+
+ function duration_try_pattern(fmt, str) {
+     var secs = 0;
+     var pos = 0;
+     for (var i = 0; i < fmt.length; i++) {
+         var f = fmt[i];
+         if (typeof f === "function") {
+             // Literal/separator matcher (e.g. uc(":")): just advances pos.
+             pos = f(str, pos);
+             if (pos === undefined) return;
+         } else {
+             // Unit constant (uA..uU above): parse a number and accumulate seconds.
+             var start = skipws(str, pos);
+             pos = skipdigits(str, start);
+             if (pos === start) return;
+             secs += parseInt(str.substr(start, pos - start), 10) * f;
+         }
+     }
+     return secs;
+ }
+
+ var shortMonths = {
+     // mon => [ month_id , how many chars to skip if month in long form ]
+     "Jan": [0, 4],
+     "Feb": [1, 5],
+     "Mar": [2, 2],
+     "Apr": [3, 2],
+     "May": [4, 0],
+     "Jun": [5, 1],
+     "Jul": [6, 1],
+     "Aug": [7, 3],
+     "Sep": [8, 6],
+     "Oct": [9, 4],
+     "Nov": [10, 5],
+     "Dec": [11, 4],
+     "jan": [0, 4],
+     "feb": [1, 5],
+     "mar": [2, 2],
+     "apr": [3, 2],
+     "may": [4, 0],
+     "jun": [5, 1],
+     "jul": [6, 1],
+     "aug": [7, 3],
+     "sep": [8, 6],
+     "oct": [9, 4],
+     "nov": [10, 5],
+     "dec": [11, 4],
+ };
+
+ // var dC = undefined;
+ var dR = dateMonthName(true);
+ var dB = dateMonthName(false);
+ var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth);
+ var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth);
+ var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay);
+ var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay);
+ var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours);
+ var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12
+ var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours);
+ var dT = dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes);
+ var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes);
+ var dP = parseAMPM; // AM|PM
+ var dQ = parseAMPM; // A.M.|P.M.
+ var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds);
+ var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds);
+ var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear);
+ var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear);
+ var dZ = parseHMS;
+ var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX);
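/* Usage sketch (editorial, not part of the original patch; "example",
   "event_time", "hdate" and "htime" are hypothetical names). Generated
   pipelines combine the primitives above into format arrays; date_time()
   joins the source fields with spaces and tries each format in order:

       date_time({
           id: "example",
           dest: "event_time",                      // stored under FIELDS_PREFIX
           args: ["hdate", "htime"],                // joined by date_time_join_args()
           fmts: [
               [dB, dF, dW, dZ],                    // "Apr 7 2025 23:01:09"
               [dW, dc("-"), dG, dc("-"), dF, dZ],  // "2025-04-07 23:01:09"
           ],
       });

   Each primitive consumes input at the current position and returns the next
   position (or undefined on mismatch); dc(), defined earlier in this file,
   is assumed here to match a literal separator. The first format that parses
   fully wins. */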
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
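// (Editorial note: computing a true registered domain (eTLD+1) would require
// a public suffix list, which this script does not have access to, so both
// helpers currently return the host part extracted by split_url().)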
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
+         cfg.open_quote.length + cfg.close_quote.length : 0;
+     var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$');
+     return function(evt) {
+         var msg = evt.Get(src);
+         if (msg === undefined) {
+             console.warn("tagval: input field is missing");
+             return fail(evt);
+         }
+         var pairs = msg.split(cfg.pair_separator);
+         var i;
+         var success = false;
+         var prev = "";
+         for (i = 0; i < pairs.length; i++) {
+             var m = pairs[i].match(kv_regex);
+             if (m === null) continue;
+             var field = keys[m[1]];
+             if (field === undefined) continue;
+             var value = m[2];
+             if (quotes_len > 0 &&
+                 value.length >= cfg.open_quote.length + cfg.close_quote.length &&
+                 value.substr(0, cfg.open_quote.length) === cfg.open_quote &&
+                 value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) {
+                 value = value.substr(cfg.open_quote.length, value.length - quotes_len);
+             }
+             evt.Put(FIELDS_PREFIX + field, value);
+             success = true;
+         }
+         if (!success) {
+             return fail(evt);
+         }
+         if (on_success != null) {
+             on_success(evt);
+         }
+     }
+ }
+
+ var ecs_mappings = {
+     "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]},
+     "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]},
+     "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]},
+     "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]},
+     "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]},
+     "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]},
+     "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]},
+     "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]},
+     "application": {to:[{field: "network.application", setter: fld_set}]},
+     "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]},
+     "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]},
+     "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]},
+     "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]},
+     "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]},
+     "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]},
+     "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]},
+     "child_pid_val": {to:[{field: "process.title", setter: fld_set}]},
+     "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]},
+     "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]},
+     "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]},
+     "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+     "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+     "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]},
+     "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]},
+     "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]},
+     "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]},
+     "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]},
+     "direction": {to:[{field: "network.direction", setter: fld_set}]},
+     "directory": {to:[{field: "file.directory", setter:
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
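// (Editorial note on entry anatomy: an optional "convert" coerces the captured
// string first (to_long, to_ip, ...); each "to" names an ECS destination field
// plus a merge strategy -- fld_set writes a single value, fld_append
// accumulates (e.g. related.ip), and fld_prio resolves conflicts between
// source keys, with the lower "prio" apparently winning, so "hostname"
// (prio 0) beats "host" (prio 1) for host.name.)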
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i=0; i<datetime_alt_components.length; i++) { /* ... */ } + } + + /* ... */ + + var dup7 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 = 
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + tags: + - tomcat-log + - forwarded + udp: null + data_stream.namespace: default + - name: tcp-tomcat + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + host: localhost:9523 + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. 
Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we want the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded?
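+ // (Illustrative gloss: the dissect call in match() further below runs with
+ // ignore_failure, so a failed pattern does not throw; it records a parse
+ // failure under log.flags. Assuming two candidate headers where hdr1 does
+ // not match: subprocessors[0](evt) leaves the flag set, the loop moves on
+ // to subprocessors[1], which clears it by matching, and the break below
+ // stops the scan at the first success.)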
+ if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
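+ // (Illustrative gloss: where linear_select above ORs its patterns,
+ // all_match ANDs them. Every sub-processor must leave log.flags unset for
+ // on_success to run, and the first one that sets it aborts the chain
+ // through on_failure.)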
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (value instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + // No event is available here, so warn instead of calling appendErrorMsg. + console.warn("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Two-digit years 00-69 are interpreted as 2000-2069, + // and 70-99 as 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this.
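+ // (Illustrative gloss: day and month are the minimum. Hours, minutes and
+ // seconds default to "00" through to2Digit, and a missing year is filled
+ // in below, so a syslog-style "Apr 3 12:15:06" seen in May 2022 would
+ // become 2022-04-03T12:15:06 at the configured offset.)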
+ return undefined; + } + if (this.year === undefined) { + // A date without a year. Set current year, or previous year + // if date would be in the future. + var now = new Date(); + this.year = now.getFullYear(); + var date = this.toDate(); + if (date.getTime() - now.getTime() > maxFutureDelta) { + date.setFullYear(now.getFullYear() - 1); + } + return date; + } + var MM = to2Digit(this.month); + var DD = to2Digit(this.day); + var hh = to2Digit(this.hours); + var mm = to2Digit(this.minutes); + var ss = to2Digit(this.seconds); + return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset); + } + } + + function date_time_try_pattern(fmt, str, tzOffset) { + var date = new DateContainer(tzOffset); + var pos = date_time_try_pattern_at_pos(fmt, str, 0, date); + return pos !== undefined? date.toDate() : undefined; + } + + function date_time_try_pattern_at_pos(fmt, str, pos, date) { + var len = str.length; + for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) { + pos = fmt[proc](str, pos, date); + } + return pos; + } + + function date_time(opts) { + return function (evt) { + var tzOffset = opts.tz || tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var date = date_time_try_pattern(opts.fmts[i], str, tzOffset); + if (date !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, date); + return; + } + } + if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str); + }; + } + + var uA = 60 * 60 * 24; + var uD = 60 * 60 * 24; + var uF = 60 * 60; + var uG = 60 * 60 * 24 * 30; + var uH = 60 * 60; + var uI = 60 * 60; + var uJ = 60 * 60 * 24; + var uM = 60 * 60 * 24 * 30; + var uN = 60 * 60; + var uO = 1; + var uS = 1; + var uT = 60; + var uU = 60; + var uc = dc; + + function duration(opts) { + return function(evt) { + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var seconds = duration_try_pattern(opts.fmts[i], str); + if (seconds !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, seconds); + return; + } + } + if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str); + }; + } + + function duration_try_pattern(fmt, str) { + var secs = 0; + var pos = 0; + for (var i=0; i<fmt.length; i++) { /* ... */ } + return secs; + } + + var shortMonths = { + // Maps 3-char month name -> [ month_id , how many chars to skip if month in long form ] + "Jan": [0, 4], + "Feb": [1, 5], + "Mar": [2, 2], + "Apr": [3, 2], + "May": [4, 0], + "Jun": [5, 1], + "Jul": [6, 1], + "Aug": [7, 3], + "Sep": [8, 6], + "Oct": [9, 4], + "Nov": [10, 5], + "Dec": [11, 4], + "jan": [0, 4], + "feb": [1, 5], + "mar": [2, 2], + "apr": [3, 2], + "may": [4, 0], + "jun": [5, 1], + "jul": [6, 1], + "aug": [7, 3], + "sep": [8, 6], + "oct": [9, 4], + "nov": [10, 5], + "dec": [11, 4], + }; + + // var dC = undefined; + var dR = dateMonthName(true); + var dB = dateMonthName(false); + var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth); + var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth); + var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay); + var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay); + var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours); + var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12 + var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours); + var dT = 
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
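+ // (Illustrative gloss: both return the full host captured by split_url,
+ // with no public-suffix splitting; extract_domain("www.example.net/docs")
+ // yields "www.example.net" because the "null://" fallback lets
+ // scheme-less input match uriRegExp.)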
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
+ cfg.open_quote.length + cfg.close_quote.length : 0; + var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$'); + return function(evt) { + var msg = evt.Get(src); + if (msg === undefined) { + console.warn("tagval: input field is missing"); + return fail(evt); + } + var pairs = msg.split(cfg.pair_separator); + var i; + var success = false; + var prev = ""; + for (i=0; i<pairs.length; i++) { /* ... */ + if (quotes_len > 0 && + value.length >= cfg.open_quote.length + cfg.close_quote.length && + value.substr(0, cfg.open_quote.length) === cfg.open_quote && + value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) { + value = value.substr(cfg.open_quote.length, value.length - quotes_len); + } + evt.Put(FIELDS_PREFIX + field, value); + success = true; + } + if (!success) { + return fail(evt); + } + if (on_success != null) { + on_success(evt); + } + } + } + + var ecs_mappings = { + "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]}, + "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]}, + "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]}, + "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]}, + "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]}, + "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]}, + "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]}, + "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]}, + "application": {to:[{field: "network.application", setter: fld_set}]}, + "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]}, + "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]}, + "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]}, + "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]}, + "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]}, + "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]}, + "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]}, + "child_pid_val": {to:[{field: "process.title", setter: fld_set}]}, + "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]}, + "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]}, + "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]}, + "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]}, + "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]}, + "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]}, + "direction": {to:[{field: "network.direction", setter: fld_set}]}, + "directory": {to:[{field: "file.directory", setter: 
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i=0; i} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 = 
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + tags: + - tomcat-log + - forwarded + tcp: null + data_stream.namespace: default + - name: filestream-tomcat + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + exclude_files: + - .gz$ + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.tomcat.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: 
false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we get the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded?
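+ // The only success signal available is the absence of the dissect
+ // failure flag in log.flags (FLAG_FIELD): the flag is cleared before
+ // each attempt, so a clean read below means this alternative matched.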
+ if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
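+ // Unlike linear_select, where the first match wins, all_match requires
+ // every processor in the list to succeed; the first failure aborts the
+ // chain and triggers on_failure.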
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
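+ // Strip one outer pair of quotes only when the same character ("), (')
+ // or (`) both opens and closes the value, e.g. RMQ(["'abc'"]) -> abc;
+ // mismatched quotes are left untouched.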
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (msg instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + appendErrorMsg("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this. 
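+ // Day and month are the minimum components needed to build a date;
+ // missing hours/minutes/seconds default to "00" via to2Digit() further
+ // down in toDate().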
+ return undefined; + } + if (this.year === undefined) { + // A date without a year. Set current year, or previous year + // if date would be in the future. + var now = new Date(); + this.year = now.getFullYear(); + var date = this.toDate(); + if (date.getTime() - now.getTime() > maxFutureDelta) { + date.setFullYear(now.getFullYear() - 1); + } + return date; + } + var MM = to2Digit(this.month); + var DD = to2Digit(this.day); + var hh = to2Digit(this.hours); + var mm = to2Digit(this.minutes); + var ss = to2Digit(this.seconds); + return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset); + } + } + + function date_time_try_pattern(fmt, str, tzOffset) { + var date = new DateContainer(tzOffset); + var pos = date_time_try_pattern_at_pos(fmt, str, 0, date); + return pos !== undefined? date.toDate() : undefined; + } + + function date_time_try_pattern_at_pos(fmt, str, pos, date) { + var len = str.length; + for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) { + pos = fmt[proc](str, pos, date); + } + return pos; + } + + function date_time(opts) { + return function (evt) { + var tzOffset = opts.tz || tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var date = date_time_try_pattern(opts.fmts[i], str, tzOffset); + if (date !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, date); + return; + } + } + if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str); + }; + } + + var uA = 60 * 60 * 24; + var uD = 60 * 60 * 24; + var uF = 60 * 60; + var uG = 60 * 60 * 24 * 30; + var uH = 60 * 60; + var uI = 60 * 60; + var uJ = 60 * 60 * 24; + var uM = 60 * 60 * 24 * 30; + var uN = 60 * 60; + var uO = 1; + var uS = 1; + var uT = 60; + var uU = 60; + var uc = dc; + + function duration(opts) { + return function(evt) { + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var seconds = duration_try_pattern(opts.fmts[i], str); + if (seconds !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, seconds); + return; + } + } + if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str); + }; + } + + function duration_try_pattern(fmt, str) { + var secs = 0; + var pos = 0; + for (var i=0; i [ month_id , how many chars to skip if month in long form ] + "Jan": [0, 4], + "Feb": [1, 5], + "Mar": [2, 2], + "Apr": [3, 2], + "May": [4, 0], + "Jun": [5, 1], + "Jul": [6, 1], + "Aug": [7, 3], + "Sep": [8, 6], + "Oct": [9, 4], + "Nov": [10, 5], + "Dec": [11, 4], + "jan": [0, 4], + "feb": [1, 5], + "mar": [2, 2], + "apr": [3, 2], + "may": [4, 0], + "jun": [5, 1], + "jul": [6, 1], + "aug": [7, 3], + "sep": [8, 6], + "oct": [9, 4], + "nov": [10, 5], + "dec": [11, 4], + }; + + // var dC = undefined; + var dR = dateMonthName(true); + var dB = dateMonthName(false); + var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth); + var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth); + var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay); + var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay); + var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours); + var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12 + var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours); + var dT = 
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
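+ // Both delegate to extract_domain(), which returns the full host part
+ // of the URL (capture group uriDomain of uriRegExp), so no eTLD+1
+ // reduction is applied here.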
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
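+ // Combined length of the quote pair, used later to strip one layer of
+ // quoting from values wrapped in open_quote/close_quote.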
+ cfg.open_quote.length + cfg.close_quote.length : 0; + var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$'); + return function(evt) { + var msg = evt.Get(src); + if (msg === undefined) { + console.warn("tagval: input field is missing"); + return fail(evt); + } + var pairs = msg.split(cfg.pair_separator); + var i; + var success = false; + var prev = ""; + for (i=0; i 0 && + value.length >= cfg.open_quote.length + cfg.close_quote.length && + value.substr(0, cfg.open_quote.length) === cfg.open_quote && + value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) { + value = value.substr(cfg.open_quote.length, value.length - quotes_len); + } + evt.Put(FIELDS_PREFIX + field, value); + success = true; + } + if (!success) { + return fail(evt); + } + if (on_success != null) { + on_success(evt); + } + } + } + + var ecs_mappings = { + "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]}, + "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]}, + "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]}, + "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]}, + "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]}, + "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]}, + "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]}, + "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]}, + "application": {to:[{field: "network.application", setter: fld_set}]}, + "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]}, + "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]}, + "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]}, + "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]}, + "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]}, + "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]}, + "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]}, + "child_pid_val": {to:[{field: "process.title", setter: fld_set}]}, + "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]}, + "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]}, + "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]}, + "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]}, + "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]}, + "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]}, + "direction": {to:[{field: "network.direction", setter: fld_set}]}, + "directory": {to:[{field: "file.directory", setter: 
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]},
+    "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]},
+    "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]},
+    "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]},
+    "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]},
+    "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]},
+    "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]},
+    "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]},
+    "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]},
+    "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]},
+    "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]},
+    "type": {to:[{field: "rsa.misc.type", setter: fld_set}]},
+    "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]},
+    "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]},
+    "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]},
+    "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]},
+    "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]},
+    "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]},
+    "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]},
+    "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]},
+    "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]},
+    "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]},
+    "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]},
+    "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]},
+    "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]},
+    "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]},
+    "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]},
+    "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]},
+    "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]},
+    "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]},
+    "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]},
+    "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]},
+    "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]},
+    "version": {to:[{field: "rsa.misc.version", setter: fld_set}]},
+    "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]},
+    "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]},
+    "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]},
+    "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]},
+    "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]},
+    "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]},
+    "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]},
+    "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]},
+    "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]},
+    "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]},
+    "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]},
+    "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]},
+    "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]},
+    "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]},
+    "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]},
+    "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]},
+    "web_ref_page": {to:[{field: "rsa.web.web_ref_page", setter: fld_set}]},
+    "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]},
+    "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]},
+    "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]},
+    "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]},
+    "word": {to:[{field: "rsa.internal.word", setter: fld_set}]},
+    "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]},
+    "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]},
+    "year": {to:[{field: "rsa.time.year", setter: fld_set}]},
+    "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]},
+};
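+// Shape of the table above (descriptive sketch only): each device key maps to
+// one or more targets,
+//   "<device key>": {convert: <optional coercion>, to: [{field: "<target>", setter: fld_*, prio: <n>}]},
+// so, for example, "vlan" is coerced with to_long before fld_set writes it to
+// rsa.network.vlan, while "workstation" appends into the shared
+// rsa.network.alias_host list via fld_append.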
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
+
+function fld_set(dst, value) {
+    dst[this.field] = { v: value };
+}
+
+function fld_append(dst, value) {
+    if (dst[this.field] === undefined) {
+        dst[this.field] = { v: [value] };
+    } else {
+        var base = dst[this.field];
+        if (base.v.indexOf(value) === -1) base.v.push(value);
+    }
+}
+
+function fld_prio(dst, value) {
+    if (dst[this.field] === undefined) {
+        dst[this.field] = { v: value, prio: this.prio };
+    } else if (this.prio < dst[this.field].prio) {
+        dst[this.field].v = value;
+        dst[this.field].prio = this.prio;
+    }
+}
+
+var valid_ecs_outcome = {
+    'failure': true,
+    'success': true,
+    'unknown': true
+};
+
+function fld_ecs_outcome(dst, value) {
+    value = value.toLowerCase();
+    if (valid_ecs_outcome[value] === undefined) {
+        value = 'unknown';
+    }
+    if (dst[this.field] === undefined) {
+        dst[this.field] = { v: value };
+    } else if (dst[this.field].v === 'unknown') {
+        dst[this.field] = { v: value };
+    }
+}
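+// Sketch of how fld_prio arbitrates when two device keys feed one field
+// (values are made up; per the tables above "connection_id" maps at prio 1
+// and "connectionid" at prio 0, so the prio-0 key wins when both appear):
+//   var dst = {};
+//   fld_prio.call({field: "rsa.misc.connection_id", prio: 1}, dst, "from-connection_id");
+//   fld_prio.call({field: "rsa.misc.connection_id", prio: 0}, dst, "from-connectionid");
+//   // dst["rsa.misc.connection_id"] -> {v: "from-connectionid", prio: 0}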
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - tomcat-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml new file mode 100644 index 00000000000..4ab26982389 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml @@ -0,0 +1,37 @@ +inputs: + - name: filestream-traefik + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.traefik.access.enabled} == true or ${kubernetes.hints.traefik.enabled} == true + data_stream: + dataset: traefik.access + type: logs + exclude_files: + - .gz$ + 
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml
new file mode 100644
index 00000000000..4ab26982389
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml
@@ -0,0 +1,37 @@
+inputs:
+    - name: filestream-traefik
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.traefik.access.enabled} == true or ${kubernetes.hints.traefik.enabled} == true
+          data_stream:
+            dataset: traefik.access
+            type: logs
+          exclude_files:
+            - .gz$
+          parsers:
+            - container:
+                format: auto
+                stream: ${kubernetes.hints.traefik.access.stream|'all'}
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags:
+            - forwarded
+      data_stream.namespace: default
+    - name: traefik/metrics-traefik
+      type: traefik/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.traefik.health.enabled} == true or ${kubernetes.hints.traefik.enabled} == true
+          data_stream:
+            dataset: traefik.health
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.traefik.health.host|'localhost:8080'}
+          metricsets:
+            - health
+          period: ${kubernetes.hints.traefik.health.period|'10s'}
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml
new file mode 100644
index 00000000000..60fa5ebf598
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml
@@ -0,0 +1,33 @@
+inputs:
+    - name: udp-udp
+      type: udp
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true
+          data_stream:
+            dataset: udp.generic
+            type: logs
+          host: localhost:8080
+          max_message_size: 10KiB
+      data_stream.namespace: default
+    - name: filestream-udp
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.udp.container_logs.enabled} == true
+          data_stream:
+            dataset: kubernetes.container_logs
+            type: logs
+          exclude_files: []
+          exclude_lines: []
+          parsers:
+            - container:
+                format: auto
+                stream: all
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags: []
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml
new file mode 100644
index 00000000000..22bcc875894
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml
@@ -0,0 +1,2271 @@
+inputs: + - name: filestream-zeek + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.zeek.capture_loss.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.capture_loss + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.capture_loss.stream|'all'} + paths: + - /var/log/bro/current/capture_loss.log + - /opt/zeek/logs/current/capture_loss.log + - /usr/local/var/spool/zeek/capture_loss.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-capture-loss + - condition: ${kubernetes.hints.zeek.connection.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.connection + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.connection.stream|'all'} + paths: + - /var/log/bro/current/conn.log + - /opt/zeek/logs/current/conn.log + - /usr/local/var/spool/zeek/conn.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-connection + - condition: ${kubernetes.hints.zeek.dce_rpc.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dce_rpc + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dce_rpc.stream|'all'} + paths: + - /var/log/bro/current/dce_rpc.log + - /opt/zeek/logs/current/dce_rpc.log 
- /usr/local/var/spool/zeek/dce_rpc.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dce-rpc + - condition: ${kubernetes.hints.zeek.dhcp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dhcp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dhcp.stream|'all'} + paths: + - /var/log/bro/current/dhcp.log + - /opt/zeek/logs/current/dhcp.log + - /usr/local/var/spool/zeek/dhcp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dhcp + - condition: ${kubernetes.hints.zeek.dnp3.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dnp3 + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dnp3.stream|'all'} + paths: + - /var/log/bro/current/dnp3.log + - /opt/zeek/logs/current/dnp3.log + - /usr/local/var/spool/zeek/dnp3.log + prospector: + scanner: + symlinks: true + tags: + - zeek-dnp3 + - condition: ${kubernetes.hints.zeek.dns.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dns + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dns.stream|'all'} + paths: + - /var/log/bro/current/dns.log + - /opt/zeek/logs/current/dns.log + - /usr/local/var/spool/zeek/dns.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dns + - condition: ${kubernetes.hints.zeek.dpd.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dpd + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dpd.stream|'all'} + paths: + - /var/log/bro/current/dpd.log + - /opt/zeek/logs/current/dpd.log + - /usr/local/var/spool/zeek/dpd.log + prospector: + scanner: + symlinks: true + tags: + - zeek-dpd + - condition: ${kubernetes.hints.zeek.files.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.files + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.files.stream|'all'} + paths: + - /var/log/bro/current/files.log + - /opt/zeek/logs/current/files.log + - /usr/local/var/spool/zeek/files.log + prospector: + scanner: + symlinks: true + tags: + - zeek-files + - condition: ${kubernetes.hints.zeek.ftp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ftp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ftp.stream|'all'} + paths: + - /var/log/bro/current/ftp.log + - /opt/zeek/logs/current/ftp.log + - /usr/local/var/spool/zeek/ftp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ftp + - condition: ${kubernetes.hints.zeek.http.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.http + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.http.stream|'all'} + paths: + - /var/log/bro/current/http.log + - /opt/zeek/logs/current/http.log + - /usr/local/var/spool/zeek/http.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-http + - condition: ${kubernetes.hints.zeek.intel.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.intel + type: logs + exclude_files: + - .gz$ + parsers: + - 
container: + format: auto + stream: ${kubernetes.hints.zeek.intel.stream|'all'} + paths: + - /var/log/bro/current/intel.log + - /opt/zeek/logs/current/intel.log + - /usr/local/var/spool/zeek/intel.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-intel + - condition: ${kubernetes.hints.zeek.irc.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.irc + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.irc.stream|'all'} + paths: + - /var/log/bro/current/irc.log + - /opt/zeek/logs/current/irc.log + - /usr/local/var/spool/zeek/irc.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-irc + - condition: ${kubernetes.hints.zeek.kerberos.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.kerberos + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.kerberos.stream|'all'} + paths: + - /var/log/bro/current/kerberos.log + - /opt/zeek/logs/current/kerberos.log + - /usr/local/var/spool/zeek/kerberos.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-kerberos + - condition: ${kubernetes.hints.zeek.known_certs.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_certs + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_certs.stream|'all'} + paths: + - /var/log/bro/current/known_certs.log + - /opt/zeek/logs/current/known_certs.log + - /usr/local/var/spool/zeek/known_certs.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_certs + - condition: ${kubernetes.hints.zeek.known_hosts.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_hosts + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_hosts.stream|'all'} + paths: + - /var/log/bro/current/known_hosts.log + - /opt/zeek/logs/current/known_hosts.log + - /usr/local/var/spool/zeek/known_hosts.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_hosts + - condition: ${kubernetes.hints.zeek.known_services.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_services + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_services.stream|'all'} + paths: + - /var/log/bro/current/known_services.log + - /opt/zeek/logs/current/known_services.log + - /usr/local/var/spool/zeek/known_services.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_services + - condition: ${kubernetes.hints.zeek.modbus.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.modbus + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.modbus.stream|'all'} + paths: + - /var/log/bro/current/modbus.log + - /opt/zeek/logs/current/modbus.log + - /usr/local/var/spool/zeek/modbus.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-modbus + - condition: ${kubernetes.hints.zeek.mysql.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.mysql + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: 
${kubernetes.hints.zeek.mysql.stream|'all'} + paths: + - /var/log/bro/current/mysql.log + - /opt/zeek/logs/current/mysql.log + - /usr/local/var/spool/zeek/mysql.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-mysql + - condition: ${kubernetes.hints.zeek.notice.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.notice + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.notice.stream|'all'} + paths: + - /var/log/bro/current/notice.log + - /opt/zeek/logs/current/notice.log + - /usr/local/var/spool/zeek/notice.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-notice + - condition: ${kubernetes.hints.zeek.ntlm.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ntlm + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ntlm.stream|'all'} + paths: + - /var/log/bro/current/ntlm.log + - /opt/zeek/logs/current/ntlm.log + - /usr/local/var/spool/zeek/ntlm.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ntlm + - condition: ${kubernetes.hints.zeek.ntp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ntp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ntp.stream|'all'} + paths: + - /var/log/bro/current/ntp.log + - /opt/zeek/logs/current/ntp.log + - /usr/local/var/spool/zeek/ntp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ntp + - condition: ${kubernetes.hints.zeek.ocsp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ocsp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ocsp.stream|'all'} + paths: + - /var/log/bro/current/ocsp.log + - /opt/zeek/logs/current/ocsp.log + - /usr/local/var/spool/zeek/ocsp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ocsp + - condition: ${kubernetes.hints.zeek.pe.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.pe + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.pe.stream|'all'} + paths: + - /var/log/bro/current/pe.log + - /opt/zeek/logs/current/pe.log + - /usr/local/var/spool/zeek/pe.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-pe + - condition: ${kubernetes.hints.zeek.radius.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.radius + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.radius.stream|'all'} + paths: + - /var/log/bro/current/radius.log + - /opt/zeek/logs/current/radius.log + - /usr/local/var/spool/zeek/radius.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-radius + - condition: ${kubernetes.hints.zeek.rdp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.rdp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.rdp.stream|'all'} + paths: + - /var/log/bro/current/rdp.log + - /opt/zeek/logs/current/rdp.log + - /usr/local/var/spool/zeek/rdp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-rdp + - condition: 
${kubernetes.hints.zeek.rfb.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.rfb + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.rfb.stream|'all'} + paths: + - /var/log/bro/current/rfb.log + - /opt/zeek/logs/current/rfb.log + - /usr/local/var/spool/zeek/rfb.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-rfb + - condition: ${kubernetes.hints.zeek.signature.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.signature + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.signature.stream|'all'} + paths: + - /var/log/bro/current/signature.log + - /opt/zeek/logs/current/signature.log + - /usr/local/var/spool/zeek/signature.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-signature + - condition: ${kubernetes.hints.zeek.sip.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.sip + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.sip.stream|'all'} + paths: + - /var/log/bro/current/sip.log + - /opt/zeek/logs/current/sip.log + - /usr/local/var/spool/zeek/sip.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-sip + - condition: ${kubernetes.hints.zeek.smb_cmd.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_cmd + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_cmd.stream|'all'} + paths: + - /var/log/bro/current/smb_cmd.log + - /opt/zeek/logs/current/smb_cmd.log + - /usr/local/var/spool/zeek/smb_cmd.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smb-cmd + - condition: ${kubernetes.hints.zeek.smb_files.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_files + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_files.stream|'all'} + paths: + - /var/log/bro/current/smb_files.log + - /opt/zeek/logs/current/smb_files.log + - /usr/local/var/spool/zeek/smb_files.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smb-files + - condition: ${kubernetes.hints.zeek.smb_mapping.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_mapping + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_mapping.stream|'all'} + paths: + - /var/log/bro/current/smb_mapping.log + - /opt/zeek/logs/current/smb_mapping.log + - /usr/local/var/spool/zeek/smb_mapping.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek.smb_mapping + - condition: ${kubernetes.hints.zeek.smtp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smtp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smtp.stream|'all'} + paths: + - /var/log/bro/current/smtp.log + - /opt/zeek/logs/current/smtp.log + - /usr/local/var/spool/zeek/smtp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smtp + - condition: ${kubernetes.hints.zeek.snmp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.snmp + type: logs 
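Two conventions repeat through every filestream stream in this zeek template: a stream is compiled in only when its per-dataset hint (${kubernetes.hints.zeek.<dataset>.enabled}) or the package-wide hint (${kubernetes.hints.zeek.enabled}) evaluates to true, and the ${...|'all'} form supplies a default when no hint is present; prospector.scanner.symlinks: true matters because Zeek's current/ log directory is typically a symlink into the day's spool directory. A minimal sketch of how a workload opts in, assuming the co.elastic.hints/* annotation scheme these generated templates are meant to consume (the annotation keys and pod name below are illustrative, not taken from this patch):

    # Enable every zeek data stream for a pod via the package-wide hint:
    kubectl annotate pod zeek-sensor-0 co.elastic.hints/package=zeek
    # Or enable only selected data streams:
    kubectl annotate pod zeek-sensor-0 co.elastic.hints/data_streams=dns,ssl

The zookeeper template added later in this patch follows the same scheme. (The snmp stream interrupted here continues below.)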
+ exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.snmp.stream|'all'} + paths: + - /var/log/bro/current/snmp.log + - /opt/zeek/logs/current/snmp.log + - /usr/local/var/spool/zeek/snmp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-snmp + - condition: ${kubernetes.hints.zeek.socks.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.socks + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.socks.stream|'all'} + paths: + - /var/log/bro/current/socks.log + - /opt/zeek/logs/current/socks.log + - /usr/local/var/spool/zeek/socks.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-socks + - condition: ${kubernetes.hints.zeek.software.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.software + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.software.stream|'all'} + paths: + - /var/log/bro/current/software.log + - /opt/zeek/logs/current/software.log + - /usr/local/var/spool/zeek/software.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-software + - condition: ${kubernetes.hints.zeek.ssh.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ssh + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ssh.stream|'all'} + paths: + - /var/log/bro/current/ssh.log + - /opt/zeek/logs/current/ssh.log + - /usr/local/var/spool/zeek/ssh.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ssh + - condition: ${kubernetes.hints.zeek.ssl.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ssl + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ssl.stream|'all'} + paths: + - /var/log/bro/current/ssl.log + - /opt/zeek/logs/current/ssl.log + - /usr/local/var/spool/zeek/ssl.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ssl + - condition: ${kubernetes.hints.zeek.stats.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.stats + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.stats.stream|'all'} + paths: + - /var/log/bro/current/stats.log + - /opt/zeek/logs/current/stats.log + - /usr/local/var/spool/zeek/stats.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-stats + - condition: ${kubernetes.hints.zeek.syslog.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.syslog + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.syslog.stream|'all'} + paths: + - /var/log/bro/current/syslog.log + - /opt/zeek/logs/current/syslog.log + - /usr/local/var/spool/zeek/syslog.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-syslog + - condition: ${kubernetes.hints.zeek.traceroute.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.traceroute + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.traceroute.stream|'all'} + paths: + - /var/log/bro/current/traceroute.log + - 
/opt/zeek/logs/current/traceroute.log + - /usr/local/var/spool/zeek/traceroute.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-traceroute + - condition: ${kubernetes.hints.zeek.tunnel.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.tunnel + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.tunnel.stream|'all'} + paths: + - /var/log/bro/current/tunnel.log + - /opt/zeek/logs/current/tunnel.log + - /usr/local/var/spool/zeek/tunnel.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-tunnel + - condition: ${kubernetes.hints.zeek.weird.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.weird + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.weird.stream|'all'} + paths: + - /var/log/bro/current/weird.log + - /opt/zeek/logs/current/weird.log + - /usr/local/var/spool/zeek/weird.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-weird + - condition: ${kubernetes.hints.zeek.x509.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.x509 + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.x509.stream|'all'} + paths: + - /var/log/bro/current/x509.log + - /opt/zeek/logs/current/x509.log + - /usr/local/var/spool/zeek/x509.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-x509 + data_stream.namespace: default + - name: httpjson-zeek + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.zeek.capture_loss.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.capture_loss + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="capture_loss-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-capture-loss + - condition: ${kubernetes.hints.zeek.connection.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.connection + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="conn-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-connection + - condition: ${kubernetes.hints.zeek.dce_rpc.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dce_rpc + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dce_rpc-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dce-rpc + - condition: ${kubernetes.hints.zeek.dhcp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dhcp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dhcp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dhcp + - condition: ${kubernetes.hints.zeek.dnp3.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dnp3 + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dnp3-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dnp3 + - condition: ${kubernetes.hints.zeek.dns.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dns + type: logs + interval: 10s + request.method: POST + 
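All of the httpjson-zeek streams above and below share one retrieval pattern: config_version: 2 selects the httpjson v2 input, every 10s interval POSTs a Splunk export search, index_earliest is seeded from the stored cursor (falling back to now minus 10s), and the appended streamstats max(_indextime) AS max_indextime carries the newest index time into .last_event so the cursor only moves forward. The delimiter: |4+ under response.split is a YAML block scalar whose value is a single newline, so the exported _raw payload is split line by line. Roughly the request each stream assembles, reproduced with curl against the placeholder host from the config (credentials and timestamps are invented for illustration):

    curl -sk -u "$SPLUNK_USER:$SPLUNK_PASS" \
      "https://server.example.com:8089/services/search/jobs/export" \
      --data-urlencode 'search=search sourcetype="dns-*" | streamstats max(_indextime) AS max_indextime' \
      -d output_mode=json \
      -d index_earliest=1660000000 \
      -d index_latest=1660000010

(The dns stream's transforms continue below.)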
request.transforms: + - set: + target: url.params.search + value: search sourcetype="dns-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dns + - condition: ${kubernetes.hints.zeek.dpd.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dpd + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dpd-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dpd + - condition: ${kubernetes.hints.zeek.files.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.files + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="files-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-files + - condition: ${kubernetes.hints.zeek.ftp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ftp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ftp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ftp + - condition: ${kubernetes.hints.zeek.http.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.http + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="http-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-http + - condition: ${kubernetes.hints.zeek.intel.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.intel + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="intel-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-intel + - condition: ${kubernetes.hints.zeek.irc.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.irc + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="irc-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-irc + - condition: ${kubernetes.hints.zeek.kerberos.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.kerberos + type: logs + interval: 10s + request.method: POST + request.transforms: + 
- set: + target: url.params.search + value: search sourcetype="kerberos-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-kerberos + - condition: ${kubernetes.hints.zeek.modbus.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.modbus + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="modbus-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-modbus + - condition: ${kubernetes.hints.zeek.mysql.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.mysql + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="mysql-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-mysql + - condition: ${kubernetes.hints.zeek.notice.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.notice + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="notice-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-notice + - condition: ${kubernetes.hints.zeek.ntlm.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ntlm + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ntlm-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ntlm + - condition: ${kubernetes.hints.zeek.ntp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ntp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ntp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ntp + - condition: ${kubernetes.hints.zeek.ocsp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ocsp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ocsp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ocsp + - condition: ${kubernetes.hints.zeek.pe.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.pe + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + 
target: url.params.search + value: search sourcetype="pe-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-pe + - condition: ${kubernetes.hints.zeek.radius.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.radius + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="radius-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-radius + - condition: ${kubernetes.hints.zeek.rdp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.rdp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="rdp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-rdp + - condition: ${kubernetes.hints.zeek.rfb.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.rfb + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="rfb-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + 
response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-rfb + - condition: ${kubernetes.hints.zeek.signature.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.signature + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="signature-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-signature + - condition: ${kubernetes.hints.zeek.sip.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.sip + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="sip-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-sip + - condition: ${kubernetes.hints.zeek.smb_cmd.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_cmd + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smb_cmd-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-cmd + - condition: ${kubernetes.hints.zeek.smb_files.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_files + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: 
url.params.search + value: search sourcetype="smb_files-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-files + - condition: ${kubernetes.hints.zeek.smb_mapping.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_mapping + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smb_mapping-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - condition: ${kubernetes.hints.zeek.smtp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smtp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smtp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smtp + - condition: ${kubernetes.hints.zeek.snmp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.snmp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="snmp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export 
+ response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-snmp + - condition: ${kubernetes.hints.zeek.socks.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.socks + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="socks-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-socks + - condition: ${kubernetes.hints.zeek.ssh.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ssh + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ssh-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ssh + - condition: ${kubernetes.hints.zeek.ssl.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ssl + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ssl-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ssl + - condition: ${kubernetes.hints.zeek.stats.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.stats + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search 
sourcetype="stats-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-stats + - condition: ${kubernetes.hints.zeek.syslog.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.syslog + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="syslog-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-syslog + - condition: ${kubernetes.hints.zeek.traceroute.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.traceroute + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="traceroute-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-traceroute + - condition: ${kubernetes.hints.zeek.tunnel.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.tunnel + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="tunnel-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + 
+          response.decode_as: application/x-ndjson
+          response.split:
+            delimiter: |4+
+
+            target: body.result._raw
+            type: string
+          tags:
+            - forwarded
+            - zeek-tunnel
+        - condition: ${kubernetes.hints.zeek.weird.enabled} == true and ${kubernetes.hints.zeek.enabled} == true
+          config_version: 2
+          cursor:
+            index_earliest:
+              value: '[[.last_event.result.max_indextime]]'
+          data_stream:
+            dataset: zeek.weird
+            type: logs
+          interval: 10s
+          request.method: POST
+          request.transforms:
+            - set:
+                target: url.params.search
+                value: search sourcetype="weird-*" | streamstats max(_indextime) AS max_indextime
+            - set:
+                target: url.params.output_mode
+                value: json
+            - set:
+                default: '[[(now (parseDuration "-10s")).Unix]]'
+                target: url.params.index_earliest
+                value: '[[ .cursor.index_earliest ]]'
+            - set:
+                target: url.params.index_latest
+                value: '[[(now).Unix]]'
+            - set:
+                target: header.Content-Type
+                value: application/x-www-form-urlencoded
+          request.url: https://server.example.com:8089/services/search/jobs/export
+          response.decode_as: application/x-ndjson
+          response.split:
+            delimiter: |4+
+
+            target: body.result._raw
+            type: string
+          tags:
+            - forwarded
+            - zeek-weird
+        - condition: ${kubernetes.hints.zeek.x509.enabled} == true and ${kubernetes.hints.zeek.enabled} == true
+          config_version: 2
+          cursor:
+            index_earliest:
+              value: '[[.last_event.result.max_indextime]]'
+          data_stream:
+            dataset: zeek.x509
+            type: logs
+          interval: 10s
+          request.method: POST
+          request.transforms:
+            - set:
+                target: url.params.search
+                value: search sourcetype="x509-*" | streamstats max(_indextime) AS max_indextime
+            - set:
+                target: url.params.output_mode
+                value: json
+            - set:
+                default: '[[(now (parseDuration "-10s")).Unix]]'
+                target: url.params.index_earliest
+                value: '[[ .cursor.index_earliest ]]'
+            - set:
+                target: url.params.index_latest
+                value: '[[(now).Unix]]'
+            - set:
+                target: header.Content-Type
+                value: application/x-www-form-urlencoded
+          request.url: https://server.example.com:8089/services/search/jobs/export
+          response.decode_as: application/x-ndjson
+          response.split:
+            delimiter: |4+
+
+            target: body.result._raw
+            type: string
+          tags:
+            - forwarded
+            - zeek-x509
+      data_stream.namespace: default
diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml
new file mode 100644
index 00000000000..5199734c315
--- /dev/null
+++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml
@@ -0,0 +1,54 @@
+inputs:
+    - name: zookeeper/metrics-zookeeper
+      type: zookeeper/metrics
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.zookeeper.connection.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true
+          data_stream:
+            dataset: zookeeper.connection
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.zookeeper.connection.host|'localhost:2181'}
+          metricsets:
+            - connection
+          period: ${kubernetes.hints.zookeeper.connection.period|'10s'}
+        - condition: ${kubernetes.hints.zookeeper.mntr.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true
+          data_stream:
+            dataset: zookeeper.mntr
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.zookeeper.mntr.host|'localhost:2181'}
+          metricsets:
+            - mntr
+          period: ${kubernetes.hints.zookeeper.mntr.period|'10s'}
+        - condition: ${kubernetes.hints.zookeeper.server.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true
+          data_stream:
+            dataset: zookeeper.server
+            type: metrics
+          hosts:
+            - ${kubernetes.hints.zookeeper.server.host|'localhost:2181'}
+          metricsets:
+            - server
+          period: ${kubernetes.hints.zookeeper.server.period|'10s'}
+      data_stream.namespace: default
+    - name: filestream-zookeeper
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.zookeeper.container_logs.enabled} == true
+          data_stream:
+            dataset: kubernetes.container_logs
+            type: logs
+          exclude_files: []
+          exclude_lines: []
+          parsers:
+            - container:
+                format: auto
+                stream: all
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags: []
+      data_stream.namespace: default
diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go
index 56a6860393b..8a193003ab7 100644
--- a/dev-tools/mage/crossbuild.go
+++ b/dev-tools/mage/crossbuild.go
@@ -281,6 +281,15 @@ func (b GolangCrossBuilder) Build() error {
 		verbose = "true"
 	}
 	var args []string
+	// There's a bug on certain debian versions:
+	// https://discuss.linuxcontainers.org/t/debian-jessie-containers-have-extremely-low-performance/1272
+	// basically, apt-get has a bug where it will try to iterate through every possible FD as set by the NOFILE ulimit.
+	// On certain docker installs, docker will set the ulimit to a value > 10^9, which means apt-get will take >1 hour.
+	// This runs across all possible debian platforms, since there's no real harm in it.
+	if strings.Contains(image, "debian") {
+		args = append(args, "--ulimit", "nofile=262144:262144")
+	}
+
 	if runtime.GOOS != "windows" {
 		args = append(args,
 			"--env", "EXEC_UID="+strconv.Itoa(os.Getuid()),
diff --git a/dev-tools/packaging/files/darwin/PkgInfo b/dev-tools/packaging/files/darwin/PkgInfo
new file mode 100644
index 00000000000..bd04210fb49
--- /dev/null
+++ b/dev-tools/packaging/files/darwin/PkgInfo
@@ -0,0 +1 @@
+APPL????
\ No newline at end of file
diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml
index c02c0596e0e..70a47df591d 100644
--- a/dev-tools/packaging/packages.yml
+++ b/dev-tools/packaging/packages.yml
@@ -118,13 +118,7 @@ shared:
       config_mode: 0644
       skip_on_missing: true

-  - &agent_binary_files
-    '{{.BeatName}}{{.BinaryExt}}':
-      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
-      mode: 0755
-    'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}':
-      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
-      mode: 0755
+  - &agent_binary_common_files
     LICENSE.txt:
       source: '{{ repo.RootDir }}/LICENSE.txt'
       mode: 0644
@@ -150,18 +144,54 @@ shared:
         {{ commit }}
       mode: 0644

-  # Binary package spec (tar.gz for linux/darwin) for community beats.
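A note on the crossbuild.go change above: the --ulimit value it adds is passed straight through to docker run, and the pathology is easy to reproduce outside the build. The image tag here is only an example:

    # On an affected docker install the container inherits a huge NOFILE limit:
    docker run --rm debian:bullseye sh -c 'ulimit -n'
    # With the same override crossbuild now applies, apt-get behaves again:
    docker run --rm --ulimit nofile=262144:262144 debian:bullseye sh -c 'ulimit -n'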
+  - &agent_binary_files
+    '{{.BeatName}}{{.BinaryExt}}':
+      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+      mode: 0755
+    'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}':
+      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+      mode: 0755
+    <<: *agent_binary_common_files
+
+  - &agent_darwin_app_bundle_files
+    'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/Info.plist':
+      template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/Info.plist.tmpl'
+      mode: 0644
+    'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/PkgInfo':
+      template: '{{ elastic_beats_dir }}/dev-tools/packaging/files/darwin/PkgInfo'
+      mode: 0644
+
+  - &agent_darwin_binary_files
+    '{{.BeatName}}{{.BinaryExt}}':
+      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+      mode: 0755
+    'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}}':
+      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+      mode: 0755
+    <<: *agent_darwin_app_bundle_files
+    <<: *agent_binary_common_files
+
+  - &agent_components
+    'data/{{.BeatName}}-{{ commit_short }}/components':
+      source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/'
+      mode: 0755
+      config_mode: 0644
+      skip_on_missing: true
+
+  # Binary package spec (tar.gz for linux) for community beats.
   - &agent_binary_spec
     <<: *common
     files:
       <<: *agent_binary_files
-      'data/{{.BeatName}}-{{ commit_short }}/components':
-        source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/'
-        mode: 0755
-        config_mode: 0644
-        skip_on_missing: true
+      <<: *agent_components

+  - &agent_darwin_binary_spec
+    <<: *common
+    files:
+      <<: *agent_darwin_binary_files
+      <<: *agent_components
+
   # Binary package spec (zip for windows) for community beats.
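The packaging refactor above is built on YAML anchors and merge keys: <<: *agent_binary_common_files splices the shared file list into each binary file set, and the darwin spec stacks two merges (<<: *agent_darwin_binary_files plus <<: *agent_components) to compose itself from reusable parts. To inspect what the merged specs expand to, one option (a suggestion, not part of this patch) is mikefarah's yq, whose explode operator resolves anchors and merge keys in place:

    # Print packages.yml with all anchors and merge keys expanded:
    yq 'explode(.)' dev-tools/packaging/packages.yml | less

(The windows binary spec named by the comment above follows below.)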
- &agent_windows_binary_spec <<: *common @@ -730,11 +760,14 @@ specs: - os: darwin types: [tgz] spec: - <<: *agent_binary_spec + <<: *agent_darwin_binary_spec <<: *elastic_license_for_binaries files: + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent': + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/elastic-agent.tmpl' + mode: 0755 '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + source: data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}} symlink: true mode: 0755 diff --git a/dev-tools/packaging/templates/darwin/Info.plist.tmpl b/dev-tools/packaging/templates/darwin/Info.plist.tmpl new file mode 100644 index 00000000000..b98202219b5 --- /dev/null +++ b/dev-tools/packaging/templates/darwin/Info.plist.tmpl @@ -0,0 +1,20 @@ + + + + + CFBundleExecutable + elastic-agent + CFBundleIdentifier + co.elastic.elastic-agent + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + elastic-agent + CFBundlePackageType + APPL + CFBundleShortVersionString + {{ beat_version }} + CFBundleVersion + {{ beat_version }} + + diff --git a/dev-tools/packaging/templates/darwin/elastic-agent.tmpl b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl new file mode 100644 index 00000000000..74c0f238c28 --- /dev/null +++ b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl @@ -0,0 +1,11 @@ +#!/bin/sh +# Fix up the symlink and exit + +set -e + +symlink="/Library/Elastic/Agent/elastic-agent" + +if test -L "$symlink"; then + ln -sfn "data/elastic-agent-{{ commit_short }}/elastic-agent.app/Contents/MacOS/elastic-agent" "$symlink" +fi + diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl index 7ff81be9559..ab16391a611 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl @@ -1,245 +1,233 @@ -{{- $beatHome := printf "%s/%s" "/usr/share" .BeatName }} -{{- $beatBinary := printf "%s/%s" $beatHome .BeatName }} -{{- $repoInfo := repo }} - -# Prepare home in a different stage to avoid creating additional layers on -# the final image because of permission changes. 
-FROM {{ .buildFrom }} AS home - -COPY beat {{ $beatHome }} - -RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ - chown -R root:root {{ $beatHome }} && \ - find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \ - find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ - find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ - find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \ - rm {{ $beatBinary }} && \ - ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \ - chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \ - chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/*beat && \ - (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/apm-server || true) && \ - (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/elastic-endpoint || true) && \ - find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \ - find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chmod 0644 {} \; && \ -{{- range $i, $modulesd := .ModulesDirs }} - chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \ -{{- end }} -{{- if contains .image_name "-cloud" }} - mkdir -p /opt/filebeat /opt/metricbeat && \ - tar xf {{ $beatHome }}/data/cloud_downloads/metricbeat-*.tar.gz -C /opt/metricbeat --strip-components=1 && \ - tar xf {{ $beatHome }}/data/cloud_downloads/filebeat-*.tar.gz -C /opt/filebeat --strip-components=1 && \ -{{- end }} - rm -rf {{ $beatHome }}/data/cloud_downloads && \ - true - -FROM {{ .from }} - -ENV BEAT_SETUID_AS={{ .user }} - -{{- if contains .from "ubi-minimal" }} -RUN for iter in {1..10}; do microdnf update -y && microdnf install -y tar gzip findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) -{{- else }} - -RUN for iter in {1..10}; do \ - apt-get update -y && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -{{- end }} - -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN apt-get update -y && \ - for iter in {1..10}; do \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ - libglib2.0-0\ - libnss3\ - libnspr4\ - libatk1.0-0\ - libatk-bridge2.0-0\ - libcups2\ - libdrm2\ - libdbus-1-3\ - libxcb1\ - libxkbcommon0\ - libx11-6\ - libxcomposite1\ - libxdamage1\ - libxext6\ - libxfixes3\ - libxrandr2\ - libgbm1\ - libpango-1.0-0\ - libcairo2\ - libasound2\ - libatspi2.0-0\ - libxshmfence1 \ - fonts-noto-core\ - fonts-noto-cjk &&\ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -ENV NODE_PATH={{ $beatHome }}/.node -RUN echo \ - $NODE_PATH \ - {{ $beatHome }}/.config \ - {{ $beatHome }}/.synthetics \ - {{ $beatHome }}/.npm \ - {{ $beatHome }}/.cache \ - | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' -{{- end }} - -LABEL \ - org.label-schema.build-date="{{ date }}" \ - org.label-schema.schema-version="1.0" \ - org.label-schema.vendor="{{ .BeatVendor }}" \ - org.label-schema.license="{{ .License }}" \ - org.label-schema.name="{{ .BeatName }}" \ - org.label-schema.version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ - org.label-schema.url="{{ .BeatURL }}" \ - org.label-schema.vcs-url="{{ $repoInfo.RootImportPath }}" \ - org.label-schema.vcs-ref="{{ commit }}" \ - io.k8s.description="{{ .BeatDescription }}" \ - io.k8s.display-name="{{ .BeatName | title }} image" \ - org.opencontainers.image.created="{{ date }}" \ - org.opencontainers.image.licenses="{{ .License }}" \ - org.opencontainers.image.title="{{ .BeatName | title }}" \ - org.opencontainers.image.vendor="{{ .BeatVendor }}" \ - name="{{ .BeatName }}" \ - maintainer="infra@elastic.co" \ - vendor="{{ .BeatVendor }}" \ - version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ - release="1" \ - url="{{ .BeatURL }}" \ - summary="{{ .BeatName }}" \ - license="{{ .License }}" \ - description="{{ .BeatDescription }}" - -ENV ELASTIC_CONTAINER "true" -ENV PATH={{ $beatHome }}:$PATH -ENV GODEBUG="madvdontneed=1" - -# Add an init process, check the checksum to make sure it's a match -RUN set -e ; \ - TINI_BIN=""; \ - TINI_SHA256=""; \ - TINI_VERSION="v0.19.0"; \ - case "$(arch)" in \ - x86_64) \ - TINI_BIN="tini-amd64"; \ - TINI_SHA256="93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c"; \ - ;; \ - aarch64) \ - TINI_BIN="tini-arm64"; \ - TINI_SHA256="07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81"; \ - ;; \ - *) \ - echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ - ;; \ - esac ; \ - curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ - echo "${TINI_SHA256} ${TINI_BIN}" | sha256sum -c - ; \ - mv "${TINI_BIN}" /usr/bin/tini ; \ - chmod +x /usr/bin/tini - -COPY docker-entrypoint /usr/local/bin/docker-entrypoint -RUN chmod 755 /usr/local/bin/docker-entrypoint - -COPY --from=home {{ $beatHome }} {{ $beatHome }} - -# Elastic Agent needs group permissions in the home itself to be able to -# create fleet.yml when running as non-root. -RUN chmod 0770 {{ $beatHome }} - -RUN mkdir /licenses -COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses -COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses - -{{- if contains .image_name "-cloud" }} -COPY --from=home /opt /opt -{{- end }} - - -RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. 
Thus, we use readlink to follow the link and setcap on the actual binary - readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \ -{{- end }} -true - -{{- if eq .user "root" }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -{{- end }} -{{- else }} -RUN groupadd --gid 1000 {{ .BeatName }} -RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN chown {{ .user }} $NODE_PATH -{{- end }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -RUN chown {{ .user }} /app -{{- end }} -{{- end }} - -USER {{ .user }} - -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -# Setup synthetics env vars -ENV ELASTIC_SYNTHETICS_CAPABLE=true -ENV SUITES_DIR={{ $beatHome }}/suites -ENV NODE_VERSION=16.15.0 -ENV PATH="$NODE_PATH/node/bin:$PATH" -# Install the latest version of @elastic/synthetics forcefully ignoring the previously -# cached node_modules, heartbeat then calls the global executable to run test suites -# Setup node -RUN cd {{$beatHome}}/.node \ - && NODE_DOWNLOAD_URL="" \ - && case "$(arch)" in \ - x86_64) \ - NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ - ;; \ - aarch64) \ - NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-arm64.tar.xz \ - ;; \ - *) \ - echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ - ;; \ - esac \ - && mkdir -p node \ - && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ - && chmod ug+rwX -R $NODE_PATH \ - && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH -{{- end }} - - -{{- range $i, $port := .ExposePorts }} -EXPOSE {{ $port }} -{{- end }} - -# When running under Docker, we must ensure libbeat monitoring pulls cgroup -# metrics from /sys/fs/cgroup//, ignoring any paths found in -# /proc/self/cgroup. -ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/ - -WORKDIR {{ $beatHome }} - -{{- if contains .image_name "-cloud" }} -ENTRYPOINT ["/usr/bin/tini", "--"] -CMD ["/app/apm.sh"] -# Generate a stub command that will be overwritten at runtime -RUN echo -e '#!/bin/sh\nexec /usr/local/bin/docker-entrypoint' > /app/apm.sh && \ - chmod 0555 /app/apm.sh -{{- else }} -ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint"] -{{- end }} - +{{- $beatHome := printf "%s/%s" "/usr/share" .BeatName }} +{{- $beatBinary := printf "%s/%s" $beatHome .BeatName }} +{{- $repoInfo := repo }} + +# Prepare home in a different stage to avoid creating additional layers on +# the final image because of permission changes. 
+FROM {{ .buildFrom }} AS home + +COPY beat {{ $beatHome }} + +RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ + chown -R root:root {{ $beatHome }} && \ + find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \ + find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ + find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ + find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \ + rm {{ $beatBinary }} && \ + ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \ + chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \ + chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/*beat && \ + (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/apm-server || true) && \ + (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/elastic-endpoint || true) && \ + find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \ + find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chmod 0644 {} \; && \ +{{- range $i, $modulesd := .ModulesDirs }} + chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \ +{{- end }} +{{- if contains .image_name "-cloud" }} + mkdir -p /opt/filebeat /opt/metricbeat && \ + tar xf {{ $beatHome }}/data/cloud_downloads/metricbeat-*.tar.gz -C /opt/metricbeat --strip-components=1 && \ + tar xf {{ $beatHome }}/data/cloud_downloads/filebeat-*.tar.gz -C /opt/filebeat --strip-components=1 && \ +{{- end }} + rm -rf {{ $beatHome }}/data/cloud_downloads && \ + true + +FROM {{ .from }} + +ENV BEAT_SETUID_AS={{ .user }} + +{{- if contains .from "ubi-minimal" }} +RUN for iter in {1..10}; do microdnf update -y && microdnf install -y tar gzip findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) +{{- else }} + +RUN for iter in {1..10}; do \ + apt-get update -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +{{- end }} + +LABEL \ + org.label-schema.build-date="{{ date }}" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.vendor="{{ .BeatVendor }}" \ + org.label-schema.license="{{ .License }}" \ + org.label-schema.name="{{ .BeatName }}" \ + org.label-schema.version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ + org.label-schema.url="{{ .BeatURL }}" \ + org.label-schema.vcs-url="{{ $repoInfo.RootImportPath }}" \ + org.label-schema.vcs-ref="{{ commit }}" \ + io.k8s.description="{{ .BeatDescription }}" \ + io.k8s.display-name="{{ .BeatName | title }} image" \ + org.opencontainers.image.created="{{ date }}" \ + org.opencontainers.image.licenses="{{ .License }}" \ + org.opencontainers.image.title="{{ .BeatName | title }}" \ + org.opencontainers.image.vendor="{{ .BeatVendor }}" \ + name="{{ .BeatName }}" \ + maintainer="infra@elastic.co" \ + vendor="{{ .BeatVendor }}" \ + version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ + release="1" \ + url="{{ .BeatURL }}" \ + summary="{{ .BeatName }}" \ + license="{{ .License }}" \ + description="{{ .BeatDescription }}" + +ENV ELASTIC_CONTAINER "true" +ENV PATH={{ $beatHome }}:$PATH +ENV GODEBUG="madvdontneed=1" + +# Add an init process, check the checksum to make sure it's a match +RUN set -e ; \ + TINI_BIN=""; \ + TINI_SHA256=""; \ + TINI_VERSION="v0.19.0"; \ + case "$(arch)" in \ + x86_64) \ + TINI_BIN="tini-amd64"; \ + TINI_SHA256="93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c"; \ + ;; \ + aarch64) \ + TINI_BIN="tini-arm64"; \ + TINI_SHA256="07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81"; \ + ;; \ + *) \ + echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ + ;; \ + esac ; \ + curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ + echo "${TINI_SHA256} ${TINI_BIN}" | sha256sum -c - ; \ + mv "${TINI_BIN}" /usr/bin/tini ; \ + chmod +x /usr/bin/tini + +COPY docker-entrypoint /usr/local/bin/docker-entrypoint +RUN chmod 755 /usr/local/bin/docker-entrypoint + +COPY --from=home {{ $beatHome }} {{ $beatHome }} + +# Elastic Agent needs group permissions in the home itself to be able to +# create fleet.yml when running as non-root. +RUN chmod 0770 {{ $beatHome }} + +RUN mkdir /licenses +COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses +COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses + +{{- if contains .image_name "-cloud" }} +COPY --from=home /opt /opt +{{- end }} + + +RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. 
Thus, we use readlink to follow the link and setcap on the actual binary + readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \ +{{- end }} +true + +{{- if eq .user "root" }} +{{- if contains .image_name "-cloud" }} +# Generate folder for a stub command that will be overwritten at runtime +RUN mkdir /app +{{- end }} +{{- else }} +RUN groupadd --gid 1000 {{ .BeatName }} +RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} + +{{- if contains .image_name "-cloud" }} +# Generate folder for a stub command that will be overwritten at runtime +RUN mkdir /app +RUN chown {{ .user }} /app +{{- end }} +{{- end }} + +{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} +USER root +ENV NODE_PATH={{ $beatHome }}/.node +RUN echo \ + $NODE_PATH \ + {{ $beatHome }}/.config \ + {{ $beatHome }}/.synthetics \ + {{ $beatHome }}/.npm \ + {{ $beatHome }}/.cache \ + | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' + +# Setup synthetics env vars +ENV ELASTIC_SYNTHETICS_CAPABLE=true +ENV NODE_VERSION=16.15.0 +ENV PATH="$NODE_PATH/node/bin:$PATH" +# Install the latest version of @elastic/synthetics forcefully ignoring the previously +# cached node_modules; heartbeat then calls the global executable to run test suites +# Setup node +RUN cd {{$beatHome}}/.node \ + && NODE_DOWNLOAD_URL="" \ + && case "$(arch)" in \ + arm64) \ + NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ + ;; \ + x86_64) \ + NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ + ;; \ + aarch64) \ + NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-arm64.tar.xz \ + ;; \ + *) \ + echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ + ;; \ + esac \ + && mkdir -p node \ + && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ + && chmod ug+rwX -R $NODE_PATH + +# Install synthetics as a regular user, installing npm deps as root doesn't work +RUN chown -R {{ .user }} $NODE_PATH +USER {{ .user }} +# If this fails dump the NPM logs +RUN npm i -g --loglevel verbose -f @elastic/synthetics@stack_release || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1' +RUN chmod ug+rwX -R $NODE_PATH +USER root + +# Install the deps as needed by the exact version of playwright that elastic synthetics uses +# We don't use npx playwright install-deps because that could pull a newer version +# Install additional fonts as well +RUN for iter in {1..10}; do \ + apt-get update -y && \ + $NODE_PATH/node/lib/node_modules/@elastic/synthetics/node_modules/.bin/playwright install-deps chromium && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ + fonts-noto \ + fonts-noto-cjk && \ + exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) + +{{- end }} +USER {{ .user }} + + +{{- range $i, $port := .ExposePorts }} +EXPOSE {{ $port }} +{{- end }} + +# When running under Docker, we must ensure libbeat monitoring pulls cgroup +# metrics from /sys/fs/cgroup//, ignoring any paths found in +# /proc/self/cgroup.
+ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/ + +WORKDIR {{ $beatHome }} + +{{- if contains .image_name "-cloud" }} +ENTRYPOINT ["/usr/bin/tini", "--"] +CMD ["/app/apm.sh"] +# Generate a stub command that will be overwritten at runtime +RUN echo -e '#!/bin/sh\nexec /usr/local/bin/docker-entrypoint' > /app/apm.sh && \ + chmod 0555 /app/apm.sh +{{- else }} +ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint"] +{{- end }} + diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 06cce5a13b0..d2edf7909cb 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -181,7 +181,7 @@ RUN cd /usr/share/heartbeat/.node \ && mkdir -p node \ && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ && chmod ug+rwX -R $NODE_PATH \ - && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH + && npm i -g -f @elastic/synthetics@stack_release && chmod ug+rwX -R $NODE_PATH {{- end }} {{- range $i, $port := .ExposePorts }} diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl index 3c753caa0fb..e4b4df82e23 100644 --- a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl +++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl @@ -35,7 +35,7 @@ labels: ## This value can be "opensource" or "commercial" mil.dso.ironbank.image.type: "commercial" ## Product the image belongs to for grouping multiple images - mil.dso.ironbank.product.name: "beats" + mil.dso.ironbank.product.name: "elastic-agent" # List of resources to make available to the offline build context resources: diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl index 083ebb91060..d96f21a8629 100644 --- a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl +++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl @@ -3,16 +3,26 @@ set -e symlink="/usr/share/elastic-agent/bin/elastic-agent" -old_agent_dir="$( dirname "$(readlink -f -- "$symlink")" )" +old_agent_dir="" + +# check if $symlink exists for the previous install +# and derive the old agent directory +if test -L "$symlink"; then + resolved_symlink="$(readlink -f -- "$symlink")" + # check if it resolved to a non-empty string + if ! [ -z "$resolved_symlink" ]; then + old_agent_dir="$( dirname "$resolved_symlink" )" + fi +fi commit_hash="{{ commit_short }}" -yml_path="$old_agent_dir/state.yml" -enc_path="$old_agent_dir/state.enc" +new_agent_dir="/var/lib/elastic-agent/data/elastic-agent-$commit_hash" -new_agent_dir="$( dirname "$old_agent_dir")/elastic-agent-$commit_hash" - -if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then +# copy the state files if there was a previous agent install +if ! [ -z "$old_agent_dir" ] && ! [ "$old_agent_dir" -ef "$new_agent_dir" ]; then + yml_path="$old_agent_dir/state.yml" + enc_path="$old_agent_dir/state.enc" echo "migrate state from $old_agent_dir to $new_agent_dir" if test -f "$yml_path"; then @@ -24,15 +34,17 @@ if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then echo "found "$enc_path", copy to "$new_agent_dir"."
cp "$enc_path" "$new_agent_dir" fi +fi - if test -f "$symlink"; then - echo "found symlink $symlink, unlink" - unlink "$symlink" - fi - - echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" - ln -s "$new_agent_dir/elastic-agent" "$symlink" +# delete symlink if exists +if test -L "$symlink"; then + echo "found symlink $symlink, unlink" + unlink "$symlink" fi +# create symlink to the new agent +echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" +ln -s "$new_agent_dir/elastic-agent" "$symlink" + systemctl daemon-reload 2> /dev/null exit 0 diff --git a/go.mod b/go.mod index 148cee40adc..245d331130f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 github.com/docker/go-units v0.4.0 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 - github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab + github.com/elastic/elastic-agent-autodiscover v0.2.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/elastic-agent-system-metrics v0.4.4 @@ -121,7 +121,6 @@ require ( go.elastic.co/apm/v2 v2.0.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect - go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect diff --git a/go.sum b/go.sum index 47d8d474785..f8fb1ecc1a7 100644 --- a/go.sum +++ b/go.sum @@ -92,6 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -227,6 +228,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -240,6 +242,7 @@ 
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= @@ -281,6 +284,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -376,11 +380,11 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8dsvnhnLK9pe/JHZkYtXEGPfbV4Wt1JPPol0= github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40= -github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab h1:Jk6Mfk5BF8gtfE7X0bNCiDGBtwJVxRI79b4wLCAsP+A= -github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab/go.mod h1:Gg1fsQI+rVms9FJ2DefBSojfPIzgkV8xlyG8fPG0DE8= +github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUdOu0EVlpj53CqCsbU0E4= +github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= -github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= +github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-system-metrics v0.4.4 
h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= @@ -949,7 +953,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1403,6 +1406,7 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= @@ -1562,6 +1566,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index d3edb21888e..327138ac67a 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -75,6 +75,7 @@ func New( var configMgr coordinator.ConfigManager var managed *managedConfigManager var compModifiers []coordinator.ComponentsModifier + var composableManaged bool if configuration.IsStandalone(cfg.Fleet) { log.Info("Parsed configuration and determined agent is managed locally") @@ -102,6 +103,7 @@ func New( } else { log.Info("Parsed configuration and determined agent is managed by Fleet") + composableManaged = true compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server)) managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) if err != nil { @@ -111,7 +113,7 @@ func New( } } - 
composable, err := composable.New(log, rawConfig) + composable, err := composable.New(log, rawConfig, composableManaged) if err != nil { return nil, errors.New(err, "failed to initialize composable controller") } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 0288152f726..09396cf49fc 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -63,17 +63,18 @@ type stateStore interface { } type fleetGateway struct { - log *logger.Logger - client client.Sender - scheduler scheduler.Scheduler - settings *fleetGatewaySettings - agentInfo agentInfo - acker acker.Acker - unauthCounter int - stateFetcher coordinator.StateFetcher - stateStore stateStore - errCh chan error - actionCh chan []fleetapi.Action + log *logger.Logger + client client.Sender + scheduler scheduler.Scheduler + settings *fleetGatewaySettings + agentInfo agentInfo + acker acker.Acker + unauthCounter int + checkinFailCounter int + stateFetcher coordinator.StateFetcher + stateStore stateStore + errCh chan error + actionCh chan []fleetapi.Action } // New creates a new fleet gateway @@ -180,13 +181,25 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee // this mean we are rebooting to change the log level or the system is shutting us down. for ctx.Err() == nil { f.log.Debugf("Checking started") - resp, err := f.execute(ctx) + resp, took, err := f.execute(ctx) if err != nil { - f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err) + f.checkinFailCounter++ + + // Report the first two failures at warn level as they may be recoverable with retries. + if f.checkinFailCounter <= 2 { + f.log.Warnw("Possible transient error during checkin with fleet-server, retrying", + "error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter, + "retry_after_ns", bo.NextWait()) + } else { + f.log.Errorw("Cannot check in with fleet-server, retrying", + "error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter, + "retry_after_ns", bo.NextWait()) + } + if !bo.Wait() { // Something bad has happened and we log it and we should update our current state. err := errors.New( - "execute retry loop was stopped", + "checkin retry loop was stopped", errors.TypeNetwork, errors.M(errors.MetaKeyURI, f.client.URI()), ) @@ -197,6 +210,13 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee } continue } + + if f.checkinFailCounter > 0 { + // Log at the same level as the error logs above so subsequent successes are visible when log level is set to 'error'. + f.log.Errorf("Checkin request to fleet-server succeeded after %d failures", f.checkinFailCounter) + } + + f.checkinFailCounter = 0 // Request was successful, return the collected actions.
return resp, nil } @@ -273,7 +293,7 @@ func (f *fleetGateway) convertToCheckinComponents(components []runtime.Component return checkinComponents } -func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { +func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, time.Duration, error) { ecsMeta, err := info.Metadata() if err != nil { f.log.Error(errors.New("failed to load metadata", err)) @@ -301,7 +321,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, Components: components, } - resp, err := cmd.Execute(ctx, req) + resp, took, err := cmd.Execute(ctx, req) if isUnauth(err) { f.unauthCounter++ @@ -309,15 +329,15 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, f.log.Warnf("retrieved an invalid api key error '%d' times. Starting to unenroll the elastic agent.", f.unauthCounter) return &fleetapi.CheckinResponse{ Actions: []fleetapi.Action{&fleetapi.ActionUnenroll{ActionID: "", ActionType: "UNENROLL", IsDetected: true}}, - }, nil + }, took, nil } - return nil, err + return nil, took, err } f.unauthCounter = 0 if err != nil { - return nil, err + return nil, took, err } // Save the latest ackToken @@ -329,7 +349,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - return resp, nil + return resp, took, nil } // shouldUnenroll checks if the max number of trying an invalid key is reached diff --git a/internal/pkg/agent/application/info/state.go b/internal/pkg/agent/application/info/state.go index 1a6602f51f8..b9d73504d06 100644 --- a/internal/pkg/agent/application/info/state.go +++ b/internal/pkg/agent/application/info/state.go @@ -5,13 +5,15 @@ package info import ( - "fmt" "os" "path/filepath" - "strings" + "runtime" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/release" +) + +const ( + darwin = "darwin" ) // RunningInstalled returns true when executing Agent is the installed Agent. @@ -19,22 +21,19 @@ import ( // This verifies the running executable path based on hard-coded paths // for each platform type. func RunningInstalled() bool { - expected := filepath.Join(paths.InstallPath, paths.BinaryName) + expectedPaths := []string{filepath.Join(paths.InstallPath, paths.BinaryName)} + if runtime.GOOS == darwin { + // For the symlink on darwin the execPath is /usr/local/bin/elastic-agent + expectedPaths = append(expectedPaths, paths.ShellWrapperPath) + } execPath, _ := os.Executable() execPath, _ = filepath.Abs(execPath) - execName := filepath.Base(execPath) - execDir := filepath.Dir(execPath) - if IsInsideData(execDir) { - // executable path is being reported as being down inside of data path - // move up to directories to perform the comparison - execDir = filepath.Dir(filepath.Dir(execDir)) - execPath = filepath.Join(execDir, execName) - } - return paths.ArePathsEqual(expected, execPath) -} -// IsInsideData returns true when the exePath is inside of the current Agents data path. 
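A note on the fleet gateway change above: the new checkinFailCounter drives a leveled-logging retry pattern, warning on the first two consecutive checkin failures (likely transient), erroring on later ones, and reporting the eventual success at error level so recovery stays visible when the agent runs at log level 'error'. A minimal self-contained Go sketch of that pattern (illustrative names only, not the agent's actual API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// checkinLoop mirrors doExecute's logging: warn for the first two failures,
// error afterwards, and report recovery at error level before resetting.
func checkinLoop(execute func() error, maxTries int) error {
	failures := 0
	for try := 0; try < maxTries; try++ {
		if err := execute(); err != nil {
			failures++
			if failures <= 2 {
				fmt.Printf("WARN: possible transient checkin error, retrying (failed_checkins=%d): %v\n", failures, err)
			} else {
				fmt.Printf("ERROR: cannot check in, retrying (failed_checkins=%d): %v\n", failures, err)
			}
			time.Sleep(10 * time.Millisecond) // stand-in for the real exponential backoff
			continue
		}
		if failures > 0 {
			fmt.Printf("ERROR: checkin succeeded after %d failures\n", failures)
		}
		return nil
	}
	return errors.New("checkin retry loop was stopped")
}

func main() {
	calls := 0
	_ = checkinLoop(func() error {
		calls++
		if calls < 4 {
			return errors.New("connection refused")
		}
		return nil
	}, 10)
}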
-func IsInsideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - return strings.HasSuffix(exePath, expectedPath) + execPath = filepath.Join(paths.ExecDir(filepath.Dir(execPath)), filepath.Base(execPath)) + for _, expected := range expectedPaths { + if paths.ArePathsEqual(expected, execPath) { + return true + } + } + return false } diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 1b6ef95b188..b89a197fdff 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strings" "sync" @@ -21,6 +22,8 @@ const ( // AgentLockFileName is the name of the overall Elastic Agent file lock. AgentLockFileName = "agent.lock" tempSubdir = "tmp" + + darwin = "darwin" ) // ExternalInputsPattern is a glob that matches the paths of external configuration files. @@ -190,16 +193,14 @@ func SetInstall(path string) { // initialTop returns the initial top-level path for the binary // // When nested in top-level/data/elastic-agent-${hash}/ the result is top-level/. +// The agent executable for macOS is wrapped in the app bundle, so the path to the binary is +// top-level/data/elastic-agent-${hash}/elastic-agent.app/Contents/MacOS func initialTop() string { - exePath := retrieveExecutablePath() - if insideData(exePath) { - return filepath.Dir(filepath.Dir(exePath)) - } - return exePath + return ExecDir(retrieveExecutableDir()) } // retrieveExecutablePath returns the executing binary, even if the started binary was a symlink -func retrieveExecutablePath() string { +func retrieveExecutableDir() string { execPath, err := os.Executable() if err != nil { panic(err) } @@ -211,8 +212,37 @@ func retrieveExecutablePath() string { return filepath.Dir(evalPath) } -// insideData returns true when the exePath is inside of the current Agents data path. -func insideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - return strings.HasSuffix(exePath, expectedPath) +// isInsideData returns true when the exeDir is inside the current Agent's data path. +func isInsideData(exeDir string) bool { + expectedDir := binaryDir(filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()))) + return strings.HasSuffix(exeDir, expectedDir) +} + +// ExecDir returns the "executable" directory which is: +// 1. The same if the execDir is not inside the data path +// 2. Two levels up if the execDir is inside the data path on non-macOS platforms +// 3.
Five levels up if the execDir is inside the data path on the macOS platform +func ExecDir(execDir string) string { + if isInsideData(execDir) { + execDir = filepath.Dir(filepath.Dir(execDir)) + if runtime.GOOS == darwin { + execDir = filepath.Dir(filepath.Dir(filepath.Dir(execDir))) + } + } + return execDir +} + +// binaryDir returns the application binary directory +// For macOS it appends the path inside of the app bundle +// For other platforms it returns the same dir +func binaryDir(baseDir string) string { + if runtime.GOOS == darwin { + baseDir = filepath.Join(baseDir, "elastic-agent.app", "Contents", "MacOS") + } + return baseDir +} + +// BinaryPath returns the application binary path that is the concatenation of the directory and the agentName +func BinaryPath(baseDir, agentName string) string { + return filepath.Join(binaryDir(baseDir), agentName) } diff --git a/internal/pkg/agent/application/paths/common_test.go b/internal/pkg/agent/application/paths/common_test.go new file mode 100644 index 00000000000..27a9cf80ebd --- /dev/null +++ b/internal/pkg/agent/application/paths/common_test.go @@ -0,0 +1,93 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package paths + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/elastic/elastic-agent/internal/pkg/release" +) + +func validTestPath() string { + validPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) + if runtime.GOOS == darwin { + validPath = filepath.Join(validPath, "elastic-agent.app", "Contents", "MacOS") + } + return validPath +} + +func TestIsInsideData(t *testing.T) { + tests := []struct { + name string + exePath string + res bool + }{ + { + name: "empty", + }, + { + name: "invalid", + exePath: "data/elastic-agent", + }, + { + name: "valid", + exePath: validTestPath(), + res: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + res := isInsideData(tc.exePath) + diff := cmp.Diff(tc.res, res) + if diff != "" { + t.Error(diff) + } + }) + } +} + +func TestExecDir(t *testing.T) { + base := filepath.Join(string(filepath.Separator), "Library", "Elastic", "Agent") + tests := []struct { + name string + execDir string + resDir string + }{ + { + name: "empty", + }, + { + name: "non-data path", + execDir: "data/elastic-agent", + resDir: "data/elastic-agent", + }, + { + name: "valid", + execDir: validTestPath(), + resDir: ".", + }, + { + name: "valid abs", + execDir: filepath.Join(base, validTestPath()), + resDir: base, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + resDir := ExecDir(tc.execDir) + diff := cmp.Diff(tc.resDir, resDir) + if diff != "" { + t.Error(diff) + } + }) + } +} diff --git a/internal/pkg/agent/application/paths/files.go b/internal/pkg/agent/application/paths/files.go index 7d35549e840..e6a1bf2eda1 100644 --- a/internal/pkg/agent/application/paths/files.go +++ b/internal/pkg/agent/application/paths/files.go @@ -32,6 +32,9 @@ const defaultAgentStateStoreYmlFile = "state.yml" // defaultAgentStateStoreFile is the file that will contain the action that can be replayed after restart encrypted. const defaultAgentStateStoreFile = "state.enc" +// defaultInputsDPath returns the location of the inputs.d directory.
+const defaultInputsDPath = "inputs.d" + // AgentConfigYmlFile is a name of file used to store agent information func AgentConfigYmlFile() string { return filepath.Join(Config(), defaultAgentFleetYmlFile) } @@ -82,3 +85,8 @@ func AgentStateStoreYmlFile() string { func AgentStateStoreFile() string { return filepath.Join(Home(), defaultAgentStateStoreFile) } + +// AgentInputsDPath is the directory that contains the fragments of inputs yaml for K8s deployment. +func AgentInputsDPath() string { + return filepath.Join(Config(), defaultInputsDPath) +} diff --git a/internal/pkg/agent/application/upgrade/artifact/config.go b/internal/pkg/agent/application/upgrade/artifact/config.go index 6db38fa612c..b09c6faf7e7 100644 --- a/internal/pkg/agent/application/upgrade/artifact/config.go +++ b/internal/pkg/agent/application/upgrade/artifact/config.go @@ -9,8 +9,12 @@ import ( "strings" "time" + c "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const ( @@ -22,6 +26,10 @@ const ( DefaultSourceURI = "https://artifacts.elastic.co/downloads/" ) +type ConfigReloader interface { + Reload(*Config) error +} + // Config is a configuration used for verifier and downloader type Config struct { // OperatingSystem: operating system [linux, windows, darwin] @@ -49,6 +57,96 @@ type Config struct { httpcommon.HTTPTransportSettings `config:",inline" yaml:",inline"` // Note: use anonymous struct for json inline } +type Reloader struct { + log *logger.Logger + cfg *Config + reloaders []ConfigReloader +} + +func NewReloader(cfg *Config, log *logger.Logger, rr ...ConfigReloader) *Reloader { + return &Reloader{ + cfg: cfg, + log: log, + reloaders: rr, + } +} + +func (r *Reloader) Reload(rawConfig *config.Config) error { + if err := r.reloadConfig(rawConfig); err != nil { + return errors.New(err, "failed to reload config") + } + + if err := r.reloadSourceURI(rawConfig); err != nil { + return errors.New(err, "failed to reload source URI") + } + + for _, reloader := range r.reloaders { + if err := reloader.Reload(r.cfg); err != nil { + return errors.New(err, "failed reloading config") + } + } + + return nil +} + +func (r *Reloader) reloadConfig(rawConfig *config.Config) error { + type reloadConfig struct { + C *Config `json:"agent.download" config:"agent.download"` + } + tmp := &reloadConfig{ + C: DefaultConfig(), + } + if err := rawConfig.Unpack(&tmp); err != nil { + return err + } + + *(r.cfg) = Config{ + OperatingSystem: tmp.C.OperatingSystem, + Architecture: tmp.C.Architecture, + SourceURI: tmp.C.SourceURI, + TargetDirectory: tmp.C.TargetDirectory, + InstallPath: tmp.C.InstallPath, + DropPath: tmp.C.DropPath, + HTTPTransportSettings: tmp.C.HTTPTransportSettings, + } + + return nil +} + +func (r *Reloader) reloadSourceURI(rawConfig *config.Config) error { + type reloadConfig struct { + // SourceURI: source of the artifacts, e.g. https://artifacts.elastic.co/downloads/ + SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` + + // FleetSourceURI: source of the artifacts, e.g. https://artifacts.elastic.co/downloads/ coming from fleet, which uses + // different naming.
+ FleetSourceURI string `json:"agent.download.source_uri" config:"agent.download.source_uri"` + } + cfg := &reloadConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return errors.New(err, "failed to unpack config during reload") + } + + var newSourceURI string + if fleetURI := strings.TrimSpace(cfg.FleetSourceURI); fleetURI != "" { + // fleet configuration takes precedence + newSourceURI = fleetURI + } else if sourceURI := strings.TrimSpace(cfg.SourceURI); sourceURI != "" { + newSourceURI = sourceURI + } + + if newSourceURI != "" { + r.log.Infof("Source URI changed from %q to %q", r.cfg.SourceURI, newSourceURI) + r.cfg.SourceURI = newSourceURI + } else { + // source uri unset, reset to default + r.log.Infof("Source URI reset from %q to %q", r.cfg.SourceURI, DefaultSourceURI) + r.cfg.SourceURI = DefaultSourceURI + } + + return nil +} + // DefaultConfig creates a config with pre-set default values. func DefaultConfig() *Config { transport := httpcommon.DefaultHTTPTransportSettings() @@ -100,3 +198,42 @@ func (c *Config) Arch() string { c.Architecture = arch return c.Architecture } + +// Unpack reads a config object into the settings. +func (c *Config) Unpack(cfg *c.C) error { + tmp := struct { + OperatingSystem string `json:"-" config:",ignore"` + Architecture string `json:"-" config:",ignore"` + SourceURI string `json:"sourceURI" config:"sourceURI"` + TargetDirectory string `json:"targetDirectory" config:"target_directory"` + InstallPath string `yaml:"installPath" config:"install_path"` + DropPath string `yaml:"dropPath" config:"drop_path"` + }{ + OperatingSystem: c.OperatingSystem, + Architecture: c.Architecture, + SourceURI: c.SourceURI, + TargetDirectory: c.TargetDirectory, + InstallPath: c.InstallPath, + DropPath: c.DropPath, + } + + if err := cfg.Unpack(&tmp); err != nil { + return err + } + + transport := DefaultConfig().HTTPTransportSettings + if err := cfg.Unpack(&transport); err != nil { + return err + } + + *c = Config{ + OperatingSystem: tmp.OperatingSystem, + Architecture: tmp.Architecture, + SourceURI: tmp.SourceURI, + TargetDirectory: tmp.TargetDirectory, + InstallPath: tmp.InstallPath, + DropPath: tmp.DropPath, + HTTPTransportSettings: transport, + } + return nil +} diff --git a/internal/pkg/agent/application/upgrade/artifact/config_test.go b/internal/pkg/agent/application/upgrade/artifact/config_test.go new file mode 100644 index 00000000000..803154e465f --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/config_test.go @@ -0,0 +1,248 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package artifact + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func TestReload(t *testing.T) { + type testCase struct { + input string + initialConfig *Config + expectedSourceURI string + expectedTargetDirectory string + expectedInstallDirectory string + expectedDropDirectory string + expectedFingerprint string + expectedTLS bool + expectedTLSEnabled bool + expectedDisableProxy bool + expectedTimeout time.Duration + } + defaultValues := DefaultConfig() + testCases := []testCase{ + { + input: `agent.download: + sourceURI: "testing.uri" + target_directory: "a/b/c" + install_path: "i/p" + drop_path: "d/p" + proxy_disable: true + timeout: 33s + ssl.enabled: true + ssl.ca_trusted_fingerprint: "my_finger_print" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: "a/b/c", + expectedInstallDirectory: "i/p", + expectedDropDirectory: "d/p", + expectedFingerprint: "my_finger_print", + expectedTLS: true, + expectedTLSEnabled: true, + expectedDisableProxy: true, + expectedTimeout: 33 * time.Second, + }, + { + input: `agent.download: + sourceURI: "testing.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + sourceURI: "" +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to empty + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: ``, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when not set + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + sourceURI: " " +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: 
`agent.download: + source_uri: " " +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: " " + sourceURI: " " +`, + initialConfig: DefaultConfig(), + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: ``, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: " " + sourceURI: "testing.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: "testing.uri" + sourceURI: " " +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: "testing.uri" + sourceURI: "another.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + } + + l, _ := logger.NewTesting("t") + for _, tc := range testCases { + cfg := tc.initialConfig + reloader := NewReloader(cfg, l) + + c, err := config.NewConfigFrom(tc.input) + require.NoError(t, err) + + require.NoError(t, reloader.Reload(c)) + + require.Equal(t, tc.expectedSourceURI, cfg.SourceURI) + 
require.Equal(t, tc.expectedTargetDirectory, cfg.TargetDirectory) + require.Equal(t, tc.expectedInstallDirectory, cfg.InstallPath) + require.Equal(t, tc.expectedDropDirectory, cfg.DropPath) + require.Equal(t, tc.expectedTimeout, cfg.Timeout) + + require.Equal(t, tc.expectedDisableProxy, cfg.Proxy.Disable) + + if tc.expectedTLS { + require.NotNil(t, cfg.TLS) + require.Equal(t, tc.expectedTLSEnabled, *cfg.TLS.Enabled) + require.Equal(t, tc.expectedFingerprint, cfg.TLS.CATrustedFingerprint) + } else { + require.Nil(t, cfg.TLS) + } + } +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go index 84e353ff661..b5de15fc9a8 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) // Downloader is a downloader with a predefined set of downloaders. @@ -50,3 +51,17 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version return "", err } + +func (e *Downloader) Reload(c *artifact.Config) error { + for _, d := range e.dd { + reloadable, ok := d.(download.Reloader) + if !ok { + continue + } + + if err := reloadable.Reload(c); err != nil { + return errors.New(err, "failed reloading artifact config for composed downloader") + } + } + return nil +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go index 26c714d8c52..8930c2a1ba6 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go @@ -5,12 +5,11 @@ package composed import ( - "errors" - "github.com/hashicorp/go-multierror" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) // Verifier is a verifier with a predefined set of verifiers. 
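// A minimal sketch, not part of this diff: the fan-out above means callers never need
// to know which concrete downloaders are reloadable. Anything holding a
// download.Downloader can probe for the optional download.Reloader capability before
// pushing a fresh artifact.Config. The helper reloadIfSupported and its package are
// hypothetical, for illustration only.
package example

import (
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
)

func reloadIfSupported(dl download.Downloader, newCfg *artifact.Config) error {
	// same capability probe the composed Downloader.Reload performs for each child
	if r, ok := dl.(download.Reloader); ok {
		return r.Reload(newCfg)
	}
	return nil // not reloadable; the downloader keeps its original config
}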
@@ -54,3 +53,17 @@ func (e *Verifier) Verify(a artifact.Artifact, version string) error { return err } + +func (e *Verifier) Reload(c *artifact.Config) error { + for _, v := range e.vv { + reloadable, ok := v.(download.Reloader) + if !ok { + continue + } + + if err := reloadable.Reload(c); err != nil { + return errors.New(err, "failed reloading artifact config for composed verifier") + } + } + return nil +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go index 5ef423825a9..7e7ca63ed23 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go @@ -73,6 +73,23 @@ func NewDownloaderWithClient(log progressLogger, config *artifact.Config, client } } +func (e *Downloader) Reload(c *artifact.Config) error { + // reload client + client, err := c.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + ) + if err != nil { + return errors.New(err, "http.downloader: failed to generate client out of config") + } + + client.Transport = withHeaders(client.Transport, headers) + + e.client = *client + e.config = c + + return nil +} + // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) { @@ -81,7 +98,9 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version defer func() { if err != nil { for _, path := range downloadedFiles { - os.Remove(path) + if err := os.Remove(path); err != nil { + e.log.Warnf("failed to cleanup %s: %v", path, err) + } } } }() @@ -171,12 +190,14 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f resp, err := e.client.Do(req.WithContext(ctx)) if err != nil { - return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } defer resp.Body.Close() if resp.StatusCode != 200 { - return "", errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } fileSize := -1 @@ -193,7 +214,8 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f if err != nil { reportCancel() dp.ReportFailed(err) - return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(err, "copying fetched package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } reportCancel() dp.ReportComplete() diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go index 4568c0f2cdd..46590f4e5db 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go 
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go @@ -60,6 +60,24 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri return v, nil } +func (v *Verifier) Reload(c *artifact.Config) error { + // reload client + client, err := c.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + httpcommon.WithModRoundtripper(func(rt http.RoundTripper) http.RoundTripper { + return withHeaders(rt, headers) + }), + ) + if err != nil { + return errors.New(err, "http.verifier: failed to generate client out of config") + } + + v.client = *client + v.config = c + + return nil +} + // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. func (v *Verifier) Verify(a artifact.Artifact, version string) error { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go b/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go new file mode 100644 index 00000000000..3b2239740c7 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go @@ -0,0 +1,14 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package download + +import ( + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" +) + +// Reloader is an interface allowing to reload artifact config +type Reloader interface { + Reload(*artifact.Config) error +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index b858fca0fc3..2a09c65e522 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -5,19 +5,26 @@ package snapshot import ( + "context" "encoding/json" "fmt" "strings" + "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" - - "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) +type Downloader struct { + downloader download.Downloader + versionOverride string +} + // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
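// A minimal sketch, not part of this diff: because Reload is an optional interface
// discovered by type assertion, a refactor that changes a Reload signature would
// silently disable config reloading rather than fail the build. Compile-time guards
// like these (hypothetical, placed in any package that can import all three) turn
// that into a build error.
package example

import (
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed"
	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
)

var (
	_ download.Reloader = (*http.Downloader)(nil)
	_ download.Reloader = (*http.Verifier)(nil)
	_ download.Reloader = (*composed.Downloader)(nil)
	_ download.Reloader = (*composed.Verifier)(nil)
)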
func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride string) (download.Downloader, error) { @@ -25,7 +32,36 @@ func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride if err != nil { return nil, err } - return http.NewDownloader(log, cfg) + + httpDownloader, err := http.NewDownloader(log, cfg) + if err != nil { + return nil, errors.New(err, "failed to create snapshot downloader") + } + + return &Downloader{ + downloader: httpDownloader, + versionOverride: versionOverride, + }, nil +} + +func (e *Downloader) Reload(c *artifact.Config) error { + reloader, ok := e.downloader.(artifact.ConfigReloader) + if !ok { + return nil + } + + cfg, err := snapshotConfig(c, e.versionOverride) + if err != nil { + return errors.New(err, "snapshot.downloader: failed to generate snapshot config") + } + + return reloader.Reload(cfg) +} + +// Download fetches the package from configured source. +// Returns absolute path to downloaded package and an error. +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (string, error) { + return e.downloader.Download(ctx, a, version) } func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact.Config, error) { @@ -35,12 +71,13 @@ func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact. } return &artifact.Config{ - OperatingSystem: config.OperatingSystem, - Architecture: config.Architecture, - SourceURI: snapshotURI, - TargetDirectory: config.TargetDirectory, - InstallPath: config.InstallPath, - DropPath: config.DropPath, + OperatingSystem: config.OperatingSystem, + Architecture: config.Architecture, + SourceURI: snapshotURI, + TargetDirectory: config.TargetDirectory, + InstallPath: config.InstallPath, + DropPath: config.DropPath, + HTTPTransportSettings: config.HTTPTransportSettings, }, nil } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go index 31ad26a0474..c114775cbdb 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go @@ -8,8 +8,14 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) +type Verifier struct { + verifier download.Verifier + versionOverride string +} + // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versionOverride string) (download.Verifier, error) { @@ -17,5 +23,32 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versio if err != nil { return nil, err } - return http.NewVerifier(cfg, allowEmptyPgp, pgp) + v, err := http.NewVerifier(cfg, allowEmptyPgp, pgp) + if err != nil { + return nil, errors.New(err, "failed to create snapshot verifier") + } + + return &Verifier{ + verifier: v, + versionOverride: versionOverride, + }, nil +} + +// Verify checks the package from configured source. 
+func (e *Verifier) Verify(a artifact.Artifact, version string) error { + return e.verifier.Verify(a, version) +} + +func (e *Verifier) Reload(c *artifact.Config) error { + reloader, ok := e.verifier.(artifact.ConfigReloader) + if !ok { + return nil + } + + cfg, err := snapshotConfig(c, e.versionOverride) + if err != nil { + return errors.New(err, "snapshot.verifier: failed to generate snapshot config") + } + + return reloader.Reload(cfg) } diff --git a/internal/pkg/agent/application/upgrade/cleanup.go b/internal/pkg/agent/application/upgrade/cleanup.go index 5e0618dfe78..2581e30a1d9 100644 --- a/internal/pkg/agent/application/upgrade/cleanup.go +++ b/internal/pkg/agent/application/upgrade/cleanup.go @@ -13,11 +13,15 @@ import ( "github.com/hashicorp/go-multierror" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/pkg/core/logger" ) -// preUpgradeCleanup will remove files that do not have the passed version number from the downloads directory. -func preUpgradeCleanup(version string) error { - files, err := os.ReadDir(paths.Downloads()) +// cleanNonMatchingVersionsFromDownloads will remove files that do not have the passed version number from the downloads directory. +func cleanNonMatchingVersionsFromDownloads(log *logger.Logger, version string) error { + downloadsPath := paths.Downloads() + log.Debugw("Cleaning up non-matching downloaded versions", "version", version, "downloads.path", downloadsPath) + + files, err := os.ReadDir(downloadsPath) if err != nil { return fmt.Errorf("unable to read directory %q: %w", paths.Downloads(), err) } diff --git a/internal/pkg/agent/application/upgrade/cleanup_test.go b/internal/pkg/agent/application/upgrade/cleanup_test.go index 736a9c42b3d..1170c26946d 100644 --- a/internal/pkg/agent/application/upgrade/cleanup_test.go +++ b/internal/pkg/agent/application/upgrade/cleanup_test.go @@ -9,7 +9,9 @@ import ( "path/filepath" "testing" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/stretchr/testify/require" ) @@ -31,7 +33,8 @@ func setupDir(t *testing.T) { func TestPreUpgradeCleanup(t *testing.T) { setupDir(t) - err := preUpgradeCleanup("8.4.0") + log := newErrorLogger(t) + err := cleanNonMatchingVersionsFromDownloads(log, "8.4.0") require.NoError(t, err) files, err := os.ReadDir(paths.Downloads()) @@ -42,3 +45,14 @@ func TestPreUpgradeCleanup(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("hello, world!"), p) } + +func newErrorLogger(t *testing.T) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + + log, err := logger.NewFromConfig("", loggerCfg, false) + require.NoError(t, err) + return log +} diff --git a/internal/pkg/agent/application/upgrade/rollback.go b/internal/pkg/agent/application/upgrade/rollback.go index 8ce6958beae..b4f6014fb3d 100644 --- a/internal/pkg/agent/application/upgrade/rollback.go +++ b/internal/pkg/agent/application/upgrade/rollback.go @@ -31,33 +31,35 @@ const ( ) // Rollback rollbacks to previous version which was functioning before upgrade. 
-func Rollback(ctx context.Context, prevHash, currentHash string) error { +func Rollback(ctx context.Context, log *logger.Logger, prevHash string, currentHash string) error { // change symlink - if err := ChangeSymlink(ctx, prevHash); err != nil { + if err := ChangeSymlink(ctx, log, prevHash); err != nil { return err } // revert active commit - if err := UpdateActiveCommit(prevHash); err != nil { + if err := UpdateActiveCommit(log, prevHash); err != nil { return err } // Restart + log.Info("Restarting the agent after rollback") if err := restartAgent(ctx); err != nil { return err } // cleanup everything except version we're rolling back into - return Cleanup(prevHash, true) + return Cleanup(log, prevHash, true) } // Cleanup removes all artifacts and files related to a specified version. -func Cleanup(currentHash string, removeMarker bool) error { +func Cleanup(log *logger.Logger, currentHash string, removeMarker bool) error { + log.Debugw("Cleaning up upgrade", "hash", currentHash, "remove_marker", removeMarker) <-time.After(afterRestartDelay) // remove upgrade marker if removeMarker { - if err := CleanMarker(); err != nil { + if err := CleanMarker(log); err != nil { return err } } @@ -74,7 +76,9 @@ func Cleanup(currentHash string, removeMarker bool) error { } // remove symlink to avoid upgrade failures, ignore error - _ = os.Remove(prevSymlinkPath()) + prevSymlink := prevSymlinkPath() + log.Debugw("Removing previous symlink path", "file.path", prevSymlink) + _ = os.Remove(prevSymlink) dirPrefix := fmt.Sprintf("%s-", agentName) currentDir := fmt.Sprintf("%s-%s", agentName, currentHash) @@ -88,6 +92,7 @@ func Cleanup(currentHash string, removeMarker bool) error { } hashedDir := filepath.Join(paths.Data(), dir) + log.Debugw("Removing hashed data directory", "file.path", hashedDir) if cleanupErr := install.RemovePath(hashedDir); cleanupErr != nil { err = multierror.Append(err, cleanupErr) } @@ -113,6 +118,7 @@ func InvokeWatcher(log *logger.Logger) error { } }() + log.Debugw("Starting upgrade watcher", "path", cmd.Path, "args", cmd.Args, "env", cmd.Env, "dir", cmd.Dir) return cmd.Start() } diff --git a/internal/pkg/agent/application/upgrade/service_darwin.go b/internal/pkg/agent/application/upgrade/service_darwin.go index 2bdb435147b..58709dd3e53 100644 --- a/internal/pkg/agent/application/upgrade/service_darwin.go +++ b/internal/pkg/agent/application/upgrade/service_darwin.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/exec" - "path/filepath" "regexp" "strconv" "strings" @@ -50,13 +49,13 @@ func (p *darwinPidProvider) Close() {} func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { piders := []func(context.Context) (int, error){ - p.piderFromCmd(ctx, "launchctl", "list", paths.ServiceName), + p.piderFromCmd("launchctl", "list", paths.ServiceName), } // if release is specifically built to be upgradeable (using DEV flag) // we dont require to run as a service and will need sudo fallback if release.Upgradeable() { - piders = append(piders, p.piderFromCmd(ctx, "sudo", "launchctl", "list", paths.ServiceName)) + piders = append(piders, p.piderFromCmd("sudo", "launchctl", "list", paths.ServiceName)) } var pidErrors error @@ -72,7 +71,7 @@ func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { return 0, pidErrors } -func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args ...string) func(context.Context) (int, error) { +func (p *darwinPidProvider) piderFromCmd(name string, args ...string) func(context.Context) (int, error) { return 
func(context.Context) (int, error) { listCmd := exec.Command(name, args...) listCmd.SysProcAttr = &syscall.SysProcAttr{ @@ -115,8 +114,8 @@ func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args } func invokeCmd(topPath string) *exec.Cmd { - homeExePath := filepath.Join(topPath, agentName) - + // paths.BinaryPath properly derives the newPath depending on the platform. The path to the binary for macOS is inside of the app bundle. + homeExePath := paths.BinaryPath(topPath, agentName) cmd := exec.Command(homeExePath, watcherSubcommand, "--path.config", paths.Config(), "--path.home", paths.Top(), diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index 926e310fda3..510fd00cb8f 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -40,6 +40,10 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } } + u.log.Debugw("Downloading upgrade artifact", "version", version, + "source_uri", settings.SourceURI, "drop_path", settings.DropPath, + "target_path", settings.TargetDirectory, "install_path", settings.InstallPath) + verifier, err := newVerifier(version, u.log, &settings) if err != nil { return "", errors.New(err, "initiating verifier") diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 7757ff6a9a1..fa337e3907a 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const markerFilename = ".update-marker" @@ -91,7 +92,7 @@ func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error { +func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action *fleetapi.ActionUpgrade) error { prevVersion := release.Version() prevHash := release.Commit() if len(prevHash) > hashLen { @@ -112,11 +113,12 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi. } markerPath := markerFilePath() + log.Infow("Writing upgrade marker file", "file.path", markerPath, "hash", marker.Hash, "prev_hash", prevHash) if err := ioutil.WriteFile(markerPath, markerBytes, 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to create update marker file", errors.M(errors.MetaKeyPath, markerPath)) } - if err := UpdateActiveCommit(hash); err != nil { + if err := UpdateActiveCommit(log, hash); err != nil { return err } @@ -124,8 +126,9 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi. } // UpdateActiveCommit updates active.commit file to point to active version. 
-func UpdateActiveCommit(hash string) error { +func UpdateActiveCommit(log *logger.Logger, hash string) error { activeCommitPath := filepath.Join(paths.Top(), agentCommitFile) + log.Infow("Updating active commit", "file.path", activeCommitPath, "hash", hash) if err := ioutil.WriteFile(activeCommitPath, []byte(hash), 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to update active commit", errors.M(errors.MetaKeyPath, activeCommitPath)) } @@ -134,8 +137,9 @@ func UpdateActiveCommit(hash string) error { } // CleanMarker removes a marker from disk. -func CleanMarker() error { +func CleanMarker(log *logger.Logger) error { markerFile := markerFilePath() + log.Debugw("Removing marker file", "file.path", markerFile) if err := os.Remove(markerFile); !os.IsNotExist(err) { return err } diff --git a/internal/pkg/agent/application/upgrade/step_relink.go b/internal/pkg/agent/application/upgrade/step_relink.go index 9c998262ecd..13c49693062 100644 --- a/internal/pkg/agent/application/upgrade/step_relink.go +++ b/internal/pkg/agent/application/upgrade/step_relink.go @@ -14,23 +14,32 @@ import ( "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +const ( + windows = "windows" + exe = ".exe" ) // ChangeSymlink updates symlink paths to match current version. -func ChangeSymlink(ctx context.Context, targetHash string) error { +func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) error { // create symlink to elastic-agent-{hash} hashedDir := fmt.Sprintf("%s-%s", agentName, targetHash) symlinkPath := filepath.Join(paths.Top(), agentName) - newPath := filepath.Join(paths.Top(), "data", hashedDir, agentName) + + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. + newPath := paths.BinaryPath(filepath.Join(paths.Top(), "data", hashedDir), agentName) // handle windows suffixes - if runtime.GOOS == "windows" { - symlinkPath += ".exe" - newPath += ".exe" + if runtime.GOOS == windows { + symlinkPath += exe + newPath += exe } prevNewPath := prevSymlinkPath() + log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newPath, "prev_path", prevNewPath) // remove symlink to avoid upgrade failures if err := os.Remove(prevNewPath); !os.IsNotExist(err) { @@ -49,7 +58,7 @@ func prevSymlinkPath() string { agentPrevName := agentName + ".prev" // handle windows suffixes - if runtime.GOOS == "windows" { + if runtime.GOOS == windows { agentPrevName = agentName + ".exe.prev" } diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 108593c5083..45d007e55f4 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -8,10 +8,8 @@ import ( "archive/tar" "archive/zip" "compress/gzip" - "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -21,27 +19,31 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" ) // unpack unpacks archive correctly, skips root (symlink, config...) 
unpacks data/* -func (u *Upgrader) unpack(ctx context.Context, version, archivePath string) (string, error) { +func (u *Upgrader) unpack(version, archivePath string) (string, error) { // unpack must occur in directory that holds the installation directory // or the extraction will be double nested var hash string var err error - if runtime.GOOS == "windows" { - hash, err = unzip(version, archivePath) + if runtime.GOOS == windows { + hash, err = unzip(u.log, archivePath) } else { - hash, err = untar(version, archivePath) + hash, err = untar(u.log, version, archivePath) } + if err != nil { + u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "hash", hash) return "", err } + u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "hash", hash) return hash, nil } -func unzip(version, archivePath string) (string, error) { +func unzip(log *logger.Logger, archivePath string) (string, error) { var hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { @@ -65,7 +67,7 @@ func unzip(version, archivePath string) (string, error) { //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := ioutil.ReadAll(rc) + hashBytes, err := io.ReadAll(rc) if err != nil || len(hashBytes) < hashLen { return err } @@ -82,9 +84,11 @@ func unzip(version, archivePath string) (string, error) { path := filepath.Join(paths.Data(), strings.TrimPrefix(fileName, "data/")) if f.FileInfo().IsDir() { - os.MkdirAll(path, f.Mode()) + log.Debugw("Unpacking directory", "archive", "zip", "file.path", path) + _ = os.MkdirAll(path, f.Mode()) } else { - os.MkdirAll(filepath.Dir(path), f.Mode()) + log.Debugw("Unpacking file", "archive", "zip", "file.path", path) + _ = os.MkdirAll(filepath.Dir(path), f.Mode()) f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { return err @@ -95,6 +99,7 @@ func unzip(version, archivePath string) (string, error) { } }() + //nolint:gosec // legacy if _, err = io.Copy(f, rc); err != nil { return err } @@ -119,7 +124,7 @@ func unzip(version, archivePath string) (string, error) { return hash, nil } -func untar(version, archivePath string) (string, error) { +func untar(log *logger.Logger, version string, archivePath string) (string, error) { r, err := os.Open(archivePath) if err != nil { return "", errors.New(fmt.Sprintf("artifact for 'elastic-agent' version '%s' could not be found at '%s'", version, archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) @@ -157,7 +162,7 @@ func untar(version, archivePath string) (string, error) { fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := ioutil.ReadAll(tr) + hashBytes, err := io.ReadAll(tr) if err != nil || len(hashBytes) < hashLen { return "", err } @@ -183,6 +188,7 @@ func untar(version, archivePath string) (string, error) { mode := fi.Mode() switch { case mode.IsRegular(): + log.Debugw("Unpacking file", "archive", "tar", "file.path", abs) // just to be sure, it should already be created by Dir type if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) @@ -193,6 +199,7 @@ func untar(version, archivePath string) (string, error) { return "", errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, 
errors.M(errors.MetaKeyPath, abs)) } + //nolint:gosec // legacy _, err = io.Copy(wf, tr) if closeErr := wf.Close(); closeErr != nil && err == nil { err = closeErr @@ -201,6 +208,7 @@ func untar(version, archivePath string) (string, error) { return "", fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) } case mode.IsDir(): + log.Debugw("Unpacking directory", "archive", "tar", "file.path", abs) if err := os.MkdirAll(abs, 0755); err != nil { return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 31f48d8d0d0..e4ef8c6066f 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -7,7 +7,6 @@ package upgrade import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -113,26 +112,27 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { + u.log.Infow("Upgrading agent", "version", version, "source_uri", sourceURI) span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() - err = preUpgradeCleanup(u.agentInfo.Version()) + err = cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q before update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads before update", "error.message", err, "downloads.path", paths.Downloads()) } sourceURI = u.sourceURI(sourceURI) archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { - // Run the same preUpgradeCleanup task to get rid of any newly downloaded files + // Run the same pre-upgrade cleanup task to get rid of any newly downloaded files // This may have an issue if users are upgrading to the same version number. 
- if dErr := preUpgradeCleanup(u.agentInfo.Version()); dErr != nil { - u.log.Errorf("Unable to remove file after verification failure: %v", dErr) + if dErr := cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()); dErr != nil { + u.log.Errorw("Unable to remove file after verification failure", "error.message", dErr) } return nil, err } - newHash, err := u.unpack(ctx, version, archivePath) + newHash, err := u.unpack(version, archivePath) if err != nil { return nil, err } @@ -146,31 +146,35 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return nil, nil } - if err := copyActionStore(newHash); err != nil { + if err := copyActionStore(u.log, newHash); err != nil { return nil, errors.New(err, "failed to copy action store") } - if err := ChangeSymlink(ctx, newHash); err != nil { - rollbackInstall(ctx, newHash) + if err := ChangeSymlink(ctx, u.log, newHash); err != nil { + u.log.Errorw("Rolling back: changing symlink failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } - if err := u.markUpgrade(ctx, newHash, action); err != nil { - rollbackInstall(ctx, newHash) + if err := u.markUpgrade(ctx, u.log, newHash, action); err != nil { + u.log.Errorw("Rolling back: marking upgrade failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } if err := InvokeWatcher(u.log); err != nil { - rollbackInstall(ctx, newHash) + u.log.Errorw("Rolling back: starting watcher failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) // Clean everything from the downloads dir + u.log.Debugw("Removing downloads directory", "file.path", paths.Downloads()) err = os.RemoveAll(paths.Downloads()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads after update", "error.message", err, "file.path", paths.Downloads()) } return cb, nil @@ -212,20 +216,21 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -func rollbackInstall(ctx context.Context, hash string) { +func rollbackInstall(ctx context.Context, log *logger.Logger, hash string) { os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) - _ = ChangeSymlink(ctx, release.ShortCommit()) + _ = ChangeSymlink(ctx, log, release.ShortCommit()) } -func copyActionStore(newHash string) error { +func copyActionStore(log *logger.Logger, newHash string) error { // copies legacy action_store.yml, state.yml and state.enc encrypted file if exists storePaths := []string{paths.AgentActionStoreFile(), paths.AgentStateStoreYmlFile(), paths.AgentStateStoreFile()} + newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) + log.Debugw("Copying action store", "new_home_path", newHome) for _, currentActionStorePath := range storePaths { - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) newActionStorePath := filepath.Join(newHome, filepath.Base(currentActionStorePath)) - - currentActionStore, err := ioutil.ReadFile(currentActionStorePath) + log.Debugw("Copying action store path", "from", currentActionStorePath, "to", newActionStorePath) + currentActionStore, err := os.ReadFile(currentActionStorePath) if os.IsNotExist(err) { // nothing to copy continue @@ -234,7 +239,7 @@ func copyActionStore(newHash string) error { 
return err } - if err := ioutil.WriteFile(newActionStorePath, currentActionStore, 0600); err != nil { + if err := os.WriteFile(newActionStorePath, currentActionStore, 0600); err != nil { return err } } diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 74e574c4806..baa21918695 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -649,8 +649,13 @@ func performGET(cfg setupConfig, client *kibana.Client, path string, response in for i := 0; i < cfg.Kibana.RetryMaxCount; i++ { code, result, err := client.Connection.Request("GET", path, nil, nil, nil) if err != nil || code != 200 { - err = fmt.Errorf("http GET request to %s%s fails: %w. Response: %s", - client.Connection.URL, path, err, truncateString(result)) + if err != nil { + err = fmt.Errorf("http GET request to %s%s fails: %w. Response: %s", + client.Connection.URL, path, err, truncateString(result)) + } else { + err = fmt.Errorf("http GET request to %s%s fails. StatusCode: %d Response: %s", + client.Connection.URL, path, code, truncateString(result)) + } fmt.Fprintf(writer, "%s failed: %s\n", msg, err) <-time.After(cfg.Kibana.RetrySleepDuration) continue @@ -668,8 +673,13 @@ func performPOST(cfg setupConfig, client *kibana.Client, path string, writer io. for i := 0; i < cfg.Kibana.RetryMaxCount; i++ { code, result, err := client.Connection.Request("POST", path, nil, nil, nil) if err != nil || code >= 400 { - err = fmt.Errorf("http POST request to %s%s fails: %w. Response: %s", - client.Connection.URL, path, err, truncateString(result)) + if err != nil { + err = fmt.Errorf("http POST request to %s%s fails: %w. Response: %s", + client.Connection.URL, path, err, truncateString(result)) + } else { + err = fmt.Errorf("http POST request to %s%s fails. StatusCode: %d Response: %s", + client.Connection.URL, path, code, truncateString(result)) + } lastErr = err fmt.Fprintf(writer, "%s failed: %s\n", msg, err) <-time.After(cfg.Kibana.RetrySleepDuration) diff --git a/internal/pkg/agent/cmd/upgrade.go b/internal/pkg/agent/cmd/upgrade.go index 83128b970e8..5e5d75aeeba 100644 --- a/internal/pkg/agent/cmd/upgrade.go +++ b/internal/pkg/agent/cmd/upgrade.go @@ -36,8 +36,6 @@ func newUpgradeCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Comman } func upgradeCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error { - fmt.Fprintln(streams.Out, "The upgrade process of Elastic Agent is currently EXPERIMENTAL and should not be used in production") - version := args[0] sourceURI, _ := cmd.Flags().GetString("source-uri") diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go index 64bd604cd85..353017b714e 100644 --- a/internal/pkg/agent/cmd/watch.go +++ b/internal/pkg/agent/cmd/watch.go @@ -15,6 +15,7 @@ import ( "github.com/spf13/cobra" + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" @@ -40,8 +41,13 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command Short: "Watch watches Elastic Agent for failures and initiates rollback.", Long: `Watch watches Elastic Agent for failures and initiates rollback.`, Run: func(_ *cobra.Command, _ []string) { - if err := watchCmd(); err != nil { - fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + log, err := configuredLogger() + if err != nil { + fmt.Fprintf(streams.Err, "Error configuring logger: %v\n%s\n", err, troubleshootMessage()) + os.Exit(1) + } + if err := watchCmd(log); err != nil { + log.Errorw("Watch command failed", "error.message", err) + fmt.Fprintf(streams.Err, "Watch command failed: %v\n%s\n", err, troubleshootMessage()) os.Exit(1) } }, @@ -50,12 +56,7 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command return cmd } -func watchCmd() error { - log, err := configuredLogger() - if err != nil { - return err - } - +func watchCmd(log *logp.Logger) error { marker, err := upgrade.LoadMarker() if err != nil { log.Error("failed to load marker", err) @@ -88,7 +89,7 @@ func watchCmd() error { // if we're not within grace and marker is still there it might mean // that cleanup was not performed ok, cleanup everything except current version // hash is the same as hash of agent which initiated watcher. - if err := upgrade.Cleanup(release.ShortCommit(), true); err != nil { + if err := upgrade.Cleanup(log, release.ShortCommit(), true); err != nil { log.Error("rollback failed", err) } // exit nicely @@ -97,8 +98,8 @@ func watchCmd() error { ctx := context.Background() if err := watch(ctx, tilGrace, log); err != nil { - log.Debugf("Error detected proceeding to rollback: %v", err) - err = upgrade.Rollback(ctx, marker.PrevHash, marker.Hash) + log.Errorf("Error detected, proceeding to rollback: %v", err) + err = upgrade.Rollback(ctx, log, marker.PrevHash, marker.Hash) if err != nil { log.Error("rollback failed", err) } @@ -109,7 +110,7 @@ func watchCmd() error { // in windows it might leave self untouched, this will get cleaned up // later at the start, because for windows we leave marker untouched. 
removeMarker := !isWindows() - err = upgrade.Cleanup(marker.Hash, removeMarker) + err = upgrade.Cleanup(log, marker.Hash, removeMarker) if err != nil { log.Error("rollback failed", err) } diff --git a/internal/pkg/agent/control/server/listener_windows.go b/internal/pkg/agent/control/server/listener_windows.go index 69d211502ea..73fd3b97d95 100644 --- a/internal/pkg/agent/control/server/listener_windows.go +++ b/internal/pkg/agent/control/server/listener_windows.go @@ -10,6 +10,7 @@ package server import ( "net" "os/user" + "strings" "github.com/pkg/errors" @@ -18,9 +19,14 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const ( + NTAUTHORITY_SYSTEM = "S-1-5-18" + ADMINISTRATORS_GROUP = "S-1-5-32-544" +) + // createListener creates a named pipe listener on Windows -func createListener(_ *logger.Logger) (net.Listener, error) { - sd, err := securityDescriptor() +func createListener(log *logger.Logger) (net.Listener, error) { + sd, err := securityDescriptor(log) if err != nil { return nil, err } @@ -31,7 +37,7 @@ func cleanupListener(_ *logger.Logger) { // nothing to do on windows } -func securityDescriptor() (string, error) { +func securityDescriptor(log *logger.Logger) (string, error) { u, err := user.Current() if err != nil { return "", errors.Wrap(err, "failed to get current user") } @@ -42,11 +48,42 @@ // String definition: https://docs.microsoft.com/en-us/windows/win32/secauthz/ace-strings // Give generic read/write access to the specified user. descriptor := "D:P(A;;GA;;;" + u.Uid + ")" - if u.Username == "NT AUTHORITY\\SYSTEM" { + + if isAdmin, err := isWindowsAdmin(u); err != nil { + // do not fail, agent would end up in a loop, continue with limited permissions + log.Warnf("failed to detect admin: %v", err) + } else if isAdmin { // running as SYSTEM, include Administrators group so Administrators can talk over // the named pipe to the running Elastic Agent system process // https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems - descriptor += "(A;;GA;;;S-1-5-32-544)" // Administrators group + descriptor += "(A;;GA;;;" + ADMINISTRATORS_GROUP + ")" } return descriptor, nil } + +func isWindowsAdmin(u *user.User) (bool, error) { + if u.Username == "NT AUTHORITY\\SYSTEM" { + return true, nil + } + + if equalsSystemGroup(u.Uid) || equalsSystemGroup(u.Gid) { + return true, nil + } + + groups, err := u.GroupIds() + if err != nil { + return false, errors.Wrap(err, "failed to get current user groups") + } + + for _, groupSid := range groups { + if equalsSystemGroup(groupSid) { + return true, nil + } + } + + return false, nil +} + +func equalsSystemGroup(s string) bool { + return strings.EqualFold(s, NTAUTHORITY_SYSTEM) || strings.EqualFold(s, ADMINISTRATORS_GROUP) } diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go index 58f3fa73312..431fd1db931 100644 --- a/internal/pkg/agent/install/install.go +++ b/internal/pkg/agent/install/install.go @@ -6,17 +6,20 @@ package install import ( "fmt" - "io/ioutil" "os" "path/filepath" + "runtime" "github.com/otiai10/copy" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) +const ( + darwin = "darwin" +) + // Install installs Elastic Agent persistently on the system including creating and starting its 
service. func Install(cfgFile string) error { dir, err := findDirectory() @@ -53,15 +56,39 @@ func Install(cfgFile string) error { // place shell wrapper, if present on platform if paths.ShellWrapperPath != "" { - err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) - if err == nil { - err = ioutil.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) - } - if err != nil { - return errors.New( - err, - fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), - errors.M("destination", paths.ShellWrapperPath)) + // Install symlink for darwin instead of the wrapper script. + // Elastic Agent should be the first process that launchd starts in order to be able to grant + // Full-Disk Access (FDA) to the agent and its child processes. + // This is specifically important for osquery FDA permissions at the moment. + if runtime.GOOS == darwin { + // Check if previous shell wrapper or symlink exists and remove it so it can be overwritten + if _, err := os.Lstat(paths.ShellWrapperPath); err == nil { + if err := os.Remove(paths.ShellWrapperPath); err != nil { + return errors.New( + err, + fmt.Sprintf("failed to remove (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } + err = os.Symlink("/Library/Elastic/Agent/elastic-agent", paths.ShellWrapperPath) + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to create elastic-agent symlink (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } else { + err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) + if err == nil { + //nolint: gosec // this is intended to be an executable shell script, not changing the permissions for the linter + err = os.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) + } + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } } @@ -146,12 +173,7 @@ func findDirectory() (string, error) { if err != nil { return "", err } - sourceDir := filepath.Dir(execPath) - if info.IsInsideData(sourceDir) { - // executable path is being reported as being down inside of data path - // move up to directories to perform the copy - sourceDir = filepath.Dir(filepath.Dir(sourceDir)) - } + sourceDir := paths.ExecDir(filepath.Dir(execPath)) err = verifyDirectory(sourceDir) if err != nil { return "", err } diff --git a/internal/pkg/agent/vars/vars.go b/internal/pkg/agent/vars/vars.go index b685583895f..65c0ef2ae1f 100644 --- a/internal/pkg/agent/vars/vars.go +++ b/internal/pkg/agent/vars/vars.go @@ -22,7 +22,7 @@ func WaitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, var cancel context.CancelFunc var vars []*transpiler.Vars - composable, err := composable.New(l, cfg) + composable, err := composable.New(l, cfg, false) if err != nil { return nil, fmt.Errorf("failed to create composable controller: %w", err) } diff --git a/internal/pkg/agent/vault/vault_darwin.c b/internal/pkg/agent/vault/vault_darwin.c index c2bb85bb354..b5c1777dac1 100644 --- a/internal/pkg/agent/vault/vault_darwin.c +++ b/internal/pkg/agent/vault/vault_darwin.c @@ -209,10 +209,10 @@ OSStatus RemoveKeychainItem(SecKeychainRef keychain, const char *name, const cha char* GetOSStatusMessage(OSStatus status) { CFStringRef s = SecCopyErrorMessageString(status, NULL); char *p; - int n; - n = CFStringGetLength(s)*8; - p = malloc(n); - CFStringGetCString(s, p, n, kCFStringEncodingUTF8); + int n; 
+ n = CFStringGetLength(s)*8; + p = malloc(n); + CFStringGetCString(s, p, n, kCFStringEncodingUTF8); CFRelease(s); - return p; + return p; } diff --git a/internal/pkg/composable/context.go b/internal/pkg/composable/context.go index 97767f4a5d5..3a805efd249 100644 --- a/internal/pkg/composable/context.go +++ b/internal/pkg/composable/context.go @@ -14,7 +14,7 @@ import ( ) // ContextProviderBuilder creates a new context provider based on the given config and returns it. -type ContextProviderBuilder func(log *logger.Logger, config *config.Config) (corecomp.ContextProvider, error) +type ContextProviderBuilder func(log *logger.Logger, config *config.Config, managed bool) (corecomp.ContextProvider, error) // MustAddContextProvider adds a new ContextProviderBuilder and panics if it AddContextProvider returns an error. func (r *providerRegistry) MustAddContextProvider(name string, builder ContextProviderBuilder) { @@ -24,6 +24,7 @@ func (r *providerRegistry) MustAddContextProvider(name string, builder ContextPr } } +//nolint:dupl,goimports,nolintlint // false positive // AddContextProvider adds a new ContextProviderBuilder func (r *providerRegistry) AddContextProvider(name string, builder ContextProviderBuilder) error { r.lock.Lock() @@ -32,11 +33,14 @@ func (r *providerRegistry) AddContextProvider(name string, builder ContextProvid if name == "" { return fmt.Errorf("provider name is required") } + if strings.ToLower(name) != name { return fmt.Errorf("provider name must be lowercase") } + _, contextExists := r.contextProviders[name] _, dynamicExists := r.dynamicProviders[name] + if contextExists || dynamicExists { return fmt.Errorf("provider '%s' is already registered", name) } diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index babd1230586..0af5a0d93e8 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -46,7 +46,7 @@ type controller struct { } // New creates a new controller. 
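// A minimal sketch, not part of this diff, of a context provider builder matching the
// new three-argument signature above. The added managed flag tells a provider whether
// the agent runs under Fleet so it can adjust its behavior (for example, enabling
// hints-based autodiscovery only for standalone agents; that policy is an assumption
// here, not shown in this diff). staticProvider, Builder, and the corecomp import
// path are illustrative assumptions.
package example

import (
	"github.com/elastic/elastic-agent/internal/pkg/config"
	corecomp "github.com/elastic/elastic-agent/internal/pkg/core/composable"
	"github.com/elastic/elastic-agent/pkg/core/logger"
)

type staticProvider struct{ managed bool }

// Run implements corecomp.ContextProvider; a real provider would push mappings
// through the comm, elided here to keep the sketch small.
func (p *staticProvider) Run(_ corecomp.ContextProviderComm) error {
	return nil
}

// Builder matches the updated composable.ContextProviderBuilder type.
func Builder(_ *logger.Logger, _ *config.Config, managed bool) (corecomp.ContextProvider, error) {
	return &staticProvider{managed: managed}, nil
}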
-func New(log *logger.Logger, c *config.Config) (Controller, error) { +func New(log *logger.Logger, c *config.Config, managed bool) (Controller, error) { l := log.Named("composable") var providersCfg Config @@ -65,7 +65,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { // explicitly disabled; skipping continue } - provider, err := builder(l, pCfg) + provider, err := builder(l, pCfg, managed) if err != nil { return nil, errors.New(err, fmt.Sprintf("failed to build provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) } @@ -82,7 +82,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { // explicitly disabled; skipping continue } - provider, err := builder(l.Named(strings.Join([]string{"providers", name}, ".")), pCfg) + provider, err := builder(l.Named(strings.Join([]string{"providers", name}, ".")), pCfg, managed) if err != nil { return nil, errors.New(err, fmt.Sprintf("failed to build provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) } diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index a8c3ec7df93..d4fdbb8fdfc 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -78,7 +78,7 @@ func TestController(t *testing.T) { log, err := logger.New("", false) require.NoError(t, err) - c, err := composable.New(log, cfg) + c, err := composable.New(log, cfg, false) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/pkg/composable/dynamic.go b/internal/pkg/composable/dynamic.go index c83c2ccc2e2..22ff438fe71 100644 --- a/internal/pkg/composable/dynamic.go +++ b/internal/pkg/composable/dynamic.go @@ -34,7 +34,7 @@ type DynamicProvider interface { } // DynamicProviderBuilder creates a new dynamic provider based on the given config and returns it. -type DynamicProviderBuilder func(log *logger.Logger, config *config.Config) (DynamicProvider, error) +type DynamicProviderBuilder func(log *logger.Logger, config *config.Config, managed bool) (DynamicProvider, error) // MustAddDynamicProvider adds a new DynamicProviderBuilder and panics if it AddDynamicProvider returns an error. 
func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicProviderBuilder) { @@ -44,28 +44,29 @@ func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicPr } } +//nolint:dupl,goimports,nolintlint // false positive // AddDynamicProvider adds a new DynamicProviderBuilder -func (r *providerRegistry) AddDynamicProvider(name string, builder DynamicProviderBuilder) error { +func (r *providerRegistry) AddDynamicProvider(providerName string, builder DynamicProviderBuilder) error { r.lock.Lock() defer r.lock.Unlock() - if name == "" { - return fmt.Errorf("provider name is required") + if providerName == "" { + return fmt.Errorf("provider name is required") } - if strings.ToLower(name) != name { - return fmt.Errorf("provider name must be lowercase") + if strings.ToLower(providerName) != providerName { + return fmt.Errorf("provider name must be lowercase") } - _, contextExists := r.contextProviders[name] - _, dynamicExists := r.dynamicProviders[name] + _, contextExists := r.contextProviders[providerName] + _, dynamicExists := r.dynamicProviders[providerName] if contextExists || dynamicExists { - return fmt.Errorf("provider '%s' is already registered", name) + return fmt.Errorf("provider '%s' is already registered", providerName) } if builder == nil { - return fmt.Errorf("provider '%s' cannot be registered with a nil factory", name) + return fmt.Errorf("provider '%s' cannot be registered with a nil factory", providerName) } - r.dynamicProviders[name] = builder - r.logger.Debugf("Registered provider: %s", name) + r.dynamicProviders[providerName] = builder + r.logger.Debugf("Registered provider: %s", providerName) return nil } diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go index ed8eb956afe..2fb5bb284e5 100644 --- a/internal/pkg/composable/providers/agent/agent.go +++ b/internal/pkg/composable/providers/agent/agent.go @@ -42,6 +42,6 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, _ bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/agent/agent_test.go b/internal/pkg/composable/providers/agent/agent_test.go index f3c6904b05c..cd15e8058ea 100644 --- a/internal/pkg/composable/providers/agent/agent_test.go +++ b/internal/pkg/composable/providers/agent/agent_test.go @@ -20,7 +20,7 @@ func TestContextProvider(t *testing.T) { testutils.InitStorage(t) builder, _ := composable.Providers.GetContextProvider("agent") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 8647677e6e8..fa58b00a880 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -102,7 +102,7 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { } // DynamicProviderBuilder builds the dynamic provider. 
-func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { var cfg Config if c == nil { c = config.New() @@ -146,7 +146,7 @@ func generateData(event bus.Event) (*dockerContainerData, error) { "image": container.Image, "labels": processorLabelMap, }, - "to": "container", + "target": "container", }, }, }, diff --git a/internal/pkg/composable/providers/docker/docker_test.go b/internal/pkg/composable/providers/docker/docker_test.go index d0b5c69ba4d..a035fe06a58 100644 --- a/internal/pkg/composable/providers/docker/docker_test.go +++ b/internal/pkg/composable/providers/docker/docker_test.go @@ -53,7 +53,7 @@ func TestGenerateData(t *testing.T) { "co_elastic_logs/disable": "true", }, }, - "to": "container", + "target": "container", }, }, } diff --git a/internal/pkg/composable/providers/env/env.go b/internal/pkg/composable/providers/env/env.go index 6f65120de48..ac6ef4be446 100644 --- a/internal/pkg/composable/providers/env/env.go +++ b/internal/pkg/composable/providers/env/env.go @@ -31,7 +31,7 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, _ bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/env/env_test.go b/internal/pkg/composable/providers/env/env_test.go index f41f6200697..a03f37ee577 100644 --- a/internal/pkg/composable/providers/env/env_test.go +++ b/internal/pkg/composable/providers/env/env_test.go @@ -17,7 +17,7 @@ import ( func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("env") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index cc98021e77b..b722a5f4c69 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -77,7 +77,7 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. 
-func ContextProviderBuilder(log *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(log *logger.Logger, c *config.Config, _ bool) (corecomp.ContextProvider, error) { p := &contextProvider{ logger: log, fetcher: getHostInfo, diff --git a/internal/pkg/composable/providers/host/host_test.go b/internal/pkg/composable/providers/host/host_test.go index 869f6a82050..7cf2f208abd 100644 --- a/internal/pkg/composable/providers/host/host_test.go +++ b/internal/pkg/composable/providers/host/host_test.go @@ -33,10 +33,10 @@ func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("host") log, err := logger.New("host_test", false) require.NoError(t, err) - provider, err := builder(log, c) + provider, err := builder(log, c, true) require.NoError(t, err) - hostProvider := provider.(*contextProvider) + hostProvider, _ := provider.(*contextProvider) hostProvider.fetcher = returnHostMapping() require.Equal(t, 100*time.Millisecond, hostProvider.CheckInterval) diff --git a/internal/pkg/composable/providers/kubernetes/config.go b/internal/pkg/composable/providers/kubernetes/config.go index 9bec67b66b8..4a97b417c59 100644 --- a/internal/pkg/composable/providers/kubernetes/config.go +++ b/internal/pkg/composable/providers/kubernetes/config.go @@ -7,6 +7,8 @@ package kubernetes import ( "time" + "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" "github.com/elastic/elastic-agent-libs/logp" @@ -34,6 +36,9 @@ type Config struct { LabelsDedot bool `config:"labels.dedot"` AnnotationsDedot bool `config:"annotations.dedot"` + + Hints *config.C `config:"hints"` + Prefix string `config:"prefix"` } // Resources config section for resources' config blocks @@ -56,6 +61,7 @@ func (c *Config) InitDefaults() { c.LabelsDedot = true c.AnnotationsDedot = true c.AddResourceMetadata = metadata.GetDefaultResourceMetadataConfig() + c.Prefix = "co.elastic" } // Validate ensures correctness of config diff --git a/internal/pkg/composable/providers/kubernetes/hints.go b/internal/pkg/composable/providers/kubernetes/hints.go new file mode 100644 index 00000000000..98bde12f54d --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/hints.go @@ -0,0 +1,260 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
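+
+// The Hints and Prefix fields added to Config above are what this new file
+// consumes: Hints.Enabled() gates the flow and Prefix selects the annotation
+// namespace. A rough sketch of switching it on (illustrative only, assuming
+// the internal config.NewConfigFrom helper used elsewhere in this change and
+// that Unpack fills the *config.C field the same way the provider's builder
+// does):
+//
+//	c, _ := config.NewConfigFrom(map[string]interface{}{
+//		"hints": map[string]interface{}{"enabled": true},
+//	})
+//	var cfg Config
+//	_ = c.Unpack(&cfg)  // InitDefaults sets Prefix to "co.elastic"
+//	cfg.Hints.Enabled() // => true; the provider checks exactly this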
+ +package kubernetes + +import ( + "fmt" + "regexp" + "strings" + + "github.com/elastic/elastic-agent-autodiscover/utils" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +const ( + integration = "package" + datastreams = "data_streams" + + host = "host" + period = "period" + timeout = "timeout" + metricspath = "metrics_path" + username = "username" + password = "password" + stream = "stream" // this is the container stream: stdout/stderr +) + +type hintsBuilder struct { + Key string + + logger *logp.Logger +} + +func (m *hintsBuilder) getIntegration(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, integration) +} + +func (m *hintsBuilder) getDataStreams(hints mapstr.M) []string { + ds := utils.GetHintAsList(hints, m.Key, datastreams) + return ds +} + +func (m *hintsBuilder) getHost(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, host) +} + +func (m *hintsBuilder) getStreamHost(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, host) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getPeriod(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, period) +} + +func (m *hintsBuilder) getStreamPeriod(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, period) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getTimeout(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, timeout) +} + +func (m *hintsBuilder) getStreamTimeout(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, timeout) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getMetricspath(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, metricspath) +} + +func (m *hintsBuilder) getStreamMetricspath(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, metricspath) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getUsername(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, username) +} + +func (m *hintsBuilder) getStreamUsername(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, username) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getPassword(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, password) +} + +func (m *hintsBuilder) getStreamPassword(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, password) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getContainerStream(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, stream) +} + +func (m *hintsBuilder) getStreamContainerStream(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, stream) + return utils.GetHintString(hints, m.Key, key) +} + +// Replace hints like `'${kubernetes.pod.ip}:6379'` with the actual values from the resource metadata. 
+// So if you replace the `${kubernetes.pod.ip}` part with the value from the Pod's metadata
+// you end up with something like `10.28.90.345:6379`.
+func (m *hintsBuilder) getFromMeta(value string, kubeMeta mapstr.M) string {
+	if value == "" {
+		return ""
+	}
+	r := regexp.MustCompile(`\${([^{}]+)}`)
+	matches := r.FindAllString(value, -1)
+	for _, match := range matches {
+		key := strings.TrimSuffix(strings.TrimPrefix(match, "${kubernetes."), "}")
+		val, err := kubeMeta.GetValue(key)
+		if err != nil {
+			m.logger.Debugf("cannot retrieve key from k8smeta: %v", key)
+			return ""
+		}
+		hintVal, ok := val.(string)
+		if !ok {
+			m.logger.Debugf("cannot convert value into string: %v", val)
+			return ""
+		}
+		value = strings.Replace(value, match, hintVal, -1)
+	}
+	return value
+}
+
+// GenerateHintsMapping takes the hints map extracted from the annotations and constructs the final
+// hints mapping to be emitted.
+func GenerateHintsMapping(hints mapstr.M, kubeMeta mapstr.M, logger *logp.Logger, containerID string) mapstr.M {
+	builder := hintsBuilder{
+		Key:    "hints", // consider making this configurable
+		logger: logger,
+	}
+
+	hintsMapping := mapstr.M{}
+	integration := builder.getIntegration(hints)
+	if integration == "" {
+		return hintsMapping
+	}
+	integrationHints := mapstr.M{}
+
+	if containerID != "" {
+		_, _ = hintsMapping.Put("container_id", containerID)
+		// Add the default container log fallback to enable any template which defines
+		// a log input with a `"${kubernetes.hints.container_logs.enabled} == true"` condition
+		_, _ = integrationHints.Put("container_logs.enabled", true)
+	}
+
+	// TODO: add support for processors
+	// Processors should be data_stream specific.
+	// Add a basic processor as a base like:
+	//- add_fields:
+	//    target: kubernetes
+	//    fields:
+	//      hints: true
+	// Blocked by https://github.com/elastic/elastic-agent/issues/735
+
+	integrationHost := builder.getFromMeta(builder.getHost(hints), kubeMeta)
+	if integrationHost != "" {
+		_, _ = integrationHints.Put(host, integrationHost)
+	}
+	integrationPeriod := builder.getFromMeta(builder.getPeriod(hints), kubeMeta)
+	if integrationPeriod != "" {
+		_, _ = integrationHints.Put(period, integrationPeriod)
+	}
+	integrationTimeout := builder.getFromMeta(builder.getTimeout(hints), kubeMeta)
+	if integrationTimeout != "" {
+		_, _ = integrationHints.Put(timeout, integrationTimeout)
+	}
+	integrationMetricsPath := builder.getFromMeta(builder.getMetricspath(hints), kubeMeta)
+	if integrationMetricsPath != "" {
+		_, _ = integrationHints.Put(metricspath, integrationMetricsPath)
+	}
+	integrationUsername := builder.getFromMeta(builder.getUsername(hints), kubeMeta)
+	if integrationUsername != "" {
+		_, _ = integrationHints.Put(username, integrationUsername)
+	}
+	integrationPassword := builder.getFromMeta(builder.getPassword(hints), kubeMeta)
+	if integrationPassword != "" {
+		_, _ = integrationHints.Put(password, integrationPassword)
+	}
+	integrationContainerStream := builder.getFromMeta(builder.getContainerStream(hints), kubeMeta)
+	if integrationContainerStream != "" {
+		_, _ = integrationHints.Put(stream, integrationContainerStream)
+	}
+
+	dataStreams := builder.getDataStreams(hints)
+	if len(dataStreams) == 0 {
+		_, _ = integrationHints.Put("enabled", true)
+	}
+	for _, dataStream := range dataStreams {
+		streamHints := mapstr.M{
+			"enabled": true,
+		}
+		if integrationPeriod != "" {
+			_, _ = streamHints.Put(period, integrationPeriod)
+		}
+		if integrationHost != "" {
+			_, _ = streamHints.Put(host, integrationHost)
+		}
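+		// The integration-level values copied here act as per-stream
+		// defaults; the stream-scoped lookups further below override them
+		// when a `<data_stream>.<field>` hint is present.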
if integrationTimeout != "" { + _, _ = streamHints.Put(timeout, integrationTimeout) + } + if integrationMetricsPath != "" { + _, _ = streamHints.Put(metricspath, integrationMetricsPath) + } + if integrationUsername != "" { + _, _ = streamHints.Put(username, integrationUsername) + } + if integrationPassword != "" { + _, _ = streamHints.Put(password, integrationPassword) + } + if integrationContainerStream != "" { + _, _ = streamHints.Put(stream, integrationContainerStream) + } + + streamPeriod := builder.getFromMeta(builder.getStreamPeriod(hints, dataStream), kubeMeta) + if streamPeriod != "" { + _, _ = streamHints.Put(period, streamPeriod) + } + streamHost := builder.getFromMeta(builder.getStreamHost(hints, dataStream), kubeMeta) + if streamHost != "" { + _, _ = streamHints.Put(host, streamHost) + } + streamTimeout := builder.getFromMeta(builder.getStreamTimeout(hints, dataStream), kubeMeta) + if streamTimeout != "" { + _, _ = streamHints.Put(timeout, streamTimeout) + } + streamMetricsPath := builder.getFromMeta(builder.getStreamMetricspath(hints, dataStream), kubeMeta) + if streamMetricsPath != "" { + _, _ = streamHints.Put(metricspath, streamMetricsPath) + } + streamUsername := builder.getFromMeta(builder.getStreamUsername(hints, dataStream), kubeMeta) + if streamUsername != "" { + _, _ = streamHints.Put(username, streamUsername) + } + streamPassword := builder.getFromMeta(builder.getStreamPassword(hints, dataStream), kubeMeta) + if streamPassword != "" { + _, _ = streamHints.Put(password, streamPassword) + } + streamContainerStream := builder.getFromMeta(builder.getStreamContainerStream(hints, dataStream), kubeMeta) + if streamContainerStream != "" { + _, _ = streamHints.Put(stream, streamContainerStream) + } + _, _ = integrationHints.Put(dataStream, streamHints) + + } + + _, _ = hintsMapping.Put(integration, integrationHints) + + return hintsMapping +} diff --git a/internal/pkg/composable/providers/kubernetes/hints_test.go b/internal/pkg/composable/providers/kubernetes/hints_test.go new file mode 100644 index 00000000000..04c25575f26 --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/hints_test.go @@ -0,0 +1,368 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package kubernetes + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func TestGenerateHintsMapping(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "info, key, keyspace", + "host": "${kubernetes.pod.ip}:6379", + "info": mapstr.M{"period": "1m", "timeout": "41s"}, + "key": mapstr.M{"period": "10m"}, + "package": "redis", + "password": "password", + "username": "username", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "redis": mapstr.M{ + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + "period": "42s", + "info": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "1m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "41s", + }, "key": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "10m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, "keyspace": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "42s", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithDefaults(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "host": 
"${kubernetes.pod.ip}:6379", + "package": "redis", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "redis": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithContainerID(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "info, key, keyspace", + "host": "${kubernetes.pod.ip}:6379", + "info": mapstr.M{"period": "1m", "timeout": "41s"}, + "key": mapstr.M{"period": "10m"}, + "package": "redis", + "password": "password", + "username": "username", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "container_id": "asdfghjklqwerty", + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + "period": "42s", + "info": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "1m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "41s", + }, "key": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "10m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, "keyspace": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "42s", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "asdfghjklqwerty") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithLogStream(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + 
}, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "access, error", + "access": mapstr.M{"stream": "stdout"}, + "error": mapstr.M{"stream": "stderr"}, + "package": "apache", + }, + } + + expected := mapstr.M{ + "container_id": "asdfghjkl", + "apache": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "access": mapstr.M{ + "enabled": true, + "stream": "stdout", + }, "error": mapstr.M{ + "enabled": true, + "stream": "stderr", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "asdfghjkl") + + assert.Equal(t, expected, hintsMapping) +} diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index 9f43522f2da..73309439a78 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -7,9 +7,11 @@ package kubernetes import ( "fmt" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/logp" + k8s "k8s.io/client-go/kubernetes" - "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -34,12 +36,13 @@ func init() { } type dynamicProvider struct { - logger *logger.Logger - config *Config + logger *logger.Logger + config *Config + managed bool } // DynamicProviderBuilder builds the dynamic provider. -func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { var cfg Config if c == nil { c = config.New() @@ -49,11 +52,15 @@ func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable return nil, errors.New(err, "failed to unpack configuration") } - return &dynamicProvider{logger, &cfg}, nil + return &dynamicProvider{logger, &cfg, managed}, nil } // Run runs the kubernetes context provider. 
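+// When hints are enabled it first logs a beta warning; the managed flag is
+// passed through to each eventer below so that hints mappings are emitted
+// only for standalone (non-managed) agents.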
func (p *dynamicProvider) Run(comm composable.DynamicProviderComm) error { + if p.config.Hints.Enabled() { + betalogger := logp.NewLogger("cfgwarn") + betalogger.Warnf("BETA: Hints' feature is beta.") + } eventers := make([]Eventer, 0, 3) if p.config.Resources.Pod.Enabled { eventer, err := p.watchResource(comm, "pod") @@ -153,19 +160,19 @@ func (p *dynamicProvider) newEventer( client k8s.Interface) (Eventer, error) { switch resourceType { case "pod": - eventer, err := NewPodEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewPodEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } return eventer, nil case nodeScope: - eventer, err := NewNodeEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewNodeEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } return eventer, nil case "service": - eventer, err := NewServiceEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewServiceEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } diff --git a/internal/pkg/composable/providers/kubernetes/node.go b/internal/pkg/composable/providers/kubernetes/node.go index a1539afb9c1..0e5aebc8931 100644 --- a/internal/pkg/composable/providers/kubernetes/node.go +++ b/internal/pkg/composable/providers/kubernetes/node.go @@ -43,7 +43,8 @@ func NewNodeEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-node", client, &kubernetes.Node{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index ab19e7d2ce2..8415304b00b 100644 --- a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -93,11 +93,6 @@ func TestGenerateNodeData(t *testing.T) { type nodeMeta struct{} // Generate generates node metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. 
prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (n *nodeMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index a8b11b06585..27c9b53bec2 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ b/internal/pkg/composable/providers/kubernetes/pod.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "github.com/elastic/elastic-agent-autodiscover/utils" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" c "github.com/elastic/elastic-agent-libs/config" @@ -23,15 +25,16 @@ import ( ) type pod struct { - logger *logp.Logger - cleanupTimeout time.Duration - comm composable.DynamicProviderComm - scope string - config *Config - metagen metadata.MetaGen watcher kubernetes.Watcher nodeWatcher kubernetes.Watcher + comm composable.DynamicProviderComm + metagen metadata.MetaGen namespaceWatcher kubernetes.Watcher + config *Config + logger *logp.Logger + scope string + managed bool + cleanupTimeout time.Duration // Mutex used by configuration updates not triggered by the main watcher, // to avoid race conditions between cross updates and deletions. @@ -51,7 +54,8 @@ func NewPodEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, @@ -95,6 +99,7 @@ func NewPodEventer( watcher: watcher, nodeWatcher: nodeWatcher, namespaceWatcher: namespaceWatcher, + managed: managed, } watcher.AddEventHandler(p) @@ -149,10 +154,32 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { data := generatePodData(pod, p.metagen, namespaceAnnotations) data.mapping["scope"] = p.scope - // Emit the pod - // We emit Pod + containers to ensure that configs matching Pod only - // get Pod metadata (not specific to any container) - _ = p.comm.AddOrUpdate(data.uid, PodPriority, data.mapping, data.processors) + + if p.config.Hints.Enabled() { // This is "hints based autodiscovery flow" + if !p.managed { + if ann, ok := data.mapping["annotations"]; ok { + annotations, _ := ann.(mapstr.M) + hints := utils.GenerateHints(annotations, "", p.config.Prefix) + if len(hints) > 0 { + p.logger.Debugf("Extracted hints are :%v", hints) + hintsMapping := GenerateHintsMapping(hints, data.mapping, p.logger, "") + p.logger.Debugf("Generated hints mappings are :%v", hintsMapping) + _ = p.comm.AddOrUpdate( + data.uid, + PodPriority, + map[string]interface{}{"hints": hintsMapping}, + data.processors, + ) + } + } + } + } else { // This is the "template-based autodiscovery" flow + // emit normal mapping to be used in dynamic variable resolution + // Emit the pod + // We emit Pod + containers to ensure that configs matching Pod only + // get Pod metadata (not specific to any container) + _ = p.comm.AddOrUpdate(data.uid, PodPriority, data.mapping, data.processors) + } // Emit all containers in the pod // We should deal with init containers stopping after initialization @@ -160,7 +187,7 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { } func (p *pod) emitContainers(pod *kubernetes.Pod, namespaceAnnotations mapstr.M) { - generateContainerData(p.comm, pod, p.metagen, namespaceAnnotations) 
+ generateContainerData(p.comm, pod, p.metagen, namespaceAnnotations, p.logger, p.managed, p.config) } func (p *pod) emitStopped(pod *kubernetes.Pod) { @@ -240,6 +267,12 @@ func generatePodData( _ = safemapstr.Put(annotations, k, v) } k8sMapping["annotations"] = annotations + // Pass labels(not dedoted) to all events so that they can be used in templating. + labels := mapstr.M{} + for k, v := range pod.GetObjectMeta().GetLabels() { + _ = safemapstr.Put(labels, k, v) + } + k8sMapping["labels"] = labels processors := []map[string]interface{}{} // meta map includes metadata that go under kubernetes.* @@ -265,7 +298,10 @@ func generateContainerData( comm composable.DynamicProviderComm, pod *kubernetes.Pod, kubeMetaGen metadata.MetaGen, - namespaceAnnotations mapstr.M) { + namespaceAnnotations mapstr.M, + logger *logp.Logger, + managed bool, + config *Config) { containers := kubernetes.GetContainersInPod(pod) @@ -275,6 +311,12 @@ func generateContainerData( _ = safemapstr.Put(annotations, k, v) } + // Pass labels to all events so that it can be used in templating. + labels := mapstr.M{} + for k, v := range pod.GetObjectMeta().GetLabels() { + _ = safemapstr.Put(labels, k, v) + } + for _, c := range containers { // If it doesn't have an ID, container doesn't exist in // the runtime, emit only an event if we are stopping, so @@ -299,8 +341,9 @@ func generateContainerData( if len(namespaceAnnotations) != 0 { k8sMapping["namespace_annotations"] = namespaceAnnotations } - // add annotations to be discoverable by templates + // add annotations and labels to be discoverable by templates k8sMapping["annotations"] = annotations + k8sMapping["labels"] = labels //container ECS fields cmeta := mapstr.M{ @@ -344,7 +387,28 @@ func generateContainerData( _, _ = containerMeta.Put("port", fmt.Sprintf("%v", port.ContainerPort)) _, _ = containerMeta.Put("port_name", port.Name) k8sMapping["container"] = containerMeta - _ = comm.AddOrUpdate(eventID, ContainerPriority, k8sMapping, processors) + + if config.Hints.Enabled() { // This is "hints based autodiscovery flow" + if !managed { + if ann, ok := k8sMapping["annotations"]; ok { + annotations, _ := ann.(mapstr.M) + hints := utils.GenerateHints(annotations, "", config.Prefix) + if len(hints) > 0 { + logger.Debugf("Extracted hints are :%v", hints) + hintsMapping := GenerateHintsMapping(hints, k8sMapping, logger, c.ID) + logger.Debugf("Generated hints mappings are :%v", hintsMapping) + _ = comm.AddOrUpdate( + eventID, + PodPriority, + map[string]interface{}{"hints": hintsMapping}, + processors, + ) + } + } + } + } else { // This is the "template-based autodiscovery" flow + _ = comm.AddOrUpdate(eventID, ContainerPriority, k8sMapping, processors) + } } } else { k8sMapping["container"] = containerMeta diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index 95361fd2ce0..7409ad1a3ea 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -9,17 +9,28 @@ import ( "fmt" "testing" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" 
"github.com/elastic/elastic-agent-libs/mapstr" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "github.com/elastic/elastic-agent/internal/pkg/config" ) +func getLogger() *logger.Logger { + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + l, _ := logger.NewFromConfig("", loggerCfg, false) + return l +} + func TestGeneratePodData(t *testing.T) { pod := &kubernetes.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -153,13 +164,21 @@ func TestGenerateContainerPodData(t *testing.T) { context.TODO(), providerDataChan, } + logger := getLogger() + var cfg Config + c := config.New() + _ = c.Unpack(&cfg) generateContainerData( &comm, pod, &podMeta{}, mapstr.M{ "nsa": "nsb", - }) + }, + logger, + true, + &cfg, + ) mapping := map[string]interface{}{ "namespace": pod.GetNamespace(), @@ -274,13 +293,21 @@ func TestEphemeralContainers(t *testing.T) { context.TODO(), providerDataChan, } + + logger := getLogger() + var cfg Config + c := config.New() + _ = c.Unpack(&cfg) generateContainerData( &comm, pod, &podMeta{}, mapstr.M{ "nsa": "nsb", - }) + }, + logger, + true, + &cfg) mapping := map[string]interface{}{ "namespace": pod.GetNamespace(), @@ -366,11 +393,6 @@ func (t *MockDynamicComm) Remove(id string) { type podMeta struct{} // Generate generates pod metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (p *podMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetes/service.go b/internal/pkg/composable/providers/kubernetes/service.go index 49c20627734..4060c12e646 100644 --- a/internal/pkg/composable/providers/kubernetes/service.go +++ b/internal/pkg/composable/providers/kubernetes/service.go @@ -43,7 +43,8 @@ func NewServiceEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-service", client, &kubernetes.Service{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 69e945ee1cd..1943e3cfcdb 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -107,11 +107,6 @@ func TestGenerateServiceData(t *testing.T) { type svcMeta struct{} // Generate generates svc metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. 
prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (s *svcMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/config.go b/internal/pkg/composable/providers/kubernetesleaderelection/config.go index d92d35566a2..7ccc2f9a799 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/config.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/config.go @@ -8,10 +8,12 @@ import "github.com/elastic/elastic-agent-autodiscover/kubernetes" // Config for kubernetes_leaderelection provider type Config struct { - KubeConfig string `config:"kube_config"` - KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` + KubeConfig string `config:"kube_config"` + // Name of the leaderelection lease LeaderLease string `config:"leader_lease"` + + KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` } // InitDefaults initializes the default values for the config. diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index d0d773d1663..1fc6c7e958d 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -33,7 +33,7 @@ type contextProvider struct { } // ContextProviderBuilder builds the provider. -func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index d6e8190c13a..543d0cd6b28 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -36,7 +36,7 @@ type contextProviderK8sSecrets struct { } // ContextProviderBuilder builds the context provider. 
-func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 388f33074bb..f633a9f062e 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -51,7 +51,7 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) require.NoError(t, err) - p, err := ContextProviderBuilder(logger, cfg) + p, err := ContextProviderBuilder(logger, cfg, true) require.NoError(t, err) fp, _ := p.(*contextProviderK8sSecrets) @@ -106,7 +106,7 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) require.NoError(t, err) - p, err := ContextProviderBuilder(logger, cfg) + p, err := ContextProviderBuilder(logger, cfg, true) require.NoError(t, err) fp, _ := p.(*contextProviderK8sSecrets) diff --git a/internal/pkg/composable/providers/local/local.go b/internal/pkg/composable/providers/local/local.go index b44affc78df..b54e6142ee0 100644 --- a/internal/pkg/composable/providers/local/local.go +++ b/internal/pkg/composable/providers/local/local.go @@ -32,7 +32,7 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, c *config.Config, _ bool) (corecomp.ContextProvider, error) { p := &contextProvider{} if c != nil { err := c.Unpack(p) diff --git a/internal/pkg/composable/providers/local/local_test.go b/internal/pkg/composable/providers/local/local_test.go index 6afe29251d5..dfec629b88a 100644 --- a/internal/pkg/composable/providers/local/local_test.go +++ b/internal/pkg/composable/providers/local/local_test.go @@ -26,7 +26,7 @@ func TestContextProvider(t *testing.T) { }) require.NoError(t, err) builder, _ := composable.Providers.GetContextProvider("local") - provider, err := builder(nil, cfg) + provider, err := builder(nil, cfg, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic.go b/internal/pkg/composable/providers/localdynamic/localdynamic.go index 0fd81738976..9d9f5c501ae 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic.go @@ -41,7 +41,7 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { } // DynamicProviderBuilder builds the dynamic provider. 
-func DynamicProviderBuilder(_ *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(_ *logger.Logger, c *config.Config, _ bool) (composable.DynamicProvider, error) { p := &dynamicProvider{} if c != nil { err := c.Unpack(p) diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic_test.go b/internal/pkg/composable/providers/localdynamic/localdynamic_test.go index a20b37852d9..8cc0a44ccd7 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic_test.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic_test.go @@ -60,7 +60,7 @@ func TestContextProvider(t *testing.T) { }) require.NoError(t, err) builder, _ := composable.Providers.GetDynamicProvider("local_dynamic") - provider, err := builder(nil, cfg) + provider, err := builder(nil, cfg, true) require.NoError(t, err) comm := ctesting.NewDynamicComm(context.Background()) diff --git a/internal/pkg/composable/providers/path/path.go b/internal/pkg/composable/providers/path/path.go index 05af5bcd0b0..389a21fe6bc 100644 --- a/internal/pkg/composable/providers/path/path.go +++ b/internal/pkg/composable/providers/path/path.go @@ -14,7 +14,7 @@ import ( ) func init() { - composable.Providers.MustAddContextProvider("path", ContextProviderBuilder) + composable.Providers.AddContextProvider("path", ContextProviderBuilder) } type contextProvider struct{} @@ -34,6 +34,6 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, _ bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/path/path_test.go b/internal/pkg/composable/providers/path/path_test.go index 14f263e56db..094865d3fbd 100644 --- a/internal/pkg/composable/providers/path/path_test.go +++ b/internal/pkg/composable/providers/path/path_test.go @@ -18,7 +18,7 @@ import ( func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("path") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/testing/dynamic.go b/internal/pkg/composable/testing/dynamic.go index bfa48dff57d..99b499835cd 100644 --- a/internal/pkg/composable/testing/dynamic.go +++ b/internal/pkg/composable/testing/dynamic.go @@ -81,6 +81,7 @@ func (t *DynamicComm) Previous(id string) (DynamicState, bool) { return prev, ok } +//nolint:prealloc,goimports,nolintlint // false positive // PreviousIDs returns the previous set mapping ID. func (t *DynamicComm) PreviousIDs() []string { t.lock.Lock() @@ -100,6 +101,7 @@ func (t *DynamicComm) Current(id string) (DynamicState, bool) { return curr, ok } +//nolint:prealloc,goimports,nolintlint // false positive // CurrentIDs returns the current set mapping ID. func (t *DynamicComm) CurrentIDs() []string { t.lock.Lock() diff --git a/internal/pkg/core/backoff/backoff.go b/internal/pkg/core/backoff/backoff.go index 06723e7db9a..c97eaae199d 100644 --- a/internal/pkg/core/backoff/backoff.go +++ b/internal/pkg/core/backoff/backoff.go @@ -4,11 +4,16 @@ package backoff +import "time" + // Backoff defines the interface for backoff strategies. type Backoff interface { // Wait blocks for a duration of time governed by the backoff strategy. 
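+	// Wait returns false if the done channel is closed before the interval
+	// elapses, and true otherwise.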
Wait() bool + // NextWait returns the duration of the next call to Wait(). + NextWait() time.Duration + // Reset resets the backoff duration to an initial value governed by the backoff strategy. Reset() } diff --git a/internal/pkg/core/backoff/backoff_test.go b/internal/pkg/core/backoff/backoff_test.go index 88498ff5a58..12332eb15f2 100644 --- a/internal/pkg/core/backoff/backoff_test.go +++ b/internal/pkg/core/backoff/backoff_test.go @@ -14,14 +14,9 @@ import ( type factory func(<-chan struct{}) Backoff -func TestBackoff(t *testing.T) { - t.Run("test close channel", testCloseChannel) - t.Run("test unblock after some time", testUnblockAfterInit) -} - -func testCloseChannel(t *testing.T) { - init := 2 * time.Second - max := 5 * time.Minute +func TestCloseChannel(t *testing.T) { + init := 2 * time.Millisecond + max := 5 * time.Second tests := map[string]factory{ "ExpBackoff": func(done <-chan struct{}) Backoff { @@ -42,9 +37,9 @@ func testCloseChannel(t *testing.T) { } } -func testUnblockAfterInit(t *testing.T) { - init := 1 * time.Second - max := 5 * time.Minute +func TestUnblockAfterInit(t *testing.T) { + init := 1 * time.Millisecond + max := 5 * time.Second tests := map[string]factory{ "ExpBackoff": func(done <-chan struct{}) Backoff { @@ -68,3 +63,36 @@ func testUnblockAfterInit(t *testing.T) { }) } } + +func TestNextWait(t *testing.T) { + init := time.Millisecond + max := 5 * time.Second + + tests := map[string]factory{ + "ExpBackoff": func(done <-chan struct{}) Backoff { + return NewExpBackoff(done, init, max) + }, + "EqualJitterBackoff": func(done <-chan struct{}) Backoff { + return NewEqualJitterBackoff(done, init, max) + }, + } + + for name, f := range tests { + t.Run(name, func(t *testing.T) { + c := make(chan struct{}) + b := f(c) + + startWait := b.NextWait() + assert.Equal(t, startWait, b.NextWait(), "next wait not stable") + + startedAt := time.Now() + b.Wait() + waitDuration := time.Now().Sub(startedAt) + nextWait := b.NextWait() + + t.Logf("actualWait: %s startWait: %s nextWait: %s", waitDuration, startWait, nextWait) + assert.Less(t, startWait, nextWait, "wait value did not increase") + assert.GreaterOrEqual(t, waitDuration, startWait, "next wait duration <= actual wait duration") + }) + } +} diff --git a/internal/pkg/core/backoff/equal_jitter.go b/internal/pkg/core/backoff/equal_jitter.go index d87077397cd..671201f5892 100644 --- a/internal/pkg/core/backoff/equal_jitter.go +++ b/internal/pkg/core/backoff/equal_jitter.go @@ -16,8 +16,9 @@ type EqualJitterBackoff struct { duration time.Duration done <-chan struct{} - init time.Duration - max time.Duration + init time.Duration + max time.Duration + nextRand time.Duration last time.Time } @@ -29,6 +30,7 @@ func NewEqualJitterBackoff(done <-chan struct{}, init, max time.Duration) Backof done: done, init: init, max: max, + nextRand: time.Duration(rand.Int63n(int64(init))), //nolint:gosec } } @@ -38,13 +40,18 @@ func (b *EqualJitterBackoff) Reset() { b.duration = b.init * 2 } +func (b *EqualJitterBackoff) NextWait() time.Duration { + // Make sure we have always some minimal back off and jitter. + temp := b.duration / 2 + return temp + b.nextRand +} + // Wait block until either the timer is completed or channel is done. func (b *EqualJitterBackoff) Wait() bool { - // Make sure we have always some minimal back off and jitter. - temp := int64(b.duration / 2) - backoff := time.Duration(temp + rand.Int63n(temp)) + backoff := b.NextWait() // increase duration for next wait. 
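+	// nextRand is re-rolled only after the previous value is consumed, so
+	// NextWait() remains stable between calls to Wait() and callers can
+	// preview the upcoming delay (TestNextWait relies on this).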
+ b.nextRand = time.Duration(rand.Int63n(int64(b.duration))) b.duration *= 2 if b.duration > b.max { b.duration = b.max diff --git a/internal/pkg/core/backoff/exponential.go b/internal/pkg/core/backoff/exponential.go index 81224b95eb5..51b5b4e0cb5 100644 --- a/internal/pkg/core/backoff/exponential.go +++ b/internal/pkg/core/backoff/exponential.go @@ -36,18 +36,23 @@ func (b *ExpBackoff) Reset() { b.duration = b.init } +func (b *ExpBackoff) NextWait() time.Duration { + nextWait := b.duration + nextWait *= 2 + if nextWait > b.max { + nextWait = b.max + } + return nextWait +} + // Wait block until either the timer is completed or channel is done. func (b *ExpBackoff) Wait() bool { - backoff := b.duration - b.duration *= 2 - if b.duration > b.max { - b.duration = b.max - } + b.duration = b.NextWait() select { case <-b.done: return false - case <-time.After(backoff): + case <-time.After(b.duration): b.last = time.Now() return true } diff --git a/internal/pkg/crypto/io.go b/internal/pkg/crypto/io.go index 738a216774a..2012bdf1b5c 100644 --- a/internal/pkg/crypto/io.go +++ b/internal/pkg/crypto/io.go @@ -21,11 +21,11 @@ import ( // Option is the default options used to generate the encrypt and decrypt writer. // NOTE: the defined options need to be same for both the Reader and the writer. type Option struct { + Generator bytesGen IterationsCount int KeyLength int SaltLength int IVLength int - Generator bytesGen // BlockSize must be a factor of aes.BlockSize BlockSize int @@ -180,7 +180,6 @@ func (w *Writer) Write(b []byte) (int, error) { } func (w *Writer) writeBlock(b []byte) error { - // randomly generate the salt and the initialization vector, this information will be saved // on disk in the file as part of the header iv, err := w.generator(w.option.IVLength) @@ -189,12 +188,14 @@ func (w *Writer) writeBlock(b []byte) error { return w.err } + // nolint: errcheck // Ignore the error at this point. w.writer.Write(iv) encodedBytes := w.gcm.Seal(nil, iv, b, nil) l := make([]byte, 4) binary.LittleEndian.PutUint32(l, uint32(len(encodedBytes))) + // nolint: errcheck // Ignore the error at this point. 
w.writer.Write(l)
 
 	_, err = w.writer.Write(encodedBytes)
@@ -325,7 +326,7 @@ func (r *Reader) consumeBlock() error {
 	}
 
 	encodedBytes := make([]byte, l)
-	_, err = io.ReadAtLeast(r.reader, encodedBytes, int(l))
+	_, err = io.ReadAtLeast(r.reader, encodedBytes, l)
 	if err != nil {
 		r.err = errors.Wrapf(err, "fail read the block of %d bytes", l)
 	}
@@ -364,7 +365,6 @@ func (r *Reader) Close() error {
 func randomBytes(length int) ([]byte, error) {
 	r := make([]byte, length)
 	_, err := rand.Read(r)
-
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/pkg/fleetapi/acker/retrier/retrier.go b/internal/pkg/fleetapi/acker/retrier/retrier.go
index 747fe93645d..b007bfdf064 100644
--- a/internal/pkg/fleetapi/acker/retrier/retrier.go
+++ b/internal/pkg/fleetapi/acker/retrier/retrier.go
@@ -32,19 +32,19 @@ type Option func(*Retrier)
 
 // Retrier implements retrier for actions acks
 type Retrier struct {
-	log   *logger.Logger
 	acker BatchAcker // AckBatch provider
+	log   *logger.Logger
 
-	initialRetryInterval time.Duration // initial retry interval
-	maxRetryInterval     time.Duration // max retry interval
-	maxRetries           int           // configurable maxNumber of retries per action
+	doneCh chan struct{} // signal channel when retry loop is done
+	kickCh chan struct{} // signal channel to kickoff retry loop if not running
 
 	actions []fleetapi.Action // pending actions
-	mx      sync.Mutex
-	kickCh  chan struct{} // signal channel to kickoff retry loop if not running
+	maxRetryInterval     time.Duration // max retry interval
+	maxRetries           int           // configurable max number of retries per action
+	initialRetryInterval time.Duration // initial retry interval
 
-	doneCh chan struct{} // signal channel when retry loop is done
+	mx sync.Mutex
 }
 
 // New creates new instance of retrier
@@ -173,7 +173,6 @@ func (r *Retrier) runRetries(ctx context.Context) {
 		default:
 		}
 		r.log.Debug("ack retrier: exit retry loop")
-
 }
 
 func (r *Retrier) updateRetriesMap(retries map[string]int, actions []fleetapi.Action, resp *fleetapi.AckResponse) (failed []fleetapi.Action) {
diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go
index 9c2cd1513e1..33bcd3dab55 100644
--- a/internal/pkg/fleetapi/checkin_cmd.go
+++ b/internal/pkg/fleetapi/checkin_cmd.go
@@ -38,12 +38,11 @@ type CheckinComponent struct {
 
 // CheckinRequest consists of multiple events reported to fleet ui.
 type CheckinRequest struct {
-	Status   string              `json:"status"`
-	AckToken string              `json:"ack_token,omitempty"`
-	Events   []SerializableEvent `json:"events"`
-	Metadata *info.ECSMeta       `json:"local_metadata,omitempty"`
-	Message  string              `json:"message"` // V2 Agent message
-	Components []CheckinComponent `json:"components"` // V2 Agent components
+	Status     string             `json:"status"`
+	AckToken   string             `json:"ack_token,omitempty"`
+	Metadata   *info.ECSMeta      `json:"local_metadata,omitempty"`
+	Message    string             `json:"message"`    // V2 Agent message
+	Components []CheckinComponent `json:"components"` // V2 Agent components
 }
 
 // SerializableEvent is a representation of the event to be send to the Fleet Server API via the checkin
@@ -96,23 +95,26 @@ func NewCheckinCmd(info agentInfo, client client.Sender) *CheckinCmd {
 	}
 }
 
-// Execute enroll the Agent in the Fleet Server.
-func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, error) {
+// Execute performs a check-in with the Fleet Server. It returns the decoded
+// check-in response, a duration indicating how long the request took, and an error.
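+// The duration covers only the client.Send round trip; request marshalling
+// and response decoding are excluded. Hypothetical caller:
+//
+//	resp, took, err := cmd.Execute(ctx, &req)
+//	log.Debugf("checkin round trip took %s", took)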
+func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, time.Duration, error) {
 	if err := r.Validate(); err != nil {
-		return nil, err
+		return nil, 0, err
 	}
 	b, err := json.Marshal(r)
 	if err != nil {
-		return nil, errors.New(err,
+		return nil, 0, errors.New(err,
 			"fail to encode the checkin request",
 			errors.TypeUnexpected)
 	}
 	cp := fmt.Sprintf(checkingPath, e.info.AgentID())
+	sendStart := time.Now()
 	resp, err := e.client.Send(ctx, "POST", cp, nil, nil, bytes.NewBuffer(b))
+	sendDuration := time.Since(sendStart)
 	if err != nil {
-		return nil, errors.New(err,
+		return nil, sendDuration, errors.New(err,
 			"fail to checkin to fleet-server",
 			errors.TypeNetwork,
 			errors.M(errors.MetaKeyURI, cp))
 	}
 	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
-		return nil, client.ExtractError(resp.Body)
+		return nil, sendDuration, client.ExtractError(resp.Body)
 	}
 
 	rs, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return nil, errors.New(err, "failed to read checkin response")
+		return nil, sendDuration, errors.New(err, "failed to read checkin response")
 	}
 
 	checkinResponse := &CheckinResponse{}
 	decoder := json.NewDecoder(bytes.NewReader(rs))
 	if err := decoder.Decode(checkinResponse); err != nil {
-		return nil, errors.New(err,
+		return nil, sendDuration, errors.New(err,
 			"fail to decode checkin response",
 			errors.TypeNetwork,
 			errors.M(errors.MetaKeyURI, cp))
 	}
 
 	if err := checkinResponse.Validate(); err != nil {
-		return nil, err
+		return nil, sendDuration, err
 	}
 
-	return checkinResponse, nil
+	return checkinResponse, sendDuration, nil
 }
diff --git a/internal/pkg/fleetapi/checkin_cmd_test.go b/internal/pkg/fleetapi/checkin_cmd_test.go
index 2d9aef2741a..56726bb5559 100644
--- a/internal/pkg/fleetapi/checkin_cmd_test.go
+++ b/internal/pkg/fleetapi/checkin_cmd_test.go
@@ -11,6 +11,7 @@ import (
 	"io/ioutil"
 	"net/http"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -25,6 +26,7 @@ func (*agentinfo) AgentID() string { return "id" }
 
 func TestCheckin(t *testing.T) {
 	const withAPIKey = "secret"
+	const requestDelay = time.Millisecond
 	ctx := context.Background()
 	agentInfo := &agentinfo{}
 
@@ -39,6 +41,8 @@ func TestCheckin(t *testing.T) {
 			mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusInternalServerError)
 				fmt.Fprint(w, raw)
+				// Introduce a small delay to test the request time measurement.
+				time.Sleep(requestDelay)
 			}, withAPIKey))
 			return mux
 		}, withAPIKey,
@@ -47,8 +51,10 @@ func TestCheckin(t *testing.T) {
 
 			request := CheckinRequest{}
 
-			_, err := cmd.Execute(ctx, &request)
+			_, took, err := cmd.Execute(ctx, &request)
 			require.Error(t, err)
+			// Ensure the request took at least as long as the artificial delay.
+ require.GreaterOrEqual(t, took, requestDelay) }, )) @@ -96,7 +102,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 1, len(r.Actions)) @@ -157,7 +163,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 2, len(r.Actions)) @@ -173,7 +179,7 @@ func TestCheckin(t *testing.T) { }, )) - t.Run("When we receive no action", withServerWithAuthClient( + t.Run("When we receive no action with delay", withServerWithAuthClient( func(t *testing.T) *http.ServeMux { raw := `{ "actions": [] }` mux := http.NewServeMux() @@ -189,7 +195,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) @@ -223,7 +229,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{Metadata: testMetadata()} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) @@ -257,7 +263,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) diff --git a/magefile.go b/magefile.go index 084aa62dc08..ed633505e49 100644 --- a/magefile.go +++ b/magefile.go @@ -10,14 +10,12 @@ package main import ( "context" "fmt" - "io" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" - "sync" "time" "github.com/hashicorp/go-multierror" @@ -53,7 +51,7 @@ const ( externalArtifacts = "EXTERNAL" configFile = "elastic-agent.yml" agentDropPath = "AGENT_DROP_PATH" - specSuffix = ".spec.yml" // TODO: change after beat ignores yml config + specSuffix = ".spec.yml" checksumFilename = "checksum.yml" ) @@ -97,48 +95,6 @@ type Demo mg.Namespace // Dev runs package and build for dev purposes. type Dev mg.Namespace -// Notice regenerates the NOTICE.txt file. 
-func Notice() error { - fmt.Println(">> Generating NOTICE") - fmt.Println(">> fmt - go mod tidy") - err := sh.RunV("go", "mod", "tidy", "-v") - if err != nil { - return errors.Wrap(err, "failed running go mod tidy, please fix the issues reported") - } - fmt.Println(">> fmt - go mod download") - err = sh.RunV("go", "mod", "download") - if err != nil { - return errors.Wrap(err, "failed running go mod download, please fix the issues reported") - } - fmt.Println(">> fmt - go list") - str, err := sh.Output("go", "list", "-m", "-json", "all") - if err != nil { - return errors.Wrap(err, "failed running go list, please fix the issues reported") - } - fmt.Println(">> fmt - go run") - cmd := exec.Command("go", "run", "go.elastic.co/go-licence-detector", "-includeIndirect", "-rules", "dev-tools/notice/rules.json", "-overrides", "dev-tools/notice/overrides.json", "-noticeTemplate", "dev-tools/notice/NOTICE.txt.tmpl", - "-noticeOut", "NOTICE.txt", "-depsOut", "\"\"") - stdin, err := cmd.StdinPipe() - if err != nil { - return errors.Wrap(err, "failed running go run, please fix the issues reported") - } - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer stdin.Close() - defer wg.Done() - if _, err := io.WriteString(stdin, str); err != nil { - fmt.Println(err) - } - }() - wg.Wait() - _, err = cmd.CombinedOutput() - if err != nil { - return errors.Wrap(err, "failed combined output, please fix the issues reported") - } - return nil -} - func CheckNoChanges() error { fmt.Println(">> fmt - go run") err := sh.RunV("go", "mod", "tidy", "-v") diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml index 0b7da1c9048..ba6a08934b8 100644 --- a/specs/heartbeat.spec.yml +++ b/specs/heartbeat.spec.yml @@ -1,47 +1,44 @@ -version: 2 -inputs: - - name: synthetics/synthetics - description: "Synthetics Browser Monitor" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${HEARTBEAT_GOGC:100}" - - name: synthetics/http - description: "Synthetics HTTP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: synthetics/icmp - description: "Synthetics ICMP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: synthetics/tcp - description: "Synthetics TCP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: synthetics/synthetics + description: "Synthetics Browser Monitor" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${HEARTBEAT_GOGC:100}" + - name: synthetics/http + description: "Synthetics HTTP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/icmp + description: "Synthetics ICMP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/tcp + description: "Synthetics TCP Monitor" + platforms: 
*platforms + outputs: *outputs + command: + args: *args diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index b8b6792b912..1c415537ad4 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d058e92f-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-cae815eb-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-d058e92f-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.6.0-cae815eb-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc index 9d0056a0c38..0485d65c441 100644 --- a/version/docs/version.asciidoc +++ b/version/docs/version.asciidoc @@ -1,6 +1,6 @@ :stack-version: 8.3.0 :doc-branch: main -:go-version: 1.17.10 +:go-version: 1.18.7 :release-state: unreleased :python: 3.7 :docker: 1.12