diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 8312b5969cae6..bc7e49a59467f 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -40,6 +40,7 @@ BWC_VERSION:
   - "7.13.4"
   - "7.14.0"
   - "7.14.1"
+  - "7.14.2"
   - "7.15.0"
   - "7.16.0"
   - "8.0.0"
diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml
index fd979e15413f7..037f0bca8c3a1 100644
--- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml
+++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml
@@ -36,6 +36,7 @@
           - centos-8-packaging
           - debian-9-packaging
           - debian-10-packaging
+          - debian-11-packaging
           - opensuse-15-1-packaging
           - oraclelinux-7-packaging
           - oraclelinux-8-packaging
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index f5b6ecb9d52f9..9ffc1c8b92a1c 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -14,3 +14,6 @@
 # Format more snapshot / restore relate projects
 559c4e6ef4f9173bbb59043bacd0ac36c7281040
+
+# Format aggregations and related code (server and x-pack)
+d71544976608bdb53fa4d29521fb328e1033ee2f
diff --git a/BUILDING.md b/BUILDING.md
new file mode 100644
index 0000000000000..0d2534c0bf07a
--- /dev/null
+++ b/BUILDING.md
@@ -0,0 +1,120 @@
+Building Elasticsearch with Gradle
+=============================
+
+Elasticsearch is built using the [Gradle](https://gradle.org/) open source build tool.
+
+This document provides general guidelines for using and working on the Elasticsearch build logic.
+
+## Build logic organisation
+
+The Elasticsearch project contains 3 build-related projects that are included in the Elasticsearch build as a [composite build](https://docs.gradle.org/current/userguide/composite_builds.html).
+
+### `build-conventions`
+
+This project contains build conventions that are applied to all Elasticsearch projects.
+
+### `build-tools`
+
+This project contains all build logic that we publish for third-party Elasticsearch plugin authors.
+We provide the following plugins:
+
+- `elasticsearch.esplugin` - A Gradle plugin for building an Elasticsearch plugin.
+- `elasticsearch.testclusters` - A Gradle plugin for setting up Elasticsearch clusters for testing within a build.
+
+This project is published as part of the Elasticsearch release and is accessible via
+`org.elasticsearch.gradle:build-tools:`.
+These build tools are also used by the `elasticsearch-hadoop` project maintained by Elastic.
+
+### `build-tools-internal`
+
+This project contains all Elasticsearch project-specific build logic that is not meant to be shared
+with other internal or external projects.
+
+## Build guidelines
+
+This is an intentionally small set of guidelines for build users and authors
+to ensure we keep the build consistent. We also publish the Elasticsearch build logic
+as `build-tools` so that it is usable by third-party Elasticsearch plugin authors. It is
+also used by other Elastic teams like `elasticsearch-hadoop`.
+Breaking changes should therefore be avoided and an appropriate deprecation cycle
+should be followed.
+
+### Stay up to date
+
+The Elasticsearch build usually uses the latest Gradle GA release. We stay as close to the
+latest Gradle releases as possible. In certain cases an update is blocked by a breaking behaviour
+in Gradle. In those cases we're usually in contact with the Gradle team or working on a fix
+in our build logic to resolve it.
+
+**The Elasticsearch build will fail if any deprecated Gradle API is used.**
+
+### Make a change in the build
+
+There are a few guidelines to follow that should make your life easier when making changes to the Elasticsearch build.
+Please add a member of the `es-delivery` team as a reviewer if you're making non-trivial changes to the build.
+
+#### Custom Plugin and Task implementations
+
+Build logic that is used across multiple subprojects should be considered for moving into a Gradle plugin with an accompanying Gradle task implementation.
+Elasticsearch-specific build logic is located in the `build-tools-internal` subproject, including integration tests.
+
+- Gradle plugins and tasks should be written in Java.
+- We use Groovy and Spock for setting up Gradle integration tests.
+  (see https://github.com/elastic/elasticsearch/blob/master/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy)
+
+#### Declaring tasks
+
+The Elasticsearch build makes use of the [task avoidance API](https://docs.gradle.org/current/userguide/task_configuration_avoidance.html) to keep the configuration time of the build low.
+
+When declaring tasks (in build scripts or custom plugins), this means that we want to _register_ a task like:
+
+    tasks.register('someTask') { ... }
+
+instead of eagerly _creating_ the task:
+
+    task someTask { ... }
+
+The major difference between these two syntaxes is that the configuration block of a registered task is only executed when the task is actually created because the build requires that task to run. The configuration block of an eagerly created task is executed immediately.
+
+By doing less at Gradle configuration time, creating only the tasks that are requested as part of the build and running only the configuration blocks of those tasks, the task avoidance API plays a major part in keeping our build fast.
+
+#### Adding additional integration tests
+
+Additional integration tests for an Elasticsearch module that are specific to a certain cluster configuration can be declared in a separate, so-called `qa` subproject of your module.
+
+The benefits of a dedicated project for these tests are:
+- `qa` projects are dedicated to specific use cases and easier to maintain.
+- It keeps the specific test logic separated from the common test logic.
+- You can run those tests in parallel with other projects of the build.
+
+#### Using test fixtures
+
+Sometimes we want to share test fixtures to set up the code under test across multiple projects. There are basically two ways of doing so.
+
+Ideally we would use the built-in [java-test-fixtures](https://docs.gradle.org/current/userguide/java_testing.html#sec:java_test_fixtures) Gradle plugin.
+This plugin relies on having a separate sourceSet for the test fixtures code.
+
+In the Elasticsearch codebase we have test fixtures and actual tests within the same sourceSet. Therefore we introduced the `elasticsearch.internal-test-artifact` plugin, which provides another build artifact of your project based on the `test` sourceSet.
+
+
+This artifact can be resolved by the consumer project as shown in the example below:
+
+```
+dependencies {
+  // add the test fixtures of `:fixture-providing-project` to the testImplementation configuration
+  testImplementation(testArtifact(project(':fixture-providing-project')))
+}
+```
+
+This test artifact mechanism makes use of the concept of [component capabilities](https://docs.gradle.org/current/userguide/component_capabilities.html),
+similar to how the Gradle built-in `java-test-fixtures` plugin works.
+
+`testArtifact` is a shortcut declared in the Elasticsearch build. Alternatively you can declare the dependency via
+
+```
+dependencies {
+  testImplementation(project(':fixture-providing-project')) {
+    requireCapabilities("org.elasticsearch.gradle:fixture-providing-project-test-artifacts")
+  }
+}
+```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 3e08516f86814..73457580c1b29 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -69,6 +69,8 @@ cycle.
 * Lines that are not part of your change should not be edited (e.g. don't format unchanged lines, don't reorder existing imports)
 * Add the appropriate [license headers](#license-headers) to any new files
+* For contributions involving the Elasticsearch build you can find details about the build setup in the
+* [BUILDING](BUILDING.md) file
 
 ### Submitting your changes
diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle
index 362e5f2d4bc23..b0363dc795eb0 100644
--- a/build-tools-internal/build.gradle
+++ b/build-tools-internal/build.gradle
@@ -159,8 +159,8 @@ gradlePlugin {
       implementationClass = 'org.elasticsearch.gradle.internal.rest.compat.YamlRestCompatTestPlugin'
     }
     yamlRestTest {
-      id = 'elasticsearch.yaml-rest-test'
-      implementationClass = 'org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin'
+      id = 'elasticsearch.internal-yaml-rest-test'
+      implementationClass = 'org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin'
     }
   }
 }
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy
index 310b747dd14a0..e24d3f1824e03 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy
@@ -63,6 +63,6 @@ class InternalBwcGitPluginFuncTest extends AbstractGitAwareGradleFuncTest {
         then:
         result.task(":checkoutBwcBranch").outcome == TaskOutcome.SUCCESS
         result.task(":consumer:register").outcome == TaskOutcome.SUCCESS
-        normalized(result.output).contains("/cloned/build/checkout")
+        result.output.contains("./build/checkout")
     }
 }
diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPluginFuncTest.groovy
index 74eacb0b5a5ac..587343133b08e 100644
--- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPluginFuncTest.groovy
+++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionArchiveCheckPluginFuncTest.groovy
@@ -80,11 +80,16 @@ unknown license content line 2
 }
 """
+
+
        when:
-        def result = gradleRunner(":darwin-tar:check").buildAndFail()
+        def runner = gradleRunner(":darwin-tar:check")
+        println "runner.getClass() = ${runner.getClass()}"
+        def result = runner.buildAndFail()
+        println "result.getClass() = ${result.getClass()}"
        then:
result.task(":darwin-tar:checkLicense").outcome == TaskOutcome.FAILED - normalized(result.output).contains("> expected line [2] in " + + result.output.contains("> expected line [2] in " + "[./darwin-tar/build/tar-extracted/elasticsearch-${VersionProperties.getElasticsearch()}/LICENSE.txt] " + "to be [elastic license coorp stuff line 2] but was [unknown license content line 2]") } @@ -110,7 +115,7 @@ Copyright 2009-2018 Acme Coorp""" def result = gradleRunner(":darwin-tar:checkNotice").buildAndFail() then: result.task(":darwin-tar:checkNotice").outcome == TaskOutcome.FAILED - normalized(result.output).contains("> expected line [2] in " + + result.output.contains("> expected line [2] in " + "[./darwin-tar/build/tar-extracted/elasticsearch-${VersionProperties.getElasticsearch()}/NOTICE.txt] " + "to be [Copyright 2009-2021 Elasticsearch] but was [Copyright 2009-2018 Acme Coorp]") } @@ -146,8 +151,7 @@ Copyright 2009-2021 Elasticsearch""" def result = gradleRunner(":darwin-tar:check").buildAndFail() then: result.task(":darwin-tar:checkMlCppNotice").outcome == TaskOutcome.FAILED - normalized(result.output) - .contains("> expected [./darwin-tar/build/tar-extracted/elasticsearch-" + + result.output.contains("> expected [./darwin-tar/build/tar-extracted/elasticsearch-" + "${VersionProperties.getElasticsearch()}/modules/x-pack-ml/NOTICE.txt " + "to contain [foo license] but it did not") } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index 3cb898076cc1a..a03f6ef8c9e79 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -17,6 +17,7 @@ import spock.lang.Unroll /* * Test is ignored on ARM since this test case tests the ability to build certain older BWC branches that we don't support on ARM */ + @IgnoreIf({ Architecture.current() == Architecture.AARCH64 }) class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleFuncTest { @@ -138,9 +139,8 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF and: "assemble task triggered" result.output.contains("[7.10.1] > Task :distribution:archives:darwin-tar:assemble") - normalized(result.output) - .contains("distfile /distribution/bwc/bugfix/build/bwc/checkout-7.10/distribution/archives/darwin-tar/" + - "build/distributions/elasticsearch-7.10.1-SNAPSHOT-darwin-x86_64.tar.gz") + result.output.contains("distfile /distribution/bwc/bugfix/build/bwc/checkout-7.10/distribution/archives/darwin-tar/" + + "build/distributions/elasticsearch-7.10.1-SNAPSHOT-darwin-x86_64.tar.gz") } def "bwc expanded distribution folder can be resolved as bwc project artifact"() { @@ -177,11 +177,9 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS and: "assemble task triggered" result.output.contains("[7.12.0] > Task :distribution:archives:darwin-tar:extractedAssemble") - normalized(result.output) - .contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-7.x/" + + result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-7.x/" + 
"distribution/archives/darwin-tar/build/install") - normalized(result.output) - .contains("nested folder /distribution/bwc/minor/build/bwc/checkout-7.x/" + + result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-7.x/" + "distribution/archives/darwin-tar/build/install/elasticsearch-7.12.0-SNAPSHOT") } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy index 62bb3f917df8b..677bd6de8673c 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/JdkDownloadPluginFuncTest.groovy @@ -182,7 +182,7 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { } then: - normalized(result.output).contains("Unpacking $expectedArchiveName using $transformType") == false + result.output.contains("Unpacking $expectedArchiveName using $transformType") == false where: platform | expectedArchiveName | transformType @@ -214,8 +214,8 @@ class JdkDownloadPluginFuncTest extends AbstractGradleFuncTest { return "/java/GA/" + versionPath + "/GPL/" + filename; } else if (vendor.equals(VENDOR_AZUL)) { final String module = isMac(platform) ? "macosx" : platform; - // we only test zulu 15 darwin aarch64 for now - return "/zulu${module.equals('linux') ? '-embedded' : ''}/bin/zulu16.28.11-ca-jdk16.0.0-${module}_${arch}.tar.gz"; + // we only test zulu 16 darwin aarch64 for now + return "/zulu${module.equals('linux') ? '-embedded' : ''}/bin/zulu16.32.15-ca-jdk16.0.2-${module}_${arch}.tar.gz"; } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy index b727235a242f3..0481d3315d010 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy @@ -33,7 +33,7 @@ class LicenseHeadersPrecommitPluginFuncTest extends AbstractGradleFuncTest { assertOutputContains(result.output, "> Check failed. License header problems were found. 
Full details: ./build/reports/licenseHeaders/rat.xml") assertOutputContains(result.output, "./src/main/java/org/acme/UnknownLicensed.java") assertOutputContains(result.output, "./src/main/java/org/acme/UnapprovedLicensed.java") - normalized(result.output).contains("./src/main/java/org/acme/DualLicensed.java") == false + result.output.contains("./src/main/java/org/acme/DualLicensed.java") == false } def "can filter source files"() { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rerun/InternalTestRerunPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rerun/InternalTestRerunPluginFuncTest.groovy index f119d9b4114ed..0dd982665808a 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rerun/InternalTestRerunPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rerun/InternalTestRerunPluginFuncTest.groovy @@ -57,7 +57,7 @@ class InternalTestRerunPluginFuncTest extends AbstractGradleFuncTest { def result = gradleRunner("test").buildAndFail() result.output.contains("total executions: 2") == false and: "no jvm system exit tracing provided" - normalized(result.output).contains("""Test jvm exited unexpectedly. + result.output.contains("""Test jvm exited unexpectedly. Test jvm system exit trace:""") == false } @@ -119,7 +119,7 @@ Test jvm system exit trace:""") == false result.output.contains("AnotherTest6 total executions: 2") // triggered only in the second overall run and: 'Tracing is provided' - normalized(result.output).contains("""================ + result.output.contains("""================ Test jvm exited unexpectedly. Test jvm system exit trace (run: 1) Gradle Test Executor 1 > AnotherTest6 > someTest @@ -200,11 +200,11 @@ Gradle Test Executor 1 > AnotherTest6 > someTest result.output.contains("JdkKillingTest total executions: 5") result.output.contains("Max retries(4) hit") and: 'Tracing is provided' - normalized(result.output).contains("Test jvm system exit trace (run: 1)") - normalized(result.output).contains("Test jvm system exit trace (run: 2)") - normalized(result.output).contains("Test jvm system exit trace (run: 3)") - normalized(result.output).contains("Test jvm system exit trace (run: 4)") - normalized(result.output).contains("Test jvm system exit trace (run: 5)") + result.output.contains("Test jvm system exit trace (run: 1)") + result.output.contains("Test jvm system exit trace (run: 2)") + result.output.contains("Test jvm system exit trace (run: 3)") + result.output.contains("Test jvm system exit trace (run: 4)") + result.output.contains("Test jvm system exit trace (run: 5)") } private String testMethodContent(boolean withSystemExit, boolean fail, int timesFailing = 1) { diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPluginFuncTest.groovy similarity index 89% rename from build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPluginFuncTest.groovy rename to build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPluginFuncTest.groovy index 9cfd51d074bee..527cd6277c722 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPluginFuncTest.groovy +++ 
b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPluginFuncTest.groovy @@ -11,13 +11,13 @@ package org.elasticsearch.gradle.internal.test.rest import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest import org.gradle.testkit.runner.TaskOutcome -class YamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { +class InternalYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTest does nothing when there are no tests"() { given: buildFile << """ plugins { - id 'elasticsearch.yaml-rest-test' + id 'elasticsearch.internal-yaml-rest-test' } """ @@ -34,7 +34,7 @@ class YamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { given: internalBuild() buildFile << """ - apply plugin: 'elasticsearch.yaml-rest-test' + apply plugin: 'elasticsearch.internal-yaml-rest-test' dependencies { yamlRestTestImplementation "junit:junit:4.12" @@ -67,8 +67,8 @@ class YamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { file("/build/classes/java/yamlRestTest/MockIT.class").exists() // check that our copied specs and tests are on the yamlRestTest classpath - normalized(result.output).contains("./build/restResources/yamlSpecs") - normalized(result.output).contains("./build/restResources/yamlTests") + result.output.contains("./build/restResources/yamlSpecs") + result.output.contains("./build/restResources/yamlTests") when: result = gradleRunner("yamlRestTest").build() diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy index 9443590a1e432..6e16b5a7be172 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy @@ -22,13 +22,13 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { def compatibleVersion = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1 def specIntermediateDir = "restResources/v${compatibleVersion}/yamlSpecs" def testIntermediateDir = "restResources/v${compatibleVersion}/yamlTests" - def transformTask = ":transformV${compatibleVersion}RestTests" + def transformTask = ":yamlRestTestV${compatibleVersion}CompatTransform" def YAML_FACTORY = new YAMLFactory() def MAPPER = new ObjectMapper(YAML_FACTORY) def READER = MAPPER.readerFor(ObjectNode.class) def WRITER = MAPPER.writerFor(ObjectNode.class) - def "yamlRestCompatTest does nothing when there are no tests"() { + def "yamlRestTestVxCompatTest does nothing when there are no tests"() { given: addSubProject(":distribution:bwc:minor") << """ configurations { checkout } @@ -44,16 +44,16 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { """ when: - def result = gradleRunner("yamlRestCompatTest", '--stacktrace').build() + def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest", '--stacktrace').build() then: - result.task(':yamlRestCompatTest').outcome == TaskOutcome.NO_SOURCE + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == 
TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE } - def "yamlRestCompatTest executes and copies api and transforms tests from :bwc:minor"() { + def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:minor"() { given: internalBuild() @@ -93,10 +93,10 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" when: - def result = gradleRunner("yamlRestCompatTest").build() + def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() then: - result.task(':yamlRestCompatTest').outcome == TaskOutcome.SKIPPED + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SUCCESS result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SUCCESS result.task(transformTask).outcome == TaskOutcome.SUCCESS @@ -123,16 +123,16 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE when: - result = gradleRunner("yamlRestCompatTest").build() + result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() then: - result.task(':yamlRestCompatTest').outcome == TaskOutcome.SKIPPED + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.UP_TO_DATE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.UP_TO_DATE result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE } - def "yamlRestCompatTest is wired into check and checkRestCompat"() { + def "yamlRestTestVxCompatTest is wired into check and checkRestCompat"() { given: addSubProject(":distribution:bwc:minor") << """ @@ -155,7 +155,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(':yamlRestCompatTest').outcome == TaskOutcome.NO_SOURCE + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE result.task(transformTask).outcome == TaskOutcome.NO_SOURCE @@ -169,7 +169,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { then: result.task(':check').outcome == TaskOutcome.UP_TO_DATE result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(':yamlRestCompatTest').outcome == TaskOutcome.SKIPPED + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SKIPPED result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SKIPPED result.task(transformTask).outcome == TaskOutcome.SKIPPED @@ -195,7 +195,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { dependencies { yamlRestTestImplementation "junit:junit:4.12" } - tasks.named("transformV7RestTests").configure({ task -> + tasks.named("yamlRestTestV${compatibleVersion}CompatTransform").configure({ task -> task.replaceValueInMatch("_type", "_doc") task.replaceValueInMatch("_source.values", ["z", "x", "y"], "one") task.removeMatch("_source.blah") @@ -266,7 +266,7 @@ class 
YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { - match: {} """.stripIndent() when: - def result = gradleRunner("yamlRestCompatTest").build() + def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() then: diff --git a/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle b/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle index 709b2033fd7a0..1abf66430b937 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle @@ -59,7 +59,6 @@ def projectPathsToExclude = [ ':libs:elasticsearch-secure-sm', ':libs:elasticsearch-ssl-config', ':libs:elasticsearch-x-content', - ':modules:aggs-matrix-stats', ':modules:analysis-common', ':modules:ingest-common', ':modules:ingest-geoip', @@ -109,7 +108,6 @@ def projectPathsToExclude = [ ':test:logger-usage', ':x-pack:license-tools', ':x-pack:plugin', - ':x-pack:plugin:analytics', ':x-pack:plugin:async-search', ':x-pack:plugin:async-search:qa', ':x-pack:plugin:async-search:qa:security', @@ -152,8 +150,6 @@ def projectPathsToExclude = [ ':x-pack:plugin:repository-encrypted:qa:azure', ':x-pack:plugin:repository-encrypted:qa:gcs', ':x-pack:plugin:repository-encrypted:qa:s3', - ':x-pack:plugin:rollup', - ':x-pack:plugin:rollup:qa:rest', ':x-pack:plugin:search-business-rules', ':x-pack:plugin:security', ':x-pack:plugin:security:cli', @@ -202,16 +198,18 @@ subprojects { if (projectPathsToExclude.contains(project.path) == false) { project.apply plugin: "com.diffplug.spotless" + spotless { java { if (project.path == ':server') { target 'src/*/java/org/elasticsearch/action/admin/cluster/repositories/**/*.java', 'src/*/java/org/elasticsearch/action/admin/cluster/snapshots/**/*.java', + 'src/test/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java', + 'src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java', 'src/*/java/org/elasticsearch/index/snapshots/**/*.java', 'src/*/java/org/elasticsearch/repositories/**/*.java', + 'src/*/java/org/elasticsearch/search/aggregations/**/*.java', 'src/*/java/org/elasticsearch/snapshots/**/*.java' - - targetExclude 'src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java' } else { // Normally this isn't necessary, but we have Java sources in // non-standard places diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java index 310703082edda..c2043f15a5773 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JdkDownloadPlugin.java @@ -138,7 +138,7 @@ private void setupRepository(Project project, Jdk jdk) { + zuluPathSuffix + "/bin/zulu" + jdk.getMajor() - + ".28.11-ca-jdk16.0.0-" + + ".32.15-ca-jdk16.0.2-" + azulPlatform(jdk) + "_[classifier].[ext]"; break; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java index 691aa47d9ebbc..fc33c288cf944 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java @@ -8,11 +8,8 @@ package org.elasticsearch.gradle.internal.release; -import groovy.text.SimpleTemplateEngine; - import com.google.common.annotations.VisibleForTesting; -import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import java.io.File; @@ -26,6 +23,9 @@ import java.util.TreeMap; import java.util.stream.Collectors; +import static java.util.Comparator.comparing; +import static java.util.stream.Collectors.groupingBy; + /** * Generates the page that lists the breaking changes and deprecations for a minor version release. */ @@ -33,43 +33,41 @@ public class BreakingChangesGenerator { static void update(File templateFile, File outputFile, List entries) throws IOException { try (FileWriter output = new FileWriter(outputFile)) { - generateFile(Files.readString(templateFile.toPath()), output, entries); + output.write( + generateFile(QualifiedVersion.of(VersionProperties.getElasticsearch()), Files.readString(templateFile.toPath()), entries) + ); } } @VisibleForTesting - private static void generateFile(String template, FileWriter outputWriter, List entries) throws IOException { - final Version version = VersionProperties.getElasticsearchVersion(); + static String generateFile(QualifiedVersion version, String template, List entries) throws IOException { final Map>> breakingChangesByNotabilityByArea = entries.stream() .map(ChangelogEntry::getBreaking) .filter(Objects::nonNull) + .sorted(comparing(ChangelogEntry.Breaking::getTitle)) .collect( - Collectors.groupingBy( + groupingBy( ChangelogEntry.Breaking::isNotable, - Collectors.groupingBy(ChangelogEntry.Breaking::getArea, TreeMap::new, Collectors.toList()) + groupingBy(ChangelogEntry.Breaking::getArea, TreeMap::new, Collectors.toList()) ) ); final Map> deprecationsByArea = entries.stream() .map(ChangelogEntry::getDeprecation) .filter(Objects::nonNull) - .collect(Collectors.groupingBy(ChangelogEntry.Deprecation::getArea, TreeMap::new, Collectors.toList())); + .sorted(comparing(ChangelogEntry.Deprecation::getTitle)) + .collect(groupingBy(ChangelogEntry.Deprecation::getArea, TreeMap::new, Collectors.toList())); final Map bindings = new HashMap<>(); bindings.put("breakingChangesByNotabilityByArea", breakingChangesByNotabilityByArea); bindings.put("deprecationsByArea", deprecationsByArea); - bindings.put("isElasticsearchSnapshot", VersionProperties.isElasticsearchSnapshot()); + bindings.put("isElasticsearchSnapshot", version.isSnapshot()); bindings.put("majorDotMinor", version.getMajor() + "." 
+ version.getMinor()); bindings.put("majorMinor", String.valueOf(version.getMajor()) + version.getMinor()); bindings.put("nextMajor", (version.getMajor() + 1) + ".0"); bindings.put("version", version); - try { - final SimpleTemplateEngine engine = new SimpleTemplateEngine(); - engine.createTemplate(template).make(bindings).writeTo(outputWriter); - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); - } + return TemplateUtils.render(template, bindings); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java index 08b03b35ccd63..19b9ed2f274a4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java @@ -37,7 +37,6 @@ public class ChangelogEntry { private Highlight highlight; private Breaking breaking; private Deprecation deprecation; - private List versions; private static final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory()); @@ -113,14 +112,6 @@ public void setDeprecation(Deprecation deprecation) { this.deprecation = deprecation; } - public List getVersions() { - return versions; - } - - public void setVersions(List versions) { - this.versions = versions; - } - @Override public boolean equals(Object o) { if (this == o) { @@ -136,20 +127,19 @@ public boolean equals(Object o) { && Objects.equals(type, that.type) && Objects.equals(summary, that.summary) && Objects.equals(highlight, that.highlight) - && Objects.equals(breaking, that.breaking) - && Objects.equals(versions, that.versions); + && Objects.equals(breaking, that.breaking); } @Override public int hashCode() { - return Objects.hash(pr, issues, area, type, summary, highlight, breaking, versions); + return Objects.hash(pr, issues, area, type, summary, highlight, breaking); } @Override public String toString() { return String.format( Locale.ROOT, - "ChangelogEntry{pr=%d, issues=%s, area='%s', type='%s', summary='%s', highlight=%s, breaking=%s, deprecation=%s versions=%s}", + "ChangelogEntry{pr=%d, issues=%s, area='%s', type='%s', summary='%s', highlight=%s, breaking=%s, deprecation=%s}", pr, issues, area, @@ -157,8 +147,7 @@ public String toString() { summary, highlight, breaking, - deprecation, - versions + deprecation ); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java index 5d5e1edf9b99e..0769996c50a88 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java @@ -8,9 +8,11 @@ package org.elasticsearch.gradle.internal.release; -import org.elasticsearch.gradle.Version; +import com.google.common.annotations.VisibleForTesting; + import org.elasticsearch.gradle.VersionProperties; import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.RegularFile; @@ -22,12 +24,23 @@ import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.TaskAction; +import 
org.gradle.process.ExecOperations; -import javax.inject.Inject; +import java.io.File; import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; -import java.util.function.Predicate; -import java.util.stream.Collectors; +import java.util.Map; +import java.util.Set; +import javax.inject.Inject; + +import static java.util.Comparator.naturalOrder; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; /** * Orchestrates the steps required to generate or update various release notes files. @@ -47,8 +60,10 @@ public class GenerateReleaseNotesTask extends DefaultTask { private final RegularFileProperty releaseHighlightsFile; private final RegularFileProperty breakingChangesFile; + private final GitWrapper gitWrapper; + @Inject - public GenerateReleaseNotesTask(ObjectFactory objectFactory) { + public GenerateReleaseNotesTask(ObjectFactory objectFactory, ExecOperations execOperations) { changelogs = objectFactory.fileCollection(); releaseNotesIndexTemplate = objectFactory.fileProperty(); @@ -60,56 +75,194 @@ public GenerateReleaseNotesTask(ObjectFactory objectFactory) { releaseNotesFile = objectFactory.fileProperty(); releaseHighlightsFile = objectFactory.fileProperty(); breakingChangesFile = objectFactory.fileProperty(); + + gitWrapper = new GitWrapper(execOperations); } @TaskAction public void executeTask() throws IOException { + if (needsGitTags(VersionProperties.getElasticsearch())) { + findAndUpdateUpstreamRemote(gitWrapper); + } + LOGGER.info("Finding changelog files..."); - final Version checkoutVersion = VersionProperties.getElasticsearchVersion(); + final Map> filesByVersion = partitionFilesByVersion( + gitWrapper, + VersionProperties.getElasticsearch(), + this.changelogs.getFiles() + ); - final List entries = this.changelogs.getFiles() - .stream() - .map(ChangelogEntry::parse) - .filter( - // Only process changelogs that are included in this minor version series of ES. - // If this change was released in an earlier major or minor version of Elasticsearch, do not - // include it in the notes. An earlier patch version is OK, the release notes include changes - // for every patch release in a minor series. 
- log -> { - final List versionsForChangelogFile = log.getVersions() - .stream() - .map(v -> Version.fromString(v, Version.Mode.RELAXED)) - .collect(Collectors.toList()); - - final Predicate includedInSameMinor = v -> v.getMajor() == checkoutVersion.getMajor() - && v.getMinor() == checkoutVersion.getMinor(); - - final Predicate includedInEarlierMajorOrMinor = v -> v.getMajor() < checkoutVersion.getMajor() - || (v.getMajor() == checkoutVersion.getMajor() && v.getMinor() < checkoutVersion.getMinor()); - - boolean includedInThisMinor = versionsForChangelogFile.stream().anyMatch(includedInSameMinor); - - if (includedInThisMinor) { - return versionsForChangelogFile.stream().noneMatch(includedInEarlierMajorOrMinor); - } else { - return false; - } - } - ) - .collect(Collectors.toList()); + final List entries = new ArrayList<>(); + final Map> changelogsByVersion = new HashMap<>(); + + filesByVersion.forEach((version, files) -> { + Set entriesForVersion = files.stream().map(ChangelogEntry::parse).collect(toSet()); + entries.addAll(entriesForVersion); + changelogsByVersion.put(version, entriesForVersion); + }); + + final Set versions = getVersions(gitWrapper, VersionProperties.getElasticsearch()); LOGGER.info("Updating release notes index..."); - ReleaseNotesIndexUpdater.update(this.releaseNotesIndexTemplate.get().getAsFile(), this.releaseNotesIndexFile.get().getAsFile()); + ReleaseNotesIndexGenerator.update( + versions, + this.releaseNotesIndexTemplate.get().getAsFile(), + this.releaseNotesIndexFile.get().getAsFile() + ); LOGGER.info("Generating release notes..."); - ReleaseNotesGenerator.update(this.releaseNotesTemplate.get().getAsFile(), this.releaseNotesFile.get().getAsFile(), entries); + ReleaseNotesGenerator.update( + this.releaseNotesTemplate.get().getAsFile(), + this.releaseNotesFile.get().getAsFile(), + changelogsByVersion + ); LOGGER.info("Generating release highlights..."); - ReleaseHighlightsGenerator.update(this.releaseHighlightsTemplate.get().getAsFile(), this.releaseHighlightsFile.get().getAsFile(), entries); + ReleaseHighlightsGenerator.update( + this.releaseHighlightsTemplate.get().getAsFile(), + this.releaseHighlightsFile.get().getAsFile(), + entries + ); LOGGER.info("Generating breaking changes / deprecations notes..."); - BreakingChangesGenerator.update(this.breakingChangesTemplate.get().getAsFile(), this.breakingChangesFile.get().getAsFile(), entries); + BreakingChangesGenerator.update( + this.breakingChangesTemplate.get().getAsFile(), + this.breakingChangesFile.get().getAsFile(), + entries + ); + } + + /** + * Find all tags in the `major.minor` series for the supplied version + * @param gitWrapper used to call `git` + * @param currentVersion the version to base the query upon + * @return all versions in the series + */ + @VisibleForTesting + static Set getVersions(GitWrapper gitWrapper, String currentVersion) { + QualifiedVersion v = QualifiedVersion.of(currentVersion); + Set versions = gitWrapper.listVersions("v" + v.getMajor() + '.' + v.getMinor() + ".*").collect(toSet()); + versions.add(v); + return versions; + } + + /** + * Group a set of files by the version in which they first appeared, up until the supplied version. Any files not + * present in an earlier version are assumed to have been introduced in the specified version. + * + *

This method works by finding all git tags prior to {@param versionString} in the same minor series, and + * examining the git tree for that tag. By doing this over each tag, it is possible to see how the contents + * of the changelog directory changed over time. + * + * @param gitWrapper used to call `git` + * @param versionString the "current" version. Does not require a tag in git. + * @param allFilesInCheckout the files to partition + * @return a mapping from version to the files added in that version. + */ + @VisibleForTesting + static Map> partitionFilesByVersion( + GitWrapper gitWrapper, + String versionString, + Set allFilesInCheckout + ) { + if (needsGitTags(versionString) == false) { + return Map.of(QualifiedVersion.of(versionString), allFilesInCheckout); + } + + QualifiedVersion currentVersion = QualifiedVersion.of(versionString); + + // Find all tags for this minor series, using a wildcard tag pattern. + String tagWildcard = "v%d.%d*".formatted(currentVersion.getMajor(), currentVersion.getMinor()); + + final List earlierVersions = gitWrapper.listVersions(tagWildcard) + // Only keep earlier versions, and if `currentVersion` is a prerelease, then only prereleases too. + .filter(each -> each.isBefore(currentVersion) && (currentVersion.hasQualifier() == each.hasQualifier())) + .sorted(naturalOrder()) + .collect(toList()); + + if (earlierVersions.isEmpty()) { + throw new GradleException("Failed to find git tags prior to [v" + currentVersion + "]"); + } + + Map> partitionedFiles = new HashMap<>(); + + Set mutableAllFilesInCheckout = new HashSet<>(allFilesInCheckout); + + // 1. For each earlier version + earlierVersions.forEach(earlierVersion -> { + // 2. Find all the changelog files it contained + Set filesInTreeForVersion = gitWrapper.listFiles("v" + earlierVersion, "docs/changelog") + .map(line -> Path.of(line).getFileName().toString()) + .collect(toSet()); + + Set filesForVersion = new HashSet<>(); + partitionedFiles.put(earlierVersion, filesForVersion); + + // 3. Find the `File` object for each one + final Iterator filesIterator = mutableAllFilesInCheckout.iterator(); + while (filesIterator.hasNext()) { + File nextFile = filesIterator.next(); + if (filesInTreeForVersion.contains(nextFile.getName())) { + // 4. And remove it so that it is associated with the earlier version + filesForVersion.add(nextFile); + filesIterator.remove(); + } + } + }); + + // 5. Associate whatever is left with the current version. + partitionedFiles.put(currentVersion, mutableAllFilesInCheckout); + + return partitionedFiles; + } + + /** + * Ensure the upstream git remote is up-to-date. The upstream is whatever git remote references `elastic/elasticsearch`. + * @param gitWrapper used to call `git` + */ + private static void findAndUpdateUpstreamRemote(GitWrapper gitWrapper) { + LOGGER.info("Finding upstream git remote"); + // We need to ensure the tags are up-to-date. 
Find the correct remote to use + String upstream = gitWrapper.listRemotes() + .entrySet() + .stream() + .filter(entry -> entry.getValue().contains("elastic/elasticsearch")) + .findFirst() + .map(Map.Entry::getKey) + .orElseThrow( + () -> new GradleException( + "I need to ensure the git tags are up-to-date, but I couldn't find a git remote for [elastic/elasticsearch]" + ) + ); + + LOGGER.info("Updating remote [{}]", upstream); + // Now update the remote, and make sure we update the tags too + gitWrapper.updateRemote(upstream); + + LOGGER.info("Updating tags from [{}]", upstream); + gitWrapper.updateTags(upstream); + } + + /** + * This methods checks the supplied version and answers {@code false} if the fetching of git + * tags can be skipped, or {@code true} otherwise. + *

+ * The first version in a minor series will never have any preceding versions, so there's no + * need to fetch tags and examine the repository state in the past. This applies when the + * version is a release version, a snapshot, or the first alpha version. Subsequent alphas, + * betas and release candidates need to check the previous prelease tags. + * + * @param versionString the version string to check + * @return whether fetching git tags is required + */ + @VisibleForTesting + static boolean needsGitTags(String versionString) { + if (versionString.endsWith(".0") || versionString.endsWith(".0-SNAPSHOT") || versionString.endsWith(".0-alpha1")) { + return false; + } + + return true; } @InputFiles diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GitWrapper.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GitWrapper.java new file mode 100644 index 0000000000000..9cfb649399a90 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GitWrapper.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.gradle.process.ExecOperations; + +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * This class wraps certain {@code git} operations. This is partly for convenience, and partly so that these + * operations can be easily mocked in testing. + */ +public class GitWrapper { + + private final ExecOperations execOperations; + + public GitWrapper(ExecOperations execOperations) { + this.execOperations = execOperations; + } + + /** + * @return a mapping from remote names to remote URLs. + */ + public Map listRemotes() { + return runCommand("git", "remote", "-v").lines() + .filter(l -> l.contains("(fetch)")) + .map(line -> line.split("\\s+")) + .collect(Collectors.toMap(parts -> parts[0], parts -> parts[1])); + } + + String runCommand(String... args) { + final ByteArrayOutputStream stdout = new ByteArrayOutputStream(); + + execOperations.exec(spec -> { + // The redundant cast is to silence a compiler warning. + spec.setCommandLine((Object[]) args); + spec.setStandardOutput(stdout); + }); + + return stdout.toString(StandardCharsets.UTF_8); + } + + /** + * Updates the git repository from the specified remote + * @param remote the remote to use to update + */ + public void updateRemote(String remote) { + runCommand("git", "fetch", Objects.requireNonNull(remote)); + } + + /** + * Updates the git repository's tags from the specified remote + * @param remote the remote to use to update + */ + public void updateTags(String remote) { + runCommand("git", "fetch", "--tags", Objects.requireNonNull(remote)); + } + + /** + * Fetch all tags matching the specified pattern, returning them as {@link QualifiedVersion} instances. 
+ * @param pattern the tag pattern to match + * @return matching versions + */ + public Stream listVersions(String pattern) { + return runCommand("git", "tag", "-l", pattern).lines().map(QualifiedVersion::of); + } + + /** + * Returns all files at the specified {@param path} for the state of the git repository at {@param ref}. + * + * @param ref the ref to use + * @param path the path to list + * @return A stream of file names. No path information is included. + */ + public Stream listFiles(String ref, String path) { + return runCommand("git", "ls-tree", "--name-only", "-r", ref, path).lines(); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java new file mode 100644 index 0000000000000..0cc579e68d52a --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/QualifiedVersion.java @@ -0,0 +1,182 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.elasticsearch.gradle.Version; + +import java.util.Comparator; +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Encapsulates comparison and printing logic for an x.y.z version with optional qualifier. This class is very similar + * to {@link Version}, but it dissects the qualifier in such a way that is incompatible + * with how {@link Version} is used in the build. It also retains any qualifier (prerelease) information, and uses + * that information when comparing instances. + */ +public final class QualifiedVersion implements Comparable { + private final int major; + private final int minor; + private final int revision; + private final Qualifier qualifier; + + private static final Pattern pattern = Pattern.compile( + "^v? (\\d+) \\. (\\d+) \\. (\\d+) (?: - (alpha\\d+ | beta\\d+ | rc\\d+ | SNAPSHOT ) )? $", + Pattern.COMMENTS + ); + + private QualifiedVersion(int major, int minor, int revision, String qualifier) { + this.major = major; + this.minor = minor; + this.revision = revision; + this.qualifier = qualifier == null ? null : Qualifier.of(qualifier); + } + + /** + * Parses the supplied string into an object. + * @param s a version string in strict semver + * @return a new instance + */ + public static QualifiedVersion of(final String s) { + Objects.requireNonNull(s); + Matcher matcher = pattern.matcher(s); + if (matcher.matches() == false) { + throw new IllegalArgumentException("Invalid version format: '" + s + "'. Should be " + pattern); + } + + return new QualifiedVersion( + Integer.parseInt(matcher.group(1)), + Integer.parseInt(matcher.group(2)), + Integer.parseInt(matcher.group(3)), + matcher.group(4) + ); + } + + @Override + public String toString() { + return "%d.%d.%d%s".formatted(major, minor, revision, qualifier == null ? 
"" : "-" + qualifier); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + QualifiedVersion version = (QualifiedVersion) o; + return major == version.major + && minor == version.minor + && revision == version.revision + && Objects.equals(qualifier, version.qualifier); + } + + @Override + public int hashCode() { + return Objects.hash(major, minor, revision, qualifier); + } + + public int getMajor() { + return major; + } + + public int getMinor() { + return minor; + } + + public int getRevision() { + return revision; + } + + public boolean hasQualifier() { + return qualifier != null; + } + + public Qualifier getQualifier() { + return qualifier; + } + + public boolean isSnapshot() { + return this.qualifier != null && this.qualifier.level == QualifierLevel.SNAPSHOT; + } + + public QualifiedVersion withoutQualifier() { + return new QualifiedVersion(major, minor, revision, null); + } + + private static final Comparator COMPARATOR = Comparator.comparing((QualifiedVersion v) -> v.major) + .thenComparing(v -> v.minor) + .thenComparing(v -> v.revision) + .thenComparing((QualifiedVersion v) -> v.qualifier, Comparator.nullsLast(Comparator.naturalOrder())); + + @Override + public int compareTo(QualifiedVersion other) { + return COMPARATOR.compare(this, other); + } + + public boolean isBefore(QualifiedVersion other) { + return this.compareTo(other) < 0; + } + + private enum QualifierLevel { + alpha, + beta, + rc, + SNAPSHOT + } + + private static class Qualifier implements Comparable { + private final QualifierLevel level; + private final int number; + + private Qualifier(QualifierLevel level, int number) { + this.level = level; + this.number = number; + } + + private static final Comparator COMPARATOR = Comparator.comparing((Qualifier p) -> p.level).thenComparing(p -> p.number); + + @Override + public int compareTo(Qualifier other) { + return COMPARATOR.compare(this, other); + } + + private static Qualifier of(String qualifier) { + if ("SNAPSHOT".equals(qualifier)) { + return new Qualifier(QualifierLevel.SNAPSHOT, 0); + } + + Pattern pattern = Pattern.compile("^(alpha|beta|rc)(\\d+)$"); + Matcher matcher = pattern.matcher(qualifier); + if (matcher.find()) { + String level = matcher.group(1); + int number = Integer.parseInt(matcher.group(2)); + return new Qualifier(QualifierLevel.valueOf(level), number); + } else { + // This shouldn't happen - we check the format before this is called + throw new IllegalArgumentException("Invalid qualifier [" + qualifier + "] passed"); + } + } + + public String toString() { + return level == QualifierLevel.SNAPSHOT ? 
level.name() : level.name() + number; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Qualifier that = (Qualifier) o; + return number == that.number && level == that.level; + } + + @Override + public int hashCode() { + return Objects.hash(level, number); + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java index 02b450aa22eea..e8e807f301a2c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGenerator.java @@ -8,11 +8,8 @@ package org.elasticsearch.gradle.internal.release; -import groovy.text.SimpleTemplateEngine; - import com.google.common.annotations.VisibleForTesting; -import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; import java.io.File; @@ -32,13 +29,14 @@ public class ReleaseHighlightsGenerator { static void update(File templateFile, File outputFile, List entries) throws IOException { try (FileWriter output = new FileWriter(outputFile)) { - generateFile(VersionProperties.getElasticsearchVersion(), Files.readString(templateFile.toPath()), entries, output); + output.write( + generateFile(QualifiedVersion.of(VersionProperties.getElasticsearch()), Files.readString(templateFile.toPath()), entries) + ); } } @VisibleForTesting - static void generateFile(Version version, String templateFile, List entries, FileWriter outputWriter) - throws IOException { + static String generateFile(QualifiedVersion version, String template, List entries) throws IOException { final List priorVersions = new ArrayList<>(); if (version.getMinor() > 0) { @@ -66,11 +64,6 @@ static void generateFile(Version version, String templateFile, List changelogs) throws IOException { + static void update(File templateFile, File outputFile, Map> changelogs) throws IOException { final String templateString = Files.readString(templateFile.toPath()); try (FileWriter output = new FileWriter(outputFile)) { - generateFile(VersionProperties.getElasticsearchVersion(), templateString, changelogs, output); + output.write(generateFile(templateString, changelogs)); } } @VisibleForTesting - static void generateFile(Version version, String template, List changelogs, Writer outputWriter) throws IOException { - final var changelogsByVersionByTypeByArea = buildChangelogBreakdown(version, changelogs); + static String generateFile(String template, Map> changelogs) throws IOException { + final var changelogsByVersionByTypeByArea = buildChangelogBreakdown(changelogs); final Map bindings = new HashMap<>(); bindings.put("changelogsByVersionByTypeByArea", changelogsByVersionByTypeByArea); bindings.put("TYPE_LABELS", TYPE_LABELS); - try { - final SimpleTemplateEngine engine = new SimpleTemplateEngine(); - engine.createTemplate(template).make(bindings).writeTo(outputWriter); - } catch (ClassNotFoundException e) { - throw new GradleException("Failed to generate file from template", e); - } + return TemplateUtils.render(template, bindings); } - private static Map>>> buildChangelogBreakdown( - Version elasticsearchVersion, - List changelogs + private static Map>>> buildChangelogBreakdown( + Map> changelogsByVersion ) { - final Predicate includedInSameMinor = v -> 
v.getMajor() == elasticsearchVersion.getMajor() - && v.getMinor() == elasticsearchVersion.getMinor(); - - final Map>>> changelogsByVersionByTypeByArea = changelogs.stream() - .collect( - Collectors.groupingBy( - // Key changelog entries by the earlier version in which they were released - entry -> entry.getVersions() - .stream() - .map(v -> Version.fromString(v.replaceFirst("^v", ""))) - .filter(includedInSameMinor) - .sorted() - .findFirst() - .get(), - - // Generate a reverse-ordered map. Despite the IDE saying the type can be inferred, removing it - // causes the compiler to complain. - () -> new TreeMap>>>(Comparator.reverseOrder()), - - // Group changelogs entries by their change type - Collectors.groupingBy( + Map>>> changelogsByVersionByTypeByArea = new TreeMap<>( + Comparator.reverseOrder() + ); + + changelogsByVersion.forEach((version, changelogs) -> { + Map>> changelogsByTypeByArea = changelogs.stream() + .collect( + groupingBy( // Entries with breaking info are always put in the breaking section entry -> entry.getBreaking() == null ? entry.getType() : "breaking", TreeMap::new, // Group changelogs for each type by their team area - Collectors.groupingBy( + groupingBy( // `security` and `known-issue` areas don't need to supply an area entry -> entry.getType().equals("known-issue") || entry.getType().equals("security") ? "_all_" : entry.getArea(), TreeMap::new, - Collectors.toList() + toList() ) ) - ) - ); + ); + + changelogsByVersionByTypeByArea.put(version, changelogsByTypeByArea); + }); // Sort per-area changelogs by their summary text. Assumes that the underlying list is sortable changelogsByVersionByTypeByArea.forEach( (_version, byVersion) -> byVersion.forEach( (_type, byTeam) -> byTeam.forEach( - (_team, changelogsForTeam) -> changelogsForTeam.sort(Comparator.comparing(ChangelogEntry::getSummary)) + (_team, changelogsForTeam) -> changelogsForTeam.sort(comparing(ChangelogEntry::getSummary)) ) ) ); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java new file mode 100644 index 0000000000000..839c318f32784 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGenerator.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.google.common.annotations.VisibleForTesting; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.nio.file.Files; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.stream.Collectors; + +import static java.util.Comparator.reverseOrder; + +/** + * This class ensures that the release notes index page has the appropriate anchors and include directives + * for the current repository version. 
+ */
+public class ReleaseNotesIndexGenerator {
+
+    static void update(Set<QualifiedVersion> versions, File indexTemplate, File indexFile) throws IOException {
+        try (FileWriter indexFileWriter = new FileWriter(indexFile)) {
+            indexFileWriter.write(generateFile(versions, Files.readString(indexTemplate.toPath())));
+        }
+    }
+
+    @VisibleForTesting
+    static String generateFile(Set<QualifiedVersion> versionsSet, String template) throws IOException {
+        final Set<QualifiedVersion> versions = new TreeSet<>(reverseOrder());
+
+        // For the purpose of generating the index, snapshot versions are the same as released versions. Prerelease versions are not.
+        versionsSet.stream().map(v -> v.isSnapshot() ? v.withoutQualifier() : v).forEach(versions::add);
+
+        final List<String> includeVersions = versions.stream()
+            .map(v -> v.hasQualifier() ? v.toString() : v.getMajor() + "." + v.getMinor())
+            .distinct()
+            .collect(Collectors.toList());
+
+        final Map<String, Object> bindings = new HashMap<>();
+        bindings.put("versions", versions);
+        bindings.put("includeVersions", includeVersions);
+
+        return TemplateUtils.render(template, bindings);
+    }
+}
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexUpdater.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexUpdater.java
deleted file mode 100644
index 5403d1e03f303..0000000000000
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexUpdater.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-package org.elasticsearch.gradle.internal.release;
-
-import groovy.text.SimpleTemplateEngine;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.elasticsearch.gradle.Version;
-import org.elasticsearch.gradle.VersionProperties;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.Writer;
-import java.nio.file.Files;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * This class ensures that the release notes index page has the appropriate anchors and include directives
- * for the current repository version. It achieves this by parsing out the existing entries and writing
- * out the file again.
- */ -public class ReleaseNotesIndexUpdater { - - static void update(File indexTemplate, File indexFile) throws IOException { - final List existingIndexLines = Files.readAllLines(indexFile.toPath()); - try (FileWriter indexFileWriter = new FileWriter(indexFile)) { - generateFile( - VersionProperties.getElasticsearchVersion(), - existingIndexLines, - Files.readString(indexTemplate.toPath()), - indexFileWriter - ); - } - } - - @VisibleForTesting - static void generateFile(Version version, List existingIndexLines, String indexTemplate, Writer outputWriter) - throws IOException { - final List existingVersions = existingIndexLines.stream() - .filter(line -> line.startsWith("* < line.replace("* <>", "")) - .distinct() - .collect(Collectors.toList()); - - final List existingIncludes = existingIndexLines.stream() - .filter(line -> line.startsWith("include::")) - .map(line -> line.replace("include::release-notes/", "").replace(".asciidoc[]", "")) - .distinct() - .collect(Collectors.toList()); - - final String versionString = version.toString(); - - if (existingVersions.contains(versionString) == false) { - int insertionIndex = existingVersions.size() - 1; - while (insertionIndex > 0 && Version.fromString(existingVersions.get(insertionIndex)).before(version)) { - insertionIndex -= 1; - } - existingVersions.add(insertionIndex, versionString); - } - - final String includeString = version.getMajor() + "." + version.getMinor(); - - if (existingIncludes.contains(includeString) == false) { - int insertionIndex = existingIncludes.size() - 1; - while (insertionIndex > 0 && Version.fromString(ensurePatchVersion(existingIncludes.get(insertionIndex))).before(version)) { - insertionIndex -= 1; - } - existingIncludes.add(insertionIndex, includeString); - } - - final Map bindings = new HashMap<>(); - bindings.put("existingVersions", existingVersions); - bindings.put("existingIncludes", existingIncludes); - - try { - final SimpleTemplateEngine engine = new SimpleTemplateEngine(); - engine.createTemplate(indexTemplate).make(bindings).writeTo(outputWriter); - } catch (ClassNotFoundException e) { - throw new RuntimeException(e); - } - } - - private static String ensurePatchVersion(String version) { - return version.matches("^\\d+\\.\\d+\\.\\d+.*$") ? 
version : version + ".0"; - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index d7d85504a0178..368df62b90921 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -40,9 +40,11 @@ public ReleaseToolsPlugin(ProjectLayout projectLayout) { @Override public void apply(Project project) { - project.getPluginManager().apply(PrecommitTaskPlugin.class); + project.getPluginManager().apply(PrecommitTaskPlugin.class); final Directory projectDirectory = projectLayout.getProjectDirectory(); + final Version version = VersionProperties.getElasticsearchVersion(); + final FileTree yamlFiles = projectDirectory.dir("docs/changelog") .getAsFileTree() .matching(new PatternSet().include("**/*.yml", "**/*.yaml")); @@ -65,8 +67,6 @@ public void apply(Project project) { }); project.getTasks().register("generateReleaseNotes", GenerateReleaseNotesTask.class).configure(task -> { - final Version version = VersionProperties.getElasticsearchVersion(); - task.setGroup("Documentation"); task.setDescription("Generates release notes from changelog files held in this checkout"); task.setChangelogs(yamlFiles); @@ -92,6 +92,6 @@ public void apply(Project project) { task.dependsOn(validateChangelogsTask); }); - project.getTasks().named("precommit").configure(task -> task.dependsOn(validateChangelogsTask)); + project.getTasks().named("precommit").configure(task -> task.dependsOn(validateChangelogsTask)); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TemplateUtils.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TemplateUtils.java new file mode 100644 index 0000000000000..ef2915f847950 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TemplateUtils.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import groovy.text.SimpleTemplateEngine; + +import java.io.IOException; +import java.io.StringWriter; +import java.util.Map; + +/** + * Methods for working with Groovy templates. + */ +public class TemplateUtils { + + /** + * Applies {@code bindings} to {@code template}, then removes all carriage returns from + * the result. 
+ * + * @param template a Groovy template + * @param bindings parameters for the template + * @return the rendered template + */ + public static String render(String template, Map bindings) throws IOException { + final StringWriter writer = new StringWriter(); + + try { + final SimpleTemplateEngine engine = new SimpleTemplateEngine(); + engine.createTemplate(template).make(bindings).writeTo(writer); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + + return writer.toString().replace("\\r", ""); + } + +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java index 5f030eb074653..149e8411dffaa 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ValidateChangelogEntryTask.java @@ -17,10 +17,10 @@ import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.TaskAction; -import javax.inject.Inject; import java.net.URI; import java.util.Map; import java.util.stream.Collectors; +import javax.inject.Inject; /** * Performs additional checks on changelog files, beyond whether they conform to the schema. @@ -49,13 +49,15 @@ public void executeTask() { if (type.equals("known-issue") == false && type.equals("security") == false) { if (entry.getPr() == null) { - throw new GradleException("[" + path + "] must provide a [pr] number (only 'known-issue' and " + - "'security' entries can omit this"); + throw new GradleException( + "[" + path + "] must provide a [pr] number (only 'known-issue' and " + "'security' entries can omit this" + ); } if (entry.getArea() == null) { - throw new GradleException("[" + path + "] must provide an [area] (only 'known-issue' and " + - "'security' entries can omit this"); + throw new GradleException( + "[" + path + "] must provide an [area] (only 'known-issue' and " + "'security' entries can omit this" + ); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java index b104d1aa3df77..502d3f886fc60 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java @@ -18,7 +18,7 @@ import org.elasticsearch.gradle.internal.test.rest.RestResourcesExtension; import org.elasticsearch.gradle.internal.test.rest.RestResourcesPlugin; import org.elasticsearch.gradle.internal.test.rest.RestTestUtil; -import org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; @@ -43,8 +43,8 @@ * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. 
*/ public class YamlRestCompatTestPlugin implements Plugin { - public static final String REST_COMPAT_CHECK_TASK_NAME = "checkRestCompat"; - public static final String SOURCE_SET_NAME = "yamlRestCompatTest"; + private static final String REST_COMPAT_CHECK_TASK_NAME = "checkRestCompat"; + private static final String SOURCE_SET_NAME = "yamlRestCompatTest"; private static final Path RELATIVE_API_PATH = Path.of("rest-api-spec/api"); private static final Path RELATIVE_TEST_PATH = Path.of("rest-api-spec/test"); private static final Path RELATIVE_REST_API_RESOURCES = Path.of("rest-api-spec/src/main/resources"); @@ -63,15 +63,15 @@ public void apply(Project project) { project.getPluginManager().apply(TestClustersPlugin.class); project.getPluginManager().apply(RestTestBasePlugin.class); project.getPluginManager().apply(RestResourcesPlugin.class); - project.getPluginManager().apply(YamlRestTestPlugin.class); + project.getPluginManager().apply(InternalYamlRestTestPlugin.class); RestResourcesExtension extension = project.getExtensions().getByType(RestResourcesExtension.class); // create source set SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet yamlCompatTestSourceSet = sourceSets.create(SOURCE_SET_NAME); - SourceSet yamlTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME); - GradleUtils.extendSourceSet(project, YamlRestTestPlugin.SOURCE_SET_NAME, SOURCE_SET_NAME); + SourceSet yamlTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME); + GradleUtils.extendSourceSet(project, InternalYamlRestTestPlugin.SOURCE_SET_NAME, SOURCE_SET_NAME); // copy compatible rest specs Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); @@ -138,9 +138,10 @@ public void apply(Project project) { task.onlyIf(t -> isEnabled(project)); }); + // transform the copied tests task TaskProvider transformCompatTestTask = project.getTasks() - .register("transformV" + compatibleVersion + "RestTests", RestCompatTestTransformTask.class, task -> { + .register("yamlRestTestV"+ compatibleVersion + "CompatTransform", RestCompatTestTransformTask.class, task -> { task.getSourceDirectory().set(copyCompatYamlTestTask.flatMap(CopyRestTestsTask::getOutputResourceDir)); task.getOutputDirectory() .set(project.getLayout().getBuildDirectory().dir(compatTestsDir.resolve("transformed").toString())); @@ -161,9 +162,11 @@ public void apply(Project project) { .named(RestResourcesPlugin.COPY_YAML_TESTS_TASK) .flatMap(CopyRestTestsTask::getOutputResourceDir); - // setup the yamlRestTest task - Provider yamlRestCompatTestTask = RestTestUtil.registerTestTask(project, yamlCompatTestSourceSet); - project.getTasks().withType(RestIntegTestTask.class).named(SOURCE_SET_NAME).configure(testTask -> { + String testTaskName = "yamlRestTestV"+ compatibleVersion + "CompatTest"; + + // setup the test task + Provider yamlRestCompatTestTask = RestTestUtil.registerTestTask(project, yamlCompatTestSourceSet, testTaskName); + project.getTasks().withType(RestIntegTestTask.class).named(testTaskName).configure(testTask -> { // Use test runner and classpath from "normal" yaml source set testTask.setTestClassesDirs( yamlTestSourceSet.getOutput().getClassesDirs().plus(yamlCompatTestSourceSet.getOutput().getClassesDirs()) @@ -176,7 +179,7 @@ public void apply(Project project) { .minus(project.files(originalYamlTestsDir)) ); // run compatibility tests after "normal" tests - 
testTask.mustRunAfter(project.getTasks().named(YamlRestTestPlugin.SOURCE_SET_NAME)); + testTask.mustRunAfter(project.getTasks().named(InternalYamlRestTestPlugin.SOURCE_SET_NAME)); testTask.onlyIf(t -> isEnabled(project)); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java index cdc9907dd4cfa..8458ffe3e7fe4 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/DistroTestPlugin.java @@ -364,38 +364,16 @@ private List configureDistributions(Project project) List currentDistros = new ArrayList<>(); for (Architecture architecture : Architecture.values()) { - ALL_INTERNAL.stream().forEach(type -> { - for (boolean bundledJdk : Arrays.asList(true, false)) { - if (bundledJdk == false) { - // We'll never publish an ARM (aarch64) build without a bundled JDK. - if (architecture == Architecture.AARCH64) { - continue; - } - // All our Docker images include a bundled JDK so it doesn't make sense to test without one. - if (type.isDocker()) { - continue; - } - } - currentDistros.add( - createDistro(distributions, architecture, type, null, bundledJdk, VersionProperties.getElasticsearch()) - ); - } - }); + ALL_INTERNAL.stream().forEach(type -> currentDistros.add( + createDistro(distributions, architecture, type, null, true, VersionProperties.getElasticsearch()) + )); } for (Architecture architecture : Architecture.values()) { for (Platform platform : Arrays.asList(Platform.LINUX, Platform.WINDOWS)) { - for (boolean bundledJdk : Arrays.asList(true, false)) { - if (bundledJdk == false && architecture != Architecture.X64) { - // We will never publish distributions for non-x86 (amd64) platforms - // without a bundled JDK - continue; - } - - currentDistros.add( - createDistro(distributions, architecture, ARCHIVE, platform, bundledJdk, VersionProperties.getElasticsearch()) - ); - } + currentDistros.add( + createDistro(distributions, architecture, ARCHIVE, platform, true, VersionProperties.getElasticsearch()) + ); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java similarity index 97% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java rename to build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java index 5a76543bebca1..25ee8a24b9791 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/YamlRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java @@ -22,7 +22,7 @@ /** * Apply this plugin to run the YAML based REST tests. 
*/ -public class YamlRestTestPlugin implements Plugin { +public class InternalYamlRestTestPlugin implements Plugin { public static final String SOURCE_SET_NAME = "yamlRestTest"; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java index 9b83bccaaa168..42965436daaeb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java @@ -28,11 +28,18 @@ private RestTestUtil() { } /** - * Creates a task with the source set name of type {@link RestIntegTestTask} + * Creates a {@link RestIntegTestTask} task with the source set of the same name */ public static Provider registerTestTask(Project project, SourceSet sourceSet) { + return registerTestTask(project, sourceSet, sourceSet.getName()); + } + + /** + * Creates a {@link RestIntegTestTask} task with a custom name for the provided source set + */ + public static Provider registerTestTask(Project project, SourceSet sourceSet, String taskName) { // lazily create the test task - return project.getTasks().register(sourceSet.getName(), RestIntegTestTask.class, testTask -> { + return project.getTasks().register(taskName, RestIntegTestTask.class, testTask -> { testTask.setGroup(JavaBasePlugin.VERIFICATION_GROUP); testTask.setDescription("Runs the REST tests against an external cluster"); project.getPlugins().withType(JavaPlugin.class, t -> diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index a2dfc5ecd306f..433c841ebbc8a 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -103,14 +103,6 @@ "type": "string", "minLength": 1 }, - "versions": { - "type": "array", - "items": { - "type": "string", - "pattern": "^v?\\d+\\.\\d+\\.\\d+$", - "minItems": 1 - } - }, "highlight": { "$ref": "#/definitions/Highlight" }, @@ -123,8 +115,7 @@ }, "required": [ "type", - "summary", - "versions" + "summary" ] }, "Highlight": { diff --git a/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt index 64c05ad953ab2..8426ad3c7bb98 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-all-signatures.txt @@ -54,3 +54,4 @@ java.util.concurrent.ScheduledThreadPoolExecutor#(int) java.util.concurrent.ScheduledThreadPoolExecutor#(int, java.util.concurrent.ThreadFactory) java.util.concurrent.ScheduledThreadPoolExecutor#(int, java.util.concurrent.RejectedExecutionHandler) java.util.concurrent.ScheduledThreadPoolExecutor#(int, java.util.concurrent.ThreadFactory, java.util.concurrent.RejectedExecutionHandler) + diff --git a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc index 40b828d609745..6d349ac5c26c5 100644 --- a/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc +++ b/build-tools-internal/src/main/resources/templates/release-highlights.asciidoc @@ -8,7 +8,7 @@ ifeval::[\\{release-state}\\"!=\\"unreleased\\"] For detailed information about this release, see the <> and <>. 
endif::[] -<% if (priorVersions.size > 0) { %> +<% if (priorVersions.size() > 0) { %> // Add previous release to the list Other versions: diff --git a/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc b/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc index 0b62b9b3f1e01..ba30aea3bf14e 100644 --- a/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc +++ b/build-tools-internal/src/main/resources/templates/release-notes-index.asciidoc @@ -6,7 +6,7 @@ This section summarizes the changes in each release. -<% existingVersions.each { print "* <>\n" } %> +<% versions.each { print "* <>\n" } %> -- -<% existingIncludes.each { print "include::release-notes/${ it }.asciidoc[]\n" } %> +<% includeVersions.each { print "include::release-notes/${ it }.asciidoc[]\n" } %> diff --git a/build-tools-internal/src/main/resources/templates/release-notes.asciidoc b/build-tools-internal/src/main/resources/templates/release-notes.asciidoc index 35384c8f4ce66..630af885aaaf5 100644 --- a/build-tools-internal/src/main/resources/templates/release-notes.asciidoc +++ b/build-tools-internal/src/main/resources/templates/release-notes.asciidoc @@ -1,13 +1,14 @@ <% for (version in changelogsByVersionByTypeByArea.keySet()) { -%>[[release-notes-$version]] -== {es} version $version -<% if (version.qualifier == "SNAPSHOT") { %> -coming[$version] +def unqualifiedVersion = version.withoutQualifier() +%>[[release-notes-$unqualifiedVersion]] +== {es} version ${unqualifiedVersion} +<% if (version.isSnapshot()) { %> +coming[$unqualifiedVersion] <% } %> Also see <>. <% if (changelogsByVersionByTypeByArea[version]["security"] != null) { %> [discrete] -[[security-updates-${version}]] +[[security-updates-${unqualifiedVersion}]] === Security updates <% for (change in changelogsByVersionByTypeByArea[version].remove("security").remove("_all_")) { @@ -16,7 +17,7 @@ Also see < [discrete] -[[known-issues-${version}]] +[[known-issues-${unqualifiedVersion}]] === Known issues <% for (change in changelogsByVersionByTypeByArea[version].remove("known-issue").remove("_all_")) { @@ -24,9 +25,9 @@ if (changelogsByVersionByTypeByArea[version]["known-issue"] != null) { %> } } for (changeType in changelogsByVersionByTypeByArea[version].keySet()) { %> -[[${ changeType }-${ version }]] +[[${ changeType }-${ unqualifiedVersion }]] [float] -=== ${ TYPE_LABELS[changeType] } +=== ${ TYPE_LABELS.getOrDefault(changeType, 'No mapping for TYPE_LABELS[' + changeType + ']') } <% for (team in changelogsByVersionByTypeByArea[version][changeType].keySet()) { print "\n${team}::\n"; @@ -41,5 +42,6 @@ for (changeType in changelogsByVersionByTypeByArea[version].keySet()) { %> } } } +print "\n\n" } %> diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java new file mode 100644 index 0000000000000..e127af11b0fe7 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.release; + +import org.junit.Ignore; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; +import java.util.Objects; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThat; + +@Ignore("https://github.com/elastic/elasticsearch/issues/77190") +public class BreakingChangesGeneratorTest { + + /** + * Check that the breaking changes can be correctly generated. + */ + @Test + public void generateFile_rendersCorrectMarkup() throws Exception { + // given: + final String template = getResource("/templates/breaking-changes.asciidoc"); + final String expectedOutput = getResource( + "/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc" + ); + final List entries = getEntries(); + + // when: + final String actualOutput = BreakingChangesGenerator.generateFile(QualifiedVersion.of("8.4.0-SNAPSHOT"), template, entries); + + // then: + assertThat(actualOutput, equalTo(expectedOutput)); + } + + private List getEntries() { + ChangelogEntry entry1 = new ChangelogEntry(); + ChangelogEntry.Breaking breaking1 = new ChangelogEntry.Breaking(); + entry1.setBreaking(breaking1); + + breaking1.setNotable(true); + breaking1.setTitle("Breaking change number 1"); + breaking1.setArea("API"); + breaking1.setDetails("Breaking change details 1"); + breaking1.setImpact("Breaking change impact description 1"); + + ChangelogEntry entry2 = new ChangelogEntry(); + ChangelogEntry.Breaking breaking2 = new ChangelogEntry.Breaking(); + entry2.setBreaking(breaking2); + + breaking2.setNotable(true); + breaking2.setTitle("Breaking change number 2"); + breaking2.setArea("Cluster"); + breaking2.setDetails("Breaking change details 2"); + breaking2.setImpact("Breaking change impact description 2"); + + ChangelogEntry entry3 = new ChangelogEntry(); + ChangelogEntry.Breaking breaking3 = new ChangelogEntry.Breaking(); + entry3.setBreaking(breaking3); + + breaking3.setNotable(false); + breaking3.setTitle("Breaking change number 3"); + breaking3.setArea("Transform"); + breaking3.setDetails("Breaking change details 3"); + breaking3.setImpact("Breaking change impact description 3"); + + return List.of(entry1, entry2, entry3); + } + + private String getResource(String name) throws Exception { + return Files.readString(Paths.get(Objects.requireNonNull(this.getClass().getResource(name)).toURI()), StandardCharsets.UTF_8); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java new file mode 100644 index 0000000000000..75f6c4a5a1ca4 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java @@ -0,0 +1,313 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */
+
+package org.elasticsearch.gradle.internal.release;
+
+import org.elasticsearch.gradle.internal.test.GradleUnitTestCase;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.hamcrest.Matchers.aMapWithSize;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
+
+@Ignore("https://github.com/elastic/elasticsearch/issues/77190")
+public class GenerateReleaseNotesTaskTest extends GradleUnitTestCase {
+    private GitWrapper gitWrapper;
+
+    @Before
+    public void setup() {
+        this.gitWrapper = mock(GitWrapper.class);
+    }
+
+    /**
+     * Check that the task does not update git tags if the current version is a snapshot of the first patch release.
+     */
+    @Test
+    public void needsGitTags_withFirstSnapshot_returnsFalse() {
+        assertThat(GenerateReleaseNotesTask.needsGitTags("8.0.0-SNAPSHOT"), is(false));
+    }
+
+    /**
+     * Check that the task does update git tags if the current version is a snapshot after the first patch release.
+     */
+    @Test
+    public void needsGitTags_withLaterSnapshot_returnsTrue() {
+        assertThat(GenerateReleaseNotesTask.needsGitTags("8.0.1-SNAPSHOT"), is(true));
+    }
+
+    /**
+     * Check that the task does not update git tags if the current version is the first patch release in a minor series.
+     */
+    @Test
+    public void needsGitTags_withFirstPatchRelease_returnsFalse() {
+        assertThat(GenerateReleaseNotesTask.needsGitTags("8.0.0"), is(false));
+    }
+
+    /**
+     * Check that the task does update git tags if the current version is later than the first patch release in a minor series.
+     */
+    @Test
+    public void needsGitTags_withLaterPatchRelease_returnsTrue() {
+        assertThat(GenerateReleaseNotesTask.needsGitTags("8.0.1"), is(true));
+    }
+
+    /**
+     * Check that the task does not update git tags if the current version is a first alpha prerelease.
+     */
+    @Test
+    public void needsGitTags_withFirstAlphaRelease_returnsFalse() {
+        assertThat(GenerateReleaseNotesTask.needsGitTags("8.0.0-alpha1"), is(false));
+    }
+
+    /**
+     * Check that the task does update git tags if the current version is a prerelease after the first alpha.
+     */
+    @Test
+    public void needsGitTags_withLaterAlphaRelease_returnsTrue() {
+        assertThat(GenerateReleaseNotesTask.needsGitTags("8.0.0-alpha2"), is(true));
+    }
+
+    /**
+     * Check that partitioning changelog files when the current version is a snapshot returns a map with a single entry.
+     */
+    @Test
+    public void partitionFiles_withSnapshot_returnsSingleMapping() {
+        // when:
+        Map<QualifiedVersion, Set<File>> partitionedFiles = GenerateReleaseNotesTask.partitionFilesByVersion(
+            gitWrapper,
+            "8.0.0-SNAPSHOT",
+            Set.of(new File("docs/changelog/1234.yaml"))
+        );
+
+        // then:
+        assertThat(partitionedFiles, aMapWithSize(1));
+        assertThat(
+            partitionedFiles,
+            hasEntry(equalTo(QualifiedVersion.of("8.0.0-SNAPSHOT")), hasItem(new File("docs/changelog/1234.yaml")))
+        );
+        verifyZeroInteractions(gitWrapper);
+    }
+
+    /**
+     * Check that partitioning changelog files when the current version is the first release
+     * in a minor series returns a map with a single entry.
+     */
+    @Test
+    public void partitionFiles_withFirstRevision_returnsSingleMapping() {
+        // when:
+        Map<QualifiedVersion, Set<File>> partitionedFiles = GenerateReleaseNotesTask.partitionFilesByVersion(
+            gitWrapper,
+            "8.5.0",
+            Set.of(new File("docs/changelog/1234.yaml"))
+        );
+
+        // then:
+        assertThat(partitionedFiles, aMapWithSize(1));
+        assertThat(partitionedFiles, hasEntry(equalTo(QualifiedVersion.of("8.5.0")), hasItem(new File("docs/changelog/1234.yaml"))));
+        verifyZeroInteractions(gitWrapper);
+    }
+
+    /**
+     * Check that partitioning changelog files when the current version is the first alpha prerelease returns a map with a single entry.
+     */
+    @Test
+    public void partitionFiles_withFirstAlpha_returnsSingleMapping() {
+        // when:
+        Map<QualifiedVersion, Set<File>> partitionedFiles = GenerateReleaseNotesTask.partitionFilesByVersion(
+            gitWrapper,
+            "8.0.0-alpha1",
+            Set.of(new File("docs/changelog/1234.yaml"))
+        );
+
+        // then:
+        assertThat(partitionedFiles, aMapWithSize(1));
+        assertThat(partitionedFiles, hasEntry(equalTo(QualifiedVersion.of("8.0.0-alpha1")), hasItem(new File("docs/changelog/1234.yaml"))));
+        verifyZeroInteractions(gitWrapper);
+    }
+
+    /**
+     * Check that when deriving a list of versions from git tags, the current unreleased version is included.
+     */
+    @Test
+    public void getVersions_includesCurrentVersion() {
+        // given:
+        when(gitWrapper.listVersions(anyString())).thenReturn(
+            Stream.of("8.0.0-alpha1", "8.0.0-alpha2", "8.0.0-beta1", "8.0.0-beta2", "8.0.0-beta3", "8.0.0-rc1", "8.0.0")
+                .map(QualifiedVersion::of)
+        );
+
+        // when:
+        Set<QualifiedVersion> versions = GenerateReleaseNotesTask.getVersions(gitWrapper, "8.0.0-SNAPSHOT");
+
+        // then:
+        assertThat(
+            versions,
+            containsInAnyOrder(
+                Stream.of(
+                    "8.0.0-alpha1",
+                    "8.0.0-alpha2",
+                    "8.0.0-beta1",
+                    "8.0.0-beta2",
+                    "8.0.0-beta3",
+                    "8.0.0-rc1",
+                    "8.0.0",
+                    "8.0.0-SNAPSHOT"
+                ).map(QualifiedVersion::of).collect(Collectors.toList()).toArray(new QualifiedVersion[] {})
+            )
+        );
+    }
+
+    /**
+     * Check that the task partitions the list of files correctly by version for a prerelease.
+ */ + @Test + public void partitionFiles_withPrerelease_correctlyGroupsByPrereleaseVersion() { + // given: + when(gitWrapper.listVersions(anyString())).thenReturn( + Stream.of("8.0.0-alpha1", "8.0.0-alpha2", "8.0.0-beta1", "8.0.0-beta2", "8.0.0-beta3", "8.0.0-rc1", "8.0.0") + .map(QualifiedVersion::of) + ); + when(gitWrapper.listFiles(eq("v8.0.0-alpha1"), anyString())).thenReturn( + Stream.of("docs/changelog/1_1234.yaml", "docs/changelog/1_5678.yaml") + ); + when(gitWrapper.listFiles(eq("v8.0.0-alpha2"), anyString())).thenReturn( + Stream.of("docs/changelog/2_1234.yaml", "docs/changelog/2_5678.yaml") + ); + + Set allFiles = Set.of( + new File("docs/changelog/1_1234.yaml"), + new File("docs/changelog/1_5678.yaml"), + new File("docs/changelog/2_1234.yaml"), + new File("docs/changelog/2_5678.yaml"), + new File("docs/changelog/3_1234.yaml"), + new File("docs/changelog/3_5678.yaml") + ); + + // when: + Map> partitionedFiles = GenerateReleaseNotesTask.partitionFilesByVersion( + gitWrapper, + "8.0.0-beta1", + allFiles + ); + + // then: + verify(gitWrapper).listVersions("v8.0*"); + verify(gitWrapper).listFiles("v8.0.0-alpha1", "docs/changelog"); + verify(gitWrapper).listFiles("v8.0.0-alpha2", "docs/changelog"); + + assertThat( + partitionedFiles, + allOf( + aMapWithSize(3), + hasKey(QualifiedVersion.of("8.0.0-alpha1")), + hasKey(QualifiedVersion.of("8.0.0-alpha2")), + hasKey(QualifiedVersion.of("8.0.0-beta1")) + ) + ); + + assertThat( + partitionedFiles, + allOf( + hasEntry( + equalTo(QualifiedVersion.of("8.0.0-alpha1")), + containsInAnyOrder(new File("docs/changelog/1_1234.yaml"), new File("docs/changelog/1_5678.yaml")) + ), + hasEntry( + equalTo(QualifiedVersion.of("8.0.0-alpha2")), + containsInAnyOrder(new File("docs/changelog/2_1234.yaml"), new File("docs/changelog/2_5678.yaml")) + ), + hasEntry( + equalTo(QualifiedVersion.of("8.0.0-beta1")), + containsInAnyOrder(new File("docs/changelog/3_1234.yaml"), new File("docs/changelog/3_5678.yaml")) + ) + ) + ); + } + + /** + * Check that the task partitions the list of files correctly by version for a patch release. 
+ */ + @Test + public void partitionFiles_withPatchRelease_correctlyGroupsByPatchVersion() { + // given: + when(gitWrapper.listVersions(anyString())).thenReturn( + Stream.of("8.0.0-alpha1", "8.0.0-alpha2", "8.0.0-beta1", "8.0.0-rc1", "8.0.0", "8.0.1", "8.0.2", "8.1.0") + .map(QualifiedVersion::of) + ); + when(gitWrapper.listFiles(eq("v8.0.0"), anyString())).thenReturn( + Stream.of("docs/changelog/1_1234.yaml", "docs/changelog/1_5678.yaml") + ); + when(gitWrapper.listFiles(eq("v8.0.1"), anyString())).thenReturn( + Stream.of("docs/changelog/2_1234.yaml", "docs/changelog/2_5678.yaml") + ); + + Set allFiles = Set.of( + new File("docs/changelog/1_1234.yaml"), + new File("docs/changelog/1_5678.yaml"), + new File("docs/changelog/2_1234.yaml"), + new File("docs/changelog/2_5678.yaml"), + new File("docs/changelog/3_1234.yaml"), + new File("docs/changelog/3_5678.yaml") + ); + + // when: + Map> partitionedFiles = GenerateReleaseNotesTask.partitionFilesByVersion(gitWrapper, "8.0.2", allFiles); + + // then: + verify(gitWrapper).listVersions("v8.0*"); + verify(gitWrapper).listFiles("v8.0.0", "docs/changelog"); + verify(gitWrapper).listFiles("v8.0.1", "docs/changelog"); + + assertThat( + partitionedFiles, + allOf( + aMapWithSize(3), + hasKey(QualifiedVersion.of("8.0.0")), + hasKey(QualifiedVersion.of("8.0.1")), + hasKey(QualifiedVersion.of("8.0.2")) + ) + ); + + assertThat( + partitionedFiles, + allOf( + hasEntry( + equalTo(QualifiedVersion.of("8.0.0")), + containsInAnyOrder(new File("docs/changelog/1_1234.yaml"), new File("docs/changelog/1_5678.yaml")) + ), + hasEntry( + equalTo(QualifiedVersion.of("8.0.1")), + containsInAnyOrder(new File("docs/changelog/2_1234.yaml"), new File("docs/changelog/2_5678.yaml")) + ), + hasEntry( + equalTo(QualifiedVersion.of("8.0.2")), + containsInAnyOrder(new File("docs/changelog/3_1234.yaml"), new File("docs/changelog/3_5678.yaml")) + ) + ) + ); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java new file mode 100644 index 0000000000000..f0859fe1b32b7 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.junit.Ignore; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.List; +import java.util.Objects; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThat; + +@Ignore("https://github.com/elastic/elasticsearch/issues/77190") +public class ReleaseHighlightsGeneratorTest { + + /** + * Check that the release highlights can be correctly generated. 
+ */ + @Test + public void generateFile_rendersCorrectMarkup() throws Exception { + // given: + final String template = getResource("/templates/release-highlights.asciidoc"); + final String expectedOutput = getResource( + "/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc" + ); + final List entries = getEntries(); + + // when: + final String actualOutput = ReleaseHighlightsGenerator.generateFile(QualifiedVersion.of("8.4.0-SNAPSHOT"), template, entries); + + // then: + assertThat(actualOutput, equalTo(expectedOutput)); + } + + private List getEntries() { + ChangelogEntry entry1 = new ChangelogEntry(); + ChangelogEntry.Highlight highlight1 = new ChangelogEntry.Highlight(); + entry1.setHighlight(highlight1); + + highlight1.setNotable(true); + highlight1.setTitle("Notable release highlight number 1"); + highlight1.setBody("Notable release body number 1"); + + ChangelogEntry entry2 = new ChangelogEntry(); + ChangelogEntry.Highlight highlight2 = new ChangelogEntry.Highlight(); + entry2.setHighlight(highlight2); + + highlight2.setNotable(true); + highlight2.setTitle("Notable release highlight number 2"); + highlight2.setBody("Notable release body number 2"); + + ChangelogEntry entry3 = new ChangelogEntry(); + ChangelogEntry.Highlight highlight3 = new ChangelogEntry.Highlight(); + entry3.setHighlight(highlight3); + + highlight3.setNotable(false); + highlight3.setTitle("Notable release highlight number 3"); + highlight3.setBody("Notable release body number 3"); + + return List.of(entry1, entry2, entry3); + } + + private String getResource(String name) throws Exception { + return Files.readString(Paths.get(Objects.requireNonNull(this.getClass().getResource(name)).toURI()), StandardCharsets.UTF_8); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.java new file mode 100644 index 0000000000000..4e031f34f5b88 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.junit.Ignore; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThat; + +@Ignore("https://github.com/elastic/elasticsearch/issues/77190") +public class ReleaseNotesGeneratorTest { + + /** + * Check that the release notes can be correctly generated. 
+ */ + @Test + public void generateFile_rendersCorrectMarkup() throws Exception { + // given: + final String template = getResource("/templates/release-notes.asciidoc"); + final String expectedOutput = getResource( + "/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.generateFile.asciidoc" + ); + final Map> entries = getEntries(); + + // when: + final String actualOutput = ReleaseNotesGenerator.generateFile(template, entries); + + // then: + assertThat(actualOutput, equalTo(expectedOutput)); + } + + private Map> getEntries() { + final Set entries_8_2_0 = new HashSet<>(); + entries_8_2_0.addAll(buildEntries(1, 2)); + entries_8_2_0.addAll(buildEntries(2, 2)); + entries_8_2_0.addAll(buildEntries(3, 2)); + + final Set entries_8_1_0 = new HashSet<>(); + entries_8_1_0.addAll(buildEntries(4, 2)); + entries_8_1_0.addAll(buildEntries(5, 2)); + entries_8_1_0.addAll(buildEntries(6, 2)); + + final Set entries_8_0_0 = new HashSet<>(); + entries_8_0_0.addAll(buildEntries(7, 2)); + entries_8_0_0.addAll(buildEntries(8, 2)); + entries_8_0_0.addAll(buildEntries(9, 2)); + + // Security issues are presented first in the notes + final ChangelogEntry securityEntry = new ChangelogEntry(); + securityEntry.setArea("Security"); + securityEntry.setType("security"); + securityEntry.setSummary("Test security issue"); + entries_8_2_0.add(securityEntry); + + // known issues are presented after security issues + final ChangelogEntry knownIssue = new ChangelogEntry(); + knownIssue.setArea("Search"); + knownIssue.setType("known-issue"); + knownIssue.setSummary("Test known issue"); + entries_8_1_0.add(knownIssue); + + final Map> result = new HashMap<>(); + + result.put(QualifiedVersion.of("8.2.0-SNAPSHOT"), entries_8_2_0); + result.put(QualifiedVersion.of("8.1.0"), entries_8_1_0); + result.put(QualifiedVersion.of("8.0.0"), entries_8_0_0); + + return result; + } + + private List buildEntries(int seed, int count) { + // Sample of possible areas from `changelog-schema.json` + final List areas = List.of("Aggregation", "Cluster", "Indices", "Mappings", "Search", "Security"); + // Possible change types, with `breaking`, `breaking-java`, `known-issue` and `security` removed. 
+ final List types = List.of("bug", "deprecation", "enhancement", "feature", "new-aggregation", "regression", "upgrade"); + + final String area = areas.get(seed % areas.size()); + final String type = types.get(seed % types.size()); + + final List entries = new ArrayList<>(count); + + int base = seed * 1000; + + for (int i = 0; i < count; i++) { + + final ChangelogEntry e = new ChangelogEntry(); + e.setPr(base++); + e.setArea(area); + e.setSummary("Test changelog entry " + seed + "_" + i); + e.setType(type); + + List issues = new ArrayList<>(count); + for (int j = 0; j <= i; j++) { + issues.add(base++); + } + e.setIssues(issues); + + entries.add(e); + } + + return entries; + } + + private String getResource(String name) throws Exception { + return Files.readString(Paths.get(Objects.requireNonNull(this.getClass().getResource(name)).toURI()), StandardCharsets.UTF_8); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.java new file mode 100644 index 0000000000000..cc467ea37702f --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import org.junit.Ignore; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assert.assertThat; + +@Ignore("https://github.com/elastic/elasticsearch/issues/77190") +public class ReleaseNotesIndexGeneratorTest { + + /** + * Check that a release notes index can be generated. 
+ */ + @Test + public void generateFile_rendersCorrectMarkup() throws Exception { + // given: + final Set versions = Stream.of( + "8.0.0-alpha1", + "8.0.0-beta2", + "8.0.0-rc3", + "8.0.0", + "8.0.1", + "8.0.2", + "8.1.0", + "8.1.1", + "8.2.0-SNAPSHOT" + ).map(QualifiedVersion::of).collect(Collectors.toSet()); + + final String template = getResource("/templates/release-notes-index.asciidoc"); + final String expectedOutput = getResource( + "/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.generateFile.asciidoc" + ); + + // when: + final String actualOutput = ReleaseNotesIndexGenerator.generateFile(versions, template); + + // then: + assertThat(actualOutput, equalTo(expectedOutput)); + } + + private String getResource(String name) throws Exception { + return Files.readString(Paths.get(Objects.requireNonNull(this.getClass().getResource(name)).toURI()), StandardCharsets.UTF_8); + } +} diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc new file mode 100644 index 0000000000000..4a61c2de4016f --- /dev/null +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc @@ -0,0 +1,81 @@ +[[migrating-8.4]] +== Migrating to 8.4 +++++ +8.4 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 8.4. + +See also <> and <>. + +coming[8.4.0-SNAPSHOT] + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +[discrete] +[[breaking-changes-8.4]] +=== Breaking changes + +The following changes in {es} 8.4 might affect your applications +and prevent them from operating normally. +Before upgrading to 8.4 review these changes and take the described steps +to mitigate the impact. + +NOTE: Breaking changes introduced in minor versions are +normally limited to security and bug fixes. +Significant changes in behavior are deprecated in a minor release and +the old behavior is supported until the next major release. +To find out if you are using any deprecated functionality, +enable <>. 
+ +// tag::notable-breaking-changes[] +[discrete] +[[breaking_84_api]] +==== API + +[[breaking_change_number_1]] +.Breaking change number 1 +[%collapsible] +==== +*Details* + +Breaking change details 1 + +*Impact* + +Breaking change impact description 1 +==== +// end::notable-breaking-changes[] + +// tag::notable-breaking-changes[] +[discrete] +[[breaking_84_cluster]] +==== Cluster + +[[breaking_change_number_2]] +.Breaking change number 2 +[%collapsible] +==== +*Details* + +Breaking change details 2 + +*Impact* + +Breaking change impact description 2 +==== +// end::notable-breaking-changes[] + +[discrete] +[[breaking_84_transform]] +==== Transform + +[[breaking_change_number_3]] +.Breaking change number 3 +[%collapsible] +==== +*Details* + +Breaking change details 3 + +*Impact* + +Breaking change impact description 3 +==== + diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc new file mode 100644 index 0000000000000..25438cbe72b8a --- /dev/null +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseHighlightsGeneratorTest.generateFile.asciidoc @@ -0,0 +1,40 @@ +[[release-highlights]] +== What's new in {minor-version} + +coming::[{minor-version}] + +Here are the highlights of what's new and improved in {es} {minor-version}! +ifeval::[\{release-state}\"!=\"unreleased\"] +For detailed information about this release, see the <> and +<>. +endif::[] + +// Add previous release to the list +Other versions: + +{ref-bare}/8.4/release-highlights.html[8.4] +| {ref-bare}/8.3/release-highlights.html[8.3] +| {ref-bare}/8.2/release-highlights.html[8.2] +| {ref-bare}/8.1/release-highlights.html[8.1] +| {ref-bare}/8.0/release-highlights.html[8.0] + +// tag::notable-highlights[] + +[discrete] +[[notable_release_highlight_number_1]] +=== Notable release highlight number 1 +Notable release body number 1 + +[discrete] +[[notable_release_highlight_number_2]] +=== Notable release highlight number 2 +Notable release body number 2 + +// end::notable-highlights[] + + +[discrete] +[[notable_release_highlight_number_3]] +=== Notable release highlight number 3 +Notable release body number 3 + diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.generateFile.asciidoc new file mode 100644 index 0000000000000..d81a3d8df7f49 --- /dev/null +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseNotesGeneratorTest.generateFile.asciidoc @@ -0,0 +1,105 @@ +[[release-notes-8.2.0]] +== {es} version 8.2.0 + +coming[8.2.0] + +Also see <>. 
+ +[discrete] +[[security-updates-8.2.0]] +=== Security updates + +* Test security issue + +[[deprecation-8.2.0]] +[float] +=== Deprecations + +Cluster:: +* Test changelog entry 1_0 {es-pull}1000[#1000] (issue: {es-issue}1001[#1001]) +* Test changelog entry 1_1 {es-pull}1002[#1002] (issues: {es-issue}1003[#1003], {es-issue}1004[#1004]) + +[[enhancement-8.2.0]] +[float] +=== Enhancements + +Indices:: +* Test changelog entry 2_0 {es-pull}2000[#2000] (issue: {es-issue}2001[#2001]) +* Test changelog entry 2_1 {es-pull}2002[#2002] (issues: {es-issue}2003[#2003], {es-issue}2004[#2004]) + +[[feature-8.2.0]] +[float] +=== New features + +Mappings:: +* Test changelog entry 3_0 {es-pull}3000[#3000] (issue: {es-issue}3001[#3001]) +* Test changelog entry 3_1 {es-pull}3002[#3002] (issues: {es-issue}3003[#3003], {es-issue}3004[#3004]) + + +[[release-notes-8.1.0]] +== {es} version 8.1.0 + +Also see <>. + +[discrete] +[[known-issues-8.1.0]] +=== Known issues + +* Test known issue + +[[new-aggregation-8.1.0]] +[float] +=== New aggregation + +Search:: +* Test changelog entry 4_0 {es-pull}4000[#4000] (issue: {es-issue}4001[#4001]) +* Test changelog entry 4_1 {es-pull}4002[#4002] (issues: {es-issue}4003[#4003], {es-issue}4004[#4004]) + +[[regression-8.1.0]] +[float] +=== Regressions + +Security:: +* Test changelog entry 5_0 {es-pull}5000[#5000] (issue: {es-issue}5001[#5001]) +* Test changelog entry 5_1 {es-pull}5002[#5002] (issues: {es-issue}5003[#5003], {es-issue}5004[#5004]) + +[[upgrade-8.1.0]] +[float] +=== Upgrades + +Aggregation:: +* Test changelog entry 6_0 {es-pull}6000[#6000] (issue: {es-issue}6001[#6001]) +* Test changelog entry 6_1 {es-pull}6002[#6002] (issues: {es-issue}6003[#6003], {es-issue}6004[#6004]) + + +[[release-notes-8.0.0]] +== {es} version 8.0.0 + +Also see <>. + +[[bug-8.0.0]] +[float] +=== Bug fixes + +Cluster:: +* Test changelog entry 7_0 {es-pull}7000[#7000] (issue: {es-issue}7001[#7001]) +* Test changelog entry 7_1 {es-pull}7002[#7002] (issues: {es-issue}7003[#7003], {es-issue}7004[#7004]) + +[[deprecation-8.0.0]] +[float] +=== Deprecations + +Indices:: +* Test changelog entry 8_0 {es-pull}8000[#8000] (issue: {es-issue}8001[#8001]) +* Test changelog entry 8_1 {es-pull}8002[#8002] (issues: {es-issue}8003[#8003], {es-issue}8004[#8004]) + +[[enhancement-8.0.0]] +[float] +=== Enhancements + +Mappings:: +* Test changelog entry 9_0 {es-pull}9000[#9000] (issue: {es-issue}9001[#9001]) +* Test changelog entry 9_1 {es-pull}9002[#9002] (issues: {es-issue}9003[#9003], {es-issue}9004[#9004]) + + + diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.generateFile.asciidoc new file mode 100644 index 0000000000000..64c07d930930f --- /dev/null +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/ReleaseNotesIndexGeneratorTest.generateFile.asciidoc @@ -0,0 +1,27 @@ +[[es-release-notes]] += Release notes + +[partintro] +-- + +This section summarizes the changes in each release. 
+ +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +-- + +include::release-notes/8.2.asciidoc[] +include::release-notes/8.1.asciidoc[] +include::release-notes/8.0.asciidoc[] +include::release-notes/8.0.0-rc3.asciidoc[] +include::release-notes/8.0.0-beta2.asciidoc[] +include::release-notes/8.0.0-alpha1.asciidoc[] + diff --git a/build-tools/build.gradle b/build-tools/build.gradle index 6743306dafe22..1da2a31390d40 100644 --- a/build-tools/build.gradle +++ b/build-tools/build.gradle @@ -58,6 +58,10 @@ gradlePlugin { id = 'elasticsearch.test-gradle-policy' implementationClass = 'org.elasticsearch.gradle.test.GradleTestPolicySetupPlugin' } + yamlTests { + id = 'elasticsearch.yaml-rest-test' + implementationClass = 'org.elasticsearch.gradle.test.YamlRestTestPlugin' + } } } diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 326b7a68360ae..1fcc36172a417 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -109,7 +109,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { withChangedConfigMockedDistributionDownload(runner, runningClosure) then: - normalized(result.output).contains("Task ':myTask' is not up-to-date because:\n Input property 'clusters.myCluster\$0.nodes.\$0.$inputProperty'") + result.output.contains("Task ':myTask' is not up-to-date because:\n Input property 'clusters.myCluster\$0.nodes.\$0.$inputProperty'") result.output.contains("elasticsearch-keystore script executed!") assertEsLogContains("myCluster", "Starting Elasticsearch process") assertEsLogContains("myCluster", "Stopping node") @@ -163,7 +163,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { } then: - normalized(result.output).contains("Task ':myTask' is not up-to-date because:\n" + + result.output.contains("Task ':myTask' is not up-to-date because:\n" + " Input property 'clusters.myCluster\$0.nodes.\$0.$propertyName'") result.output.contains("elasticsearch-keystore script executed!") assertEsLogContains("myCluster", "Starting Elasticsearch process") diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/YamlRestTestPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/YamlRestTestPluginFuncTest.groovy new file mode 100644 index 0000000000000..30aad8054b08f --- /dev/null +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/test/YamlRestTestPluginFuncTest.groovy @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.test + +import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.fixtures.AbstractGradleFuncTest +import org.gradle.testkit.runner.TaskOutcome + +class YamlRestTestPluginFuncTest extends AbstractGradleFuncTest { + + def "declares default dependencies"() { + given: + buildFile << """ + plugins { + id 'elasticsearch.yaml-rest-test' + } + """ + + when: + def result = gradleRunner("dependencies").build() + def output = normalized(result.output) + then: + output.contains(""" +restTestSpecs +/--- org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch} FAILED""") + output.contains(normalized(""" +yamlRestTestImplementation - Implementation only dependencies for source set 'yaml rest test'. (n) +/--- org.elasticsearch.test:framework:${VersionProperties.elasticsearch} (n)""")) + } + + def "yamlRestTest does nothing when there are no tests"() { + given: + buildFile << """ + plugins { + id 'elasticsearch.yaml-rest-test' + } + + repositories { + mavenCentral() + } + + dependencies { + yamlRestTestImplementation "org.elasticsearch.test:framework:7.14.0" + restTestSpecs "org.elasticsearch:rest-api-spec:7.14.0" + } + """ + + when: + def result = gradleRunner("yamlRestTest").build() + then: + result.task(':compileYamlRestTestJava').outcome == TaskOutcome.NO_SOURCE + result.task(':processYamlRestTestResources').outcome == TaskOutcome.NO_SOURCE + result.task(':yamlRestTest').outcome == TaskOutcome.NO_SOURCE + } + +} \ No newline at end of file diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java index 54674d5476f95..fa7cc4d5beb25 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/PluginBuildPlugin.java @@ -54,6 +54,9 @@ * Encapsulates build configuration for an Elasticsearch plugin. */ public class PluginBuildPlugin implements Plugin { + + public static final String BUNDLE_PLUGIN_TASK_NAME = "bundlePlugin"; + @Override public void apply(final Project project) { project.getPluginManager().apply(JavaPlugin.class); @@ -124,7 +127,7 @@ public void apply(final Project project) { project.getTasks().register("run", RunTask.class, runTask -> { runTask.useCluster(runCluster); - runTask.dependsOn(project.getTasks().named("bundlePlugin")); + runTask.dependsOn(project.getTasks().named(BUNDLE_PLUGIN_TASK_NAME)); }); } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/test/YamlRestTestPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/test/YamlRestTestPlugin.java new file mode 100644 index 0000000000000..1ce03787a0756 --- /dev/null +++ b/build-tools/src/main/java/org/elasticsearch/gradle/test/YamlRestTestPlugin.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.test; + +import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.plugin.PluginBuildPlugin; +import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.elasticsearch.gradle.testclusters.TestClustersPlugin; +import org.elasticsearch.gradle.transform.UnzipTransform; +import org.gradle.api.Action; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.ConfigurationContainer; +import org.gradle.api.artifacts.dsl.DependencyHandler; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.attributes.Attribute; +import org.gradle.api.internal.artifacts.ArtifactAttributes; +import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.tasks.Copy; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.bundling.Zip; + +import java.io.File; + +import static org.elasticsearch.gradle.plugin.PluginBuildPlugin.BUNDLE_PLUGIN_TASK_NAME; + +public class YamlRestTestPlugin implements Plugin { + + public static final String REST_TEST_SPECS_CONFIGURATION_NAME = "restTestSpecs"; + public static final String YAML_REST_TEST = "yamlRestTest"; + + @Override + public void apply(Project project) { + project.getPluginManager().apply(GradleTestPolicySetupPlugin.class); + project.getPluginManager().apply(TestClustersPlugin.class); + project.getPluginManager().apply(JavaBasePlugin.class); + + Attribute restAttribute = Attribute.of("restSpecs", Boolean.class); + project.getDependencies().getAttributesSchema().attribute(restAttribute); + project.getDependencies().getArtifactTypes().maybeCreate(ArtifactTypeDefinition.JAR_TYPE); + project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { + transformSpec.getFrom() + .attribute(ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.JAR_TYPE) + .attribute(restAttribute, true); + transformSpec.getTo() + .attribute(ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.DIRECTORY_TYPE) + .attribute(restAttribute, true); + }); + + ConfigurationContainer configurations = project.getConfigurations(); + Configuration restTestSpecs = configurations.create(REST_TEST_SPECS_CONFIGURATION_NAME); + restTestSpecs.getAttributes().attribute(ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.DIRECTORY_TYPE); + restTestSpecs.getAttributes().attribute(restAttribute, true); + + TaskProvider copyRestTestSpecs = project.getTasks().register("copyRestTestSpecs", Copy.class, t -> { + t.from(restTestSpecs); + t.into(new File(project.getBuildDir(), "restResources/restspec")); + }); + + var sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + var testSourceSet = sourceSets.maybeCreate(YAML_REST_TEST); + NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project + .getExtensions() + .getByName(TestClustersPlugin.EXTENSION_NAME); + + testSourceSet.getOutput().dir(copyRestTestSpecs.map(Task::getOutputs)); + Configuration yamlRestTestImplementation = configurations.getByName(testSourceSet.getImplementationConfigurationName()); + setupDefaultDependencies(project.getDependencies(), restTestSpecs, yamlRestTestImplementation); + var cluster = 
testClusters.maybeCreate(YAML_REST_TEST); + TaskProvider yamlRestTestTask = setupTestTask(project, testSourceSet, cluster); + project.getPlugins().withType(PluginBuildPlugin.class, p -> { + TaskProvider bundle = project.getTasks().withType(Zip.class).named(BUNDLE_PLUGIN_TASK_NAME); + cluster.plugin(bundle.flatMap(Zip::getArchiveFile)); + yamlRestTestTask.configure(t -> t.dependsOn(bundle)); + }); + } + + private static void setupDefaultDependencies( + DependencyHandler dependencyHandler, + Configuration restTestSpecs, + Configuration yamlRestTestImplementation + ) { + String elasticsearchVersion = VersionProperties.getElasticsearch(); + yamlRestTestImplementation.defaultDependencies( + deps -> deps.add(dependencyHandler.create("org.elasticsearch.test:framework:" + elasticsearchVersion)) + ); + + restTestSpecs.defaultDependencies( + deps -> deps.add(dependencyHandler.create("org.elasticsearch:rest-api-spec:" + elasticsearchVersion)) + ); + } + + private TaskProvider setupTestTask( + Project project, + SourceSet testSourceSet, + ElasticsearchCluster cluster + ) { + return project.getTasks().register("yamlRestTest", StandaloneRestIntegTestTask.class, task -> { + task.useCluster(cluster); + task.setTestClassesDirs(testSourceSet.getOutput().getClassesDirs()); + task.setClasspath(testSourceSet.getRuntimeClasspath()); + + var nonInputProperties = new SystemPropertyCommandLineArgumentProvider(); + nonInputProperties.systemProperty("tests.rest.cluster", () -> String.join(",", cluster.getAllHttpSocketURI())); + nonInputProperties.systemProperty("tests.cluster", () -> String.join(",", cluster.getAllTransportPortURI())); + nonInputProperties.systemProperty("tests.clustername", () -> cluster.getName()); + task.getJvmArgumentProviders().add(nonInputProperties); + task.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString()); + }); + } + +} diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 5791744a31b31..0bd8768ccc6c6 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -9,6 +9,7 @@ package org.elasticsearch.gradle.fixtures import org.elasticsearch.gradle.internal.test.InternalAwareGradleRunner +import org.elasticsearch.gradle.internal.test.NormalizeOutputGradleRunner import org.gradle.testkit.runner.GradleRunner import org.junit.Rule import org.junit.rules.TemporaryFolder @@ -18,6 +19,8 @@ import java.lang.management.ManagementFactory import java.util.jar.JarEntry import java.util.jar.JarOutputStream +import static org.elasticsearch.gradle.internal.test.TestUtils.normalizeString + abstract class AbstractGradleFuncTest extends Specification { @Rule @@ -46,11 +49,14 @@ abstract class AbstractGradleFuncTest extends Specification { } GradleRunner gradleRunner(File projectDir, String... 
arguments) { - new InternalAwareGradleRunner(GradleRunner.create() - .withDebug(ManagementFactory.getRuntimeMXBean().getInputArguments().toString().indexOf("-agentlib:jdwp") > 0) - .withProjectDir(projectDir) - .withPluginClasspath() - .forwardOutput() + return new NormalizeOutputGradleRunner( + new InternalAwareGradleRunner(GradleRunner.create() + .withDebug(ManagementFactory.getRuntimeMXBean().getInputArguments().toString().indexOf("-agentlib:jdwp") > 0) + .withProjectDir(projectDir) + .withPluginClasspath() + .forwardOutput() + ), + projectDir ).withArguments(arguments) } @@ -63,13 +69,9 @@ abstract class AbstractGradleFuncTest extends Specification { assert normalized(givenOutput).contains(normalized(expected)) == false true } + String normalized(String input) { - String normalizedPathPrefix = testProjectDir.root.canonicalPath.replace('\\', '/') - return input.readLines() - .collect { it.replace('\\', '/') } - .collect {it.replace(normalizedPathPrefix , '.') } - .collect {it.replaceAll(/Gradle Test Executor \d/ , 'Gradle Test Executor 1') } - .join("\n") + return normalizeString(input, testProjectDir.root) } File file(String path) { diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/InternalAwareGradleRunner.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/InternalAwareGradleRunner.java index 789780511c733..e30455aa0b406 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/InternalAwareGradleRunner.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/InternalAwareGradleRunner.java @@ -14,7 +14,6 @@ import org.gradle.testkit.runner.InvalidRunnerConfigurationException; import org.gradle.testkit.runner.UnexpectedBuildFailure; import org.gradle.testkit.runner.UnexpectedBuildSuccess; -import org.jetbrains.annotations.Nullable; import java.io.File; import java.io.Writer; @@ -33,22 +32,26 @@ public InternalAwareGradleRunner(GradleRunner delegate) { @Override public GradleRunner withGradleVersion(String gradleVersion) { - return delegate.withGradleVersion(gradleVersion); + delegate.withGradleVersion(gradleVersion); + return this; } @Override public GradleRunner withGradleInstallation(File file) { - return delegate.withGradleInstallation(file); + delegate.withGradleInstallation(file); + return this; } @Override public GradleRunner withGradleDistribution(URI uri) { - return delegate.withGradleDistribution(uri); + delegate.withGradleDistribution(uri); + return this; } @Override public GradleRunner withTestKitDir(File file) { - return delegate.withTestKitDir(file); + delegate.withTestKitDir(file); + return this; } @Override @@ -58,7 +61,8 @@ public File getProjectDir() { @Override public GradleRunner withProjectDir(File projectDir) { - return delegate.withProjectDir(projectDir); + delegate.withProjectDir(projectDir); + return this; } @Override @@ -70,12 +74,14 @@ public List getArguments() { public GradleRunner withArguments(List arguments) { List collect = Stream.concat(arguments.stream(), Stream.of("-Dtest.external=true")) .collect(Collectors.toList()); - return delegate.withArguments(collect); + delegate.withArguments(collect); + return this; } @Override public GradleRunner withArguments(String... 
arguments) { - return withArguments(List.of(arguments)); + withArguments(List.of(arguments)); + return this; } @Override @@ -85,12 +91,14 @@ public List getPluginClasspath() { @Override public GradleRunner withPluginClasspath() throws InvalidPluginMetadataException { - return delegate.withPluginClasspath(); + delegate.withPluginClasspath(); + return this; } @Override public GradleRunner withPluginClasspath(Iterable iterable) { - return delegate.withPluginClasspath(iterable); + delegate.withPluginClasspath(iterable); + return this; } @Override @@ -100,33 +108,37 @@ public boolean isDebug() { @Override public GradleRunner withDebug(boolean b) { - return delegate.withDebug(b); + delegate.withDebug(b); + return this; } - @Nullable @Override public Map getEnvironment() { return delegate.getEnvironment(); } @Override - public GradleRunner withEnvironment(@Nullable Map map) { - return delegate.withEnvironment(map); + public GradleRunner withEnvironment(Map map) { + delegate.withEnvironment(map); + return this; } @Override public GradleRunner forwardStdOutput(Writer writer) { - return delegate.forwardStdOutput(writer); + delegate.forwardStdOutput(writer); + return this; } @Override public GradleRunner forwardStdError(Writer writer) { - return delegate.forwardStdOutput(writer); + delegate.forwardStdOutput(writer); + return this; } @Override public GradleRunner forwardOutput() { - return delegate.forwardOutput(); + delegate.forwardOutput(); + return this; } @Override diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java new file mode 100644 index 0000000000000..8d2ae78396435 --- /dev/null +++ b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/NormalizeOutputGradleRunner.java @@ -0,0 +1,193 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.test; + +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.BuildTask; +import org.gradle.testkit.runner.GradleRunner; +import org.gradle.testkit.runner.InvalidPluginMetadataException; +import org.gradle.testkit.runner.InvalidRunnerConfigurationException; +import org.gradle.testkit.runner.TaskOutcome; +import org.gradle.testkit.runner.UnexpectedBuildFailure; +import org.gradle.testkit.runner.UnexpectedBuildSuccess; + +import java.io.File; +import java.io.Writer; +import java.net.URI; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.gradle.internal.test.TestUtils.normalizeString; + +public class NormalizeOutputGradleRunner extends GradleRunner { + + public NormalizeOutputGradleRunner(GradleRunner delegate, File projectRootDir) { + this.delegate = delegate; + this.projectRootDir = projectRootDir; + } + + @Override + public GradleRunner withGradleVersion(String gradleVersion) { + delegate.withGradleVersion(gradleVersion); + return this; + } + + @Override + public GradleRunner withGradleInstallation(File file) { + delegate.withGradleInstallation(file); + return this; + } + + @Override + public GradleRunner withGradleDistribution(URI uri) { + delegate.withGradleDistribution(uri); + return this; + } + + @Override + public GradleRunner withTestKitDir(File file) { + delegate.withTestKitDir(file); + return this; + } + + @Override + public File getProjectDir() { + return delegate.getProjectDir(); + } + + @Override + public GradleRunner withProjectDir(File projectDir) { + delegate.withProjectDir(projectDir); + return this; + } + + @Override + public List getArguments() { + return delegate.getArguments(); + } + + @Override + public GradleRunner withArguments(List arguments) { + return delegate.withArguments(arguments); + } + + @Override + public GradleRunner withArguments(String... 
arguments) { + withArguments(List.of(arguments)); + return this; + } + + @Override + public List getPluginClasspath() { + return delegate.getPluginClasspath(); + } + + @Override + public GradleRunner withPluginClasspath() throws InvalidPluginMetadataException { + delegate.withPluginClasspath(); + return this; + } + + @Override + public GradleRunner withPluginClasspath(Iterable iterable) { + delegate.withPluginClasspath(iterable); + return this; + } + + @Override + public boolean isDebug() { + return delegate.isDebug(); + } + + @Override + public GradleRunner withDebug(boolean b) { + delegate.withDebug(b); + return this; + } + + @Override + public Map getEnvironment() { + return delegate.getEnvironment(); + } + + @Override + public GradleRunner withEnvironment(Map map) { + delegate.withEnvironment(map); + return this; + } + + + @Override + public GradleRunner forwardStdOutput(Writer writer) { + delegate.forwardStdOutput(writer); + return this; + } + + @Override + public GradleRunner forwardStdError(Writer writer) { + delegate.forwardStdOutput(writer); + return this; + } + + @Override + public GradleRunner forwardOutput() { + delegate.forwardOutput(); + return this; + } + + @Override + public BuildResult build() throws InvalidRunnerConfigurationException, UnexpectedBuildFailure { + return new NormalizedBuildResult(delegate.build()); + } + + @Override + public BuildResult buildAndFail() throws InvalidRunnerConfigurationException, UnexpectedBuildSuccess { + return new NormalizedBuildResult(delegate.buildAndFail()); + } + + private GradleRunner delegate; + private File projectRootDir; + + private class NormalizedBuildResult implements BuildResult { + private BuildResult delegate; + private String normalizedString; + + NormalizedBuildResult(BuildResult delegate) { + this.delegate = delegate; + } + + @Override + public String getOutput() { + if (normalizedString == null) { + normalizedString = normalizeString(delegate.getOutput(), projectRootDir); + } + return normalizedString; + } + + @Override + public List getTasks() { + return delegate.getTasks(); + } + + @Override + public List tasks(TaskOutcome taskOutcome) { + return delegate.tasks(taskOutcome); + } + + @Override + public List taskPaths(TaskOutcome taskOutcome) { + return delegate.taskPaths(taskOutcome); + } + + @Override + public BuildTask task(String s) { + return delegate.task(s); + } + } +} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestClasspathUtils.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestUtils.java similarity index 65% rename from build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestClasspathUtils.java rename to build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestUtils.java index 9876835a15acb..8e69122048c00 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestClasspathUtils.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/TestUtils.java @@ -16,14 +16,15 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; +import java.util.stream.Collectors; import static org.junit.Assert.fail; -public class TestClasspathUtils { +public class TestUtils { public static void setupJarJdkClasspath(File projectRoot) { try { - URL originLocation = TestClasspathUtils.class.getClassLoader() + URL originLocation = TestUtils.class.getClassLoader() .loadClass("org.elasticsearch.jdk.JdkJarHellCheck") 
.getProtectionDomain() .getCodeSource() @@ -41,4 +42,18 @@ public static void setupJarJdkClasspath(File projectRoot) { } } + public static String normalizeString(String input, File projectRootDir) { + try { + String normalizedPathPrefix = projectRootDir.getCanonicalPath().replaceAll("\\\\", "/"); + System.out.println("normalizedPathPrefix = " + normalizedPathPrefix); + return input.lines() + .map(it -> it.replaceAll("\\\\", "/")) + .map(it -> it.replaceAll(normalizedPathPrefix, ".")) + .map(it -> it.replaceAll("Gradle Test Executor \\d", "Gradle Test Executor 1")) + .collect(Collectors.joining("\n")); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/StatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/StatsResponse.java index 916e2eeb270d9..d50a6deec7895 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/StatsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/enrich/StatsResponse.java @@ -17,19 +17,21 @@ public final class StatsResponse { - private static ParseField EXECUTING_POLICIES_FIELD = new ParseField("executing_policies"); - private static ParseField COORDINATOR_STATS_FIELD = new ParseField("coordinator_stats"); + private static final ParseField EXECUTING_POLICIES_FIELD = new ParseField("executing_policies"); + private static final ParseField COORDINATOR_STATS_FIELD = new ParseField("coordinator_stats"); + private static final ParseField CACHE_STATS_FIELD = new ParseField("cache_stats"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "stats_response", true, - args -> new StatsResponse((List) args[0], (List) args[1]) + args -> new StatsResponse((List) args[0], (List) args[1], (List) args[2]) ); static { PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), ExecutingPolicy.PARSER::apply, EXECUTING_POLICIES_FIELD); PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), CoordinatorStats.PARSER::apply, COORDINATOR_STATS_FIELD); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), CacheStats.PARSER::apply, CACHE_STATS_FIELD); } public static StatsResponse fromXContent(XContentParser parser) { @@ -38,10 +40,12 @@ public static StatsResponse fromXContent(XContentParser parser) { private final List executingPolicies; private final List coordinatorStats; + private final List cacheStats; - public StatsResponse(List executingPolicies, List coordinatorStats) { + public StatsResponse(List executingPolicies, List coordinatorStats, List cacheStats) { this.executingPolicies = executingPolicies; this.coordinatorStats = coordinatorStats; + this.cacheStats = cacheStats; } public List getExecutingPolicies() { @@ -52,6 +56,10 @@ public List getCoordinatorStats() { return coordinatorStats; } + public List getCacheStats() { + return cacheStats; + } + public static final class CoordinatorStats { static ParseField NODE_ID_FIELD = new ParseField("node_id"); @@ -177,4 +185,74 @@ public int hashCode() { } } + public static final class CacheStats { + + static ParseField NODE_ID_FIELD = new ParseField("node_id"); + static ParseField COUNT_FIELD = new ParseField("count"); + static ParseField HITS_FIELD = new ParseField("hits"); + static ParseField MISSES_FIELD = new ParseField("misses"); + static ParseField EVICTIONS_FIELD = new ParseField("evictions"); + + private static final 
ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "coordinator_stats_item", + true, + args -> new CacheStats((String) args[0], (long) args[1], (long) args[2], (long) args[3], (long) args[4]) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), HITS_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MISSES_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), EVICTIONS_FIELD); + } + + private final String nodeId; + private final long count; + private final long hits; + private final long misses; + private final long evictions; + + public CacheStats(String nodeId, long count, long hits, long misses, long evictions) { + this.nodeId = nodeId; + this.count = count; + this.hits = hits; + this.misses = misses; + this.evictions = evictions; + } + + public String getNodeId() { + return nodeId; + } + + public long getCount() { + return count; + } + + public long getHits() { + return hits; + } + + public long getMisses() { + return misses; + } + + public long getEvictions() { + return evictions; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CacheStats that = (CacheStats) o; + return count == that.count && hits == that.hits && misses == that.misses && + evictions == that.evictions && nodeId.equals(that.nodeId); + } + + @Override + public int hashCode() { + return Objects.hash(nodeId, count, hits, misses, evictions); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/StatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/StatsResponseTests.java index 7efbca89294fb..d02905915d90c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/StatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/enrich/StatsResponseTests.java @@ -34,13 +34,19 @@ protected EnrichStatsAction.Response createServerTestInstance(XContentType xCont } int numCoordinatingStats = randomIntBetween(0, 16); List coordinatorStats = new ArrayList<>(numCoordinatingStats); + List cacheStats = new ArrayList<>(numCoordinatingStats); for (int i = 0; i < numCoordinatingStats; i++) { + String nodeId = randomAlphaOfLength(4); EnrichStatsAction.Response.CoordinatorStats stats = new EnrichStatsAction.Response.CoordinatorStats( - randomAlphaOfLength(4), randomIntBetween(0, 8096), randomIntBetween(0, 8096), randomNonNegativeLong(), + nodeId, randomIntBetween(0, 8096), randomIntBetween(0, 8096), randomNonNegativeLong(), randomNonNegativeLong()); coordinatorStats.add(stats); + cacheStats.add( + new EnrichStatsAction.Response.CacheStats(nodeId, randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()) + ); } - return new EnrichStatsAction.Response(executingPolicies, coordinatorStats); + return new EnrichStatsAction.Response(executingPolicies, coordinatorStats, cacheStats); } @Override @@ -68,6 +74,17 @@ protected void assertInstances(EnrichStatsAction.Response serverTestInstance, St assertThat(actual.getRemoteRequestsTotal(), equalTo(expected.getRemoteRequestsTotal())); assertThat(actual.getExecutedSearchesTotal(), equalTo(expected.getExecutedSearchesTotal())); } + + assertThat(clientInstance.getCacheStats().size(), 
equalTo(serverTestInstance.getCacheStats().size())); + for (int i = 0; i < clientInstance.getCacheStats().size(); i++) { + StatsResponse.CacheStats actual = clientInstance.getCacheStats().get(i); + EnrichStatsAction.Response.CacheStats expected = serverTestInstance.getCacheStats().get(i); + assertThat(actual.getNodeId(), equalTo(expected.getNodeId())); + assertThat(actual.getCount(), equalTo(expected.getCount())); + assertThat(actual.getHits(), equalTo(expected.getHits())); + assertThat(actual.getMisses(), equalTo(expected.getMisses())); + assertThat(actual.getEvictions(), equalTo(expected.getEvictions())); + } } private static TaskInfo randomTaskInfo() { diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 0c56b8e02a021..c0d575cdbc2cf 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -11,26 +11,26 @@ import java.nio.file.Path apply plugin: 'elasticsearch.internal-distribution-archive-setup' -CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String platform, String architecture, boolean oss, boolean jdk) { +CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String platform, String architecture, boolean isTestDistro) { return copySpec { into("elasticsearch-${version}") { into('lib') { - with libFiles(oss) + with libFiles(isTestDistro) } into('config') { dirMode 0750 fileMode 0660 - with configFiles(distributionType, oss, jdk) + with configFiles(distributionType, isTestDistro) from { dirMode 0750 jvmOptionsDir.getParent() } } into('bin') { - with binFiles(distributionType, oss, jdk) + with binFiles(distributionType, isTestDistro) } - if (jdk) { - into("darwin".equals(platform) ? 'jdk.app' : 'jdk') { + into("darwin".equals(platform) ? 'jdk.app' : 'jdk') { + if (isTestDistro == false) { with jdkFiles(project, platform, architecture) } } @@ -50,11 +50,11 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla include 'README.asciidoc' } from(rootProject.file('licenses')) { - include oss ? 'SSPL-1.0+ELASTIC-LICENSE-2.0.txt' : 'ELASTIC-LICENSE-2.0.txt' + include isTestDistro ? 
'SSPL-1.0+ELASTIC-LICENSE-2.0.txt' : 'ELASTIC-LICENSE-2.0.txt' rename { 'LICENSE.txt' } } - with noticeFile(oss, jdk) + with noticeFile(isTestDistro) into('modules') { with modulesFiles } @@ -65,70 +65,42 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla distribution_archives { integTestZip { content { - archiveFiles(transportModulesFiles, 'zip', null, 'x64', true, false) + archiveFiles(transportModulesFiles, 'zip', null, 'x64', true) } } windowsZip { archiveClassifier = 'windows-x86_64' content { - archiveFiles(modulesFiles('windows-x86_64'), 'zip', 'windows', 'x64', false, true) - } - } - - noJdkWindowsZip { - archiveClassifier = 'no-jdk-windows-x86_64' - content { - archiveFiles(modulesFiles('windows-x86_64'), 'zip', 'windows', 'x64', false, false) + archiveFiles(modulesFiles('windows-x86_64'), 'zip', 'windows', 'x64', false) } } darwinTar { archiveClassifier = 'darwin-x86_64' content { - archiveFiles(modulesFiles('darwin-x86_64'), 'tar', 'darwin', 'x64', false, true) + archiveFiles(modulesFiles('darwin-x86_64'), 'tar', 'darwin', 'x64', false) } } darwinAarch64Tar { archiveClassifier = 'darwin-aarch64' content { - archiveFiles(modulesFiles('darwin-aarch64'), 'tar', 'darwin', 'aarch64', false, true) - } - } - - noJdkDarwinTar { - archiveClassifier = 'no-jdk-darwin-x86_64' - content { - archiveFiles(modulesFiles('darwin-x86_64'), 'tar', 'darwin', 'x64', false, false) - } - } - - noJdkDarwinAarch64Tar { - archiveClassifier = 'no-jdk-darwin-aarch64' - content { - archiveFiles(modulesFiles('darwin-aarch64'), 'tar', 'darwin', 'aarch64', false, false) + archiveFiles(modulesFiles('darwin-aarch64'), 'tar', 'darwin', 'aarch64', false) } } linuxAarch64Tar { archiveClassifier = 'linux-aarch64' content { - archiveFiles(modulesFiles('linux-aarch64'), 'tar', 'linux', 'aarch64', false, true) + archiveFiles(modulesFiles('linux-aarch64'), 'tar', 'linux', 'aarch64', false) } } linuxTar { archiveClassifier = 'linux-x86_64' content { - archiveFiles(modulesFiles('linux-x86_64'), 'tar', 'linux', 'x64', false, true) - } - } - - noJdkLinuxTar { - archiveClassifier = 'no-jdk-linux-x86_64' - content { - archiveFiles(modulesFiles('linux-x86_64'), 'tar', 'linux', 'x64', false, false) + archiveFiles(modulesFiles('linux-x86_64'), 'tar', 'linux', 'x64', false) } } } diff --git a/distribution/archives/no-jdk-darwin-aarch64-tar/build.gradle b/distribution/archives/no-jdk-darwin-aarch64-tar/build.gradle deleted file mode 100644 index 4f7400c7eaa0e..0000000000000 --- a/distribution/archives/no-jdk-darwin-aarch64-tar/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. \ No newline at end of file diff --git a/distribution/archives/no-jdk-darwin-tar/build.gradle b/distribution/archives/no-jdk-darwin-tar/build.gradle deleted file mode 100644 index 4a6dde5fc0c92..0000000000000 --- a/distribution/archives/no-jdk-darwin-tar/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. diff --git a/distribution/archives/no-jdk-linux-tar/build.gradle b/distribution/archives/no-jdk-linux-tar/build.gradle deleted file mode 100644 index 4a6dde5fc0c92..0000000000000 --- a/distribution/archives/no-jdk-linux-tar/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. 
diff --git a/distribution/archives/no-jdk-windows-zip/build.gradle b/distribution/archives/no-jdk-windows-zip/build.gradle deleted file mode 100644 index 4a6dde5fc0c92..0000000000000 --- a/distribution/archives/no-jdk-windows-zip/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. diff --git a/distribution/build.gradle b/distribution/build.gradle index 942a718acea96..26093832ae17b 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -69,10 +69,6 @@ def buildDefaultNoticeTaskProvider = tasks.register("buildDefaultNotice", Notice licensesDir new File(project(':distribution').projectDir, 'licenses') } -def buildDefaultNoJdkNoticeTaskProvider = tasks.register("buildDefaultNoJdkNotice", NoticeTask) - -def buildOssNoJdkNoticeTaskProvider = tasks.register("buildOssNoJdkNotice", NoticeTask) - // The :server and :libs projects belong to all distributions tasks.withType(NoticeTask).configureEach { licensesDir project(':server').file('licenses') @@ -86,7 +82,6 @@ tasks.withType(NoticeTask).configureEach { /***************************************************************************** * Modules * *****************************************************************************/ -String ossOutputs = 'build/outputs/oss' String defaultOutputs = 'build/outputs/default' String systemdOutputs = 'build/outputs/systemd' String transportOutputs = 'build/outputs/transport-only' @@ -354,20 +349,20 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from buildTransportModulesTaskProvider } - configFiles = { distributionType, testDistro, jdk -> + configFiles = { distributionType, isTestDistro -> copySpec { with copySpec { // main config files, processed with distribution specific substitutions from '../src/config' exclude 'log4j2.properties' // this is handled separately below - filter("tokens" : expansionsForDistribution(distributionType, testDistro, jdk), ReplaceTokens.class) + filter("tokens" : expansionsForDistribution(distributionType, isTestDistro), ReplaceTokens.class) } from buildDefaultLog4jConfigTaskProvider from defaultConfigFiles } } - binFiles = { distributionType, testDistro, jdk -> + binFiles = { distributionType, testDistro -> copySpec { // non-windows files, for all distributions with copySpec { @@ -375,7 +370,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { exclude '*.exe' exclude '*.bat' eachFile { it.setMode(0755) } - filter("tokens" : expansionsForDistribution(distributionType, testDistro, jdk), ReplaceTokens.class) + filter("tokens" : expansionsForDistribution(distributionType, testDistro), ReplaceTokens.class) } // windows files, only for zip if (distributionType == 'zip') { @@ -383,7 +378,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { from '../src/bin' include '*.bat' filter(FixCrLfFilter, eol: FixCrLfFilter.CrLf.newInstance('crlf')) - filter("tokens" : expansionsForDistribution(distributionType, testDistro, jdk), ReplaceTokens.class) + filter("tokens" : expansionsForDistribution(distributionType, testDistro), ReplaceTokens.class) } with copySpec { from '../src/bin' @@ -403,16 +398,12 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - noticeFile = { testDistro, jdk -> + noticeFile = { testDistro -> copySpec { if (testDistro) { from buildServerNoticeTaskProvider } else { - if (jdk) { - from buildDefaultNoticeTaskProvider - } else { - 
from buildDefaultNoJdkNoticeTaskProvider - } + from buildDefaultNoticeTaskProvider } } } @@ -466,14 +457,14 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { * */ subprojects { - ext.expansionsForDistribution = { distributionType, testDistro, jdk -> + ext.expansionsForDistribution = { distributionType, isTestDistro -> final String packagingPathData = "path.data: /var/lib/elasticsearch" final String pathLogs = "/var/log/elasticsearch" final String packagingPathLogs = "path.logs: ${pathLogs}" final String packagingLoggc = "${pathLogs}/gc.log" String licenseText - if (testDistro) { + if (isTestDistro) { licenseText = rootProject.file('licenses/SSPL-1.0+ELASTIC-LICENSE-2.0.txt').getText('UTF-8') } else { licenseText = rootProject.file('licenses/ELASTIC-LICENSE-2.0.txt').getText('UTF-8') @@ -541,7 +532,7 @@ subprojects { ], 'es.distribution.flavor': [ - 'def': testDistro ? 'oss' : 'default' + 'def': 'default' ], 'es.distribution.type': [ @@ -552,11 +543,11 @@ subprojects { ], 'es.bundled_jdk': [ - 'def': jdk ? 'true' : 'false' + 'def': 'true' ], 'license.name': [ - 'deb': testDistro ? 'ASL-2.0' : 'Elastic-License' + 'deb': 'Elastic-License' ], 'license.text': [ diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index abae2aae9d032..9815e1cabdc8b 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -46,24 +46,24 @@ plugins { id "nebula.ospackage-base" version "8.6.1" } -void addProcessFilesTask(String type, boolean oss, boolean jdk) { - String packagingFiles = "build/packaging/${oss ? 'oss-' : ''}${jdk ? '' : 'no-jdk-'}${type}" +['deb', 'rpm'].each { type -> + String packagingFiles = "build/packaging/${type}" - String taskName = "process${oss ? 'Oss' : ''}${jdk ? '' : 'NoJdk'}${type.capitalize()}Files" + String taskName = "process${type.capitalize()}Files" tasks.register(taskName, Copy) { into packagingFiles with copySpec { from 'src/common' from "src/${type}" - filter("tokens" : expansionsForDistribution(type, oss, jdk), ReplaceTokens.class) + filter("tokens" : expansionsForDistribution(type, false), ReplaceTokens.class) } into('etc/elasticsearch') { - with configFiles(type, oss, jdk) + with configFiles(type, false) } - filter("tokens" : expansionsForDistribution(type, oss, jdk), ReplaceTokens.class) + filter("tokens" : expansionsForDistribution(type, false), ReplaceTokens.class) doLast { // create empty dirs, we set the permissions when configuring the packages @@ -78,25 +78,16 @@ void addProcessFilesTask(String type, boolean oss, boolean jdk) { } } -addProcessFilesTask('deb', true, true) -addProcessFilesTask('deb', true, false) -addProcessFilesTask('deb', false, true) -addProcessFilesTask('deb', false, false) -addProcessFilesTask('rpm', true, true) -addProcessFilesTask('rpm', true, false) -addProcessFilesTask('rpm', false, true) -addProcessFilesTask('rpm', false, false) - // Common configuration that is package dependent. This can't go in ospackage // since we have different templated files that need to be consumed, but the structure // is the same -Closure commonPackageConfig(String type, boolean oss, boolean jdk, String architecture) { +def commonPackageConfig(String type, String architecture) { return { onlyIf { OS.current().equals(OS.WINDOWS) == false } - dependsOn "process${oss ? 'Oss' : ''}${jdk ? '' : 'NoJdk'}${type.capitalize()}Files" - packageName "elasticsearch${oss ? 
'-oss' : ''}" + dependsOn "process${type.capitalize()}Files" + packageName "elasticsearch" if (type == 'deb') { if (architecture == 'x64') { arch('amd64') @@ -114,11 +105,10 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk, String archit } } // Follow elasticsearch's file naming convention - String jdkString = jdk ? "" : "no-jdk-" - String prefix = "${architecture == 'aarch64' ? 'aarch64-' : ''}${oss ? 'oss-' : ''}${jdk ? '' : 'no-jdk-'}${type}" + String prefix = "${architecture == 'aarch64' ? 'aarch64-' : ''}${type}" destinationDirectory = file("${prefix}/build/distributions") - archiveFileName.value(project.provider({ "${packageName}-${project.version}-${jdkString}${archString}.${type}" } )) - String packagingFiles = "build/packaging/${oss ? 'oss-' : ''}${jdk ? '' : 'no-jdk-'}${type}" + archiveFileName.value(project.provider({ "${packageName}-${project.version}-${archString}.${type}" } )) + String packagingFiles = "build/packaging/${type}" String scripts = "${packagingFiles}/scripts" preInstall file("${scripts}/preinst") @@ -133,22 +123,20 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk, String archit // specify it again explicitly for copying common files into('/usr/share/elasticsearch') { into('bin') { - with binFiles(type, oss, jdk) + with binFiles(type, false) } from(rootProject.projectDir) { include 'README.asciidoc' fileMode 0644 } into('lib') { - with libFiles(oss) + with libFiles(false) } into('modules') { with modulesFiles('linux-' + ((architecture == 'x64') ? 'x86_64' : architecture)) } - if (jdk) { - into('jdk') { - with jdkFiles(project, 'linux', architecture) - } + into('jdk') { + with jdkFiles(project, 'linux', architecture) } // we need to specify every intermediate directory in these paths so the package managers know they are explicitly // intended to manage them; otherwise they may be left behind on uninstallation. duplicate calls of the same @@ -178,7 +166,7 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk, String archit assert type == 'rpm' into('/usr/share/elasticsearch') { from(rootProject.file('licenses')) { - include oss ? 
'SSPL-1.0+ELASTIC-LICENSE-2.0.txt' : 'ELASTIC-LICENSE-2.0.txt' + include 'ELASTIC-LICENSE-2.0.txt' rename { 'LICENSE.txt' } } fileMode 0644 @@ -189,12 +177,10 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk, String archit configurationFile '/etc/elasticsearch/elasticsearch.yml' configurationFile '/etc/elasticsearch/jvm.options' configurationFile '/etc/elasticsearch/log4j2.properties' - if (oss == false) { - configurationFile '/etc/elasticsearch/role_mapping.yml' - configurationFile '/etc/elasticsearch/roles.yml' - configurationFile '/etc/elasticsearch/users' - configurationFile '/etc/elasticsearch/users_roles' - } + configurationFile '/etc/elasticsearch/role_mapping.yml' + configurationFile '/etc/elasticsearch/roles.yml' + configurationFile '/etc/elasticsearch/users' + configurationFile '/etc/elasticsearch/users_roles' from("${packagingFiles}") { dirMode 02750 into('/etc') @@ -213,7 +199,7 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk, String archit createDirectoryEntry true fileType CONFIG | NOREPLACE } - String envFile = expansionsForDistribution(type, oss, jdk)['path.env'] + String envFile = expansionsForDistribution(type, false)['path.env'] configurationFile envFile into(new File(envFile).getParent()) { fileType CONFIG | NOREPLACE @@ -261,10 +247,10 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk, String archit copyEmptyDir('/usr/share/elasticsearch/plugins', 'root', 'root', 0755) // the oss package conflicts with the default distribution and vice versa - conflicts('elasticsearch' + (oss ? '' : '-oss')) + conflicts('elasticsearch-oss') into '/usr/share/elasticsearch' - with noticeFile(oss, jdk) + with noticeFile(false) } } @@ -300,17 +286,13 @@ ospackage { into '/usr/share/elasticsearch' } -Closure commonDebConfig(boolean oss, boolean jdk, String architecture) { +Closure commonDebConfig(String architecture) { return { - configure(commonPackageConfig('deb', oss, jdk, architecture)) + configure(commonPackageConfig('deb', architecture)) // jdeb does not provide a way to set the License control attribute, and ospackage // silently ignores setting it. 
Instead, we set the license as "custom field" - if (oss) { - customFields['License'] = 'ASL-2.0' - } else { - customFields['License'] = 'Elastic-License' - } + customFields['License'] = 'Elastic-License' archiveVersion = project.version.replace('-', '~') packageGroup 'web' @@ -323,46 +305,23 @@ Closure commonDebConfig(boolean oss, boolean jdk, String architecture) { into('/usr/share/lintian/overrides') { from('src/deb/lintian/elasticsearch') - if (oss) { - rename('elasticsearch', 'elasticsearch-oss') - } } } } tasks.register('buildAarch64Deb', Deb) { - configure(commonDebConfig(false, true, 'aarch64')) + configure(commonDebConfig('aarch64')) } tasks.register('buildDeb', Deb) { - configure(commonDebConfig(false, true, 'x64')) -} - -tasks.register('buildAarch64OssDeb', Deb) { - configure(commonDebConfig(true, true, 'aarch64')) + configure(commonDebConfig('x64')) } -tasks.register('buildOssDeb', Deb) { - configure(commonDebConfig(true, true, 'x64')) -} - -tasks.register('buildNoJdkDeb', Deb) { - configure(commonDebConfig(false, false, 'x64')) -} - -tasks.register('buildOssNoJdkDeb', Deb) { - configure(commonDebConfig(true, false, 'x64')) -} - -Closure commonRpmConfig(boolean oss, boolean jdk, String architecture) { +Closure commonRpmConfig(String architecture) { return { - configure(commonPackageConfig('rpm', oss, jdk, architecture)) + configure(commonPackageConfig('rpm', architecture)) - if (oss) { - license 'ASL 2.0' - } else { - license 'Elastic License' - } + license 'Elastic License' packageGroup 'Application/Internet' requires '/bin/bash' @@ -384,27 +343,11 @@ Closure commonRpmConfig(boolean oss, boolean jdk, String architecture) { } tasks.register('buildAarch64Rpm', Rpm) { - configure(commonRpmConfig(false, true, 'aarch64')) + configure(commonRpmConfig('aarch64')) } tasks.register('buildRpm', Rpm) { - configure(commonRpmConfig(false, true, 'x64')) -} - -tasks.register('buildAarch64OssRpm', Rpm) { - configure(commonRpmConfig(true, true, 'aarch64')) -} - -tasks.register('buildOssRpm', Rpm) { - configure(commonRpmConfig(true, true, 'x64')) -} - -tasks.register('buildNoJdkRpm', Rpm) { - configure(commonRpmConfig(false, false, 'x64')) -} - -tasks.register('buildOssNoJdkRpm', Rpm) { - configure(commonRpmConfig(true, false, 'x64')) + configure(commonRpmConfig('x64')) } Closure dpkgExists = { it -> new File('/bin/dpkg-deb').exists() || new File('/usr/bin/dpkg-deb').exists() || new File('/usr/local/bin/dpkg-deb').exists() } @@ -481,15 +424,9 @@ subprojects { Path copyrightPath String expectedLicense String licenseFilename - if (project.name.contains('oss-')) { - copyrightPath = packageExtractionDir.toPath().resolve("usr/share/doc/elasticsearch-oss/copyright") - expectedLicense = "ASL-2.0" - licenseFilename = "SSPL-1.0+ELASTIC-LICENSE-2.0.txt" - } else { - copyrightPath = packageExtractionDir.toPath().resolve("usr/share/doc/elasticsearch/copyright") - expectedLicense = "Elastic-License" - licenseFilename = "ELASTIC-LICENSE-2.0.txt" - } + copyrightPath = packageExtractionDir.toPath().resolve("usr/share/doc/elasticsearch/copyright") + expectedLicense = "Elastic-License" + licenseFilename = "ELASTIC-LICENSE-2.0.txt" final List header = Arrays.asList("Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/", "Copyright: Elasticsearch B.V. 
", "License: " + expectedLicense) @@ -504,11 +441,7 @@ subprojects { onlyIf rpmExists doLast { String licenseFilename - if (project.name.contains('oss-')) { - licenseFilename = "SSPL-1.0+ELASTIC-LICENSE-2.0.txt" - } else { - licenseFilename = "ELASTIC-LICENSE-2.0.txt" - } + licenseFilename = "ELASTIC-LICENSE-2.0.txt" final List licenseLines = Files.readAllLines(rootDir.toPath().resolve("licenses/" + licenseFilename)) final Path licensePath = packageExtractionDir.toPath().resolve("usr/share/elasticsearch/LICENSE.txt") assertLinesInFile(licensePath, licenseLines) @@ -544,11 +477,7 @@ subprojects { exec.standardOutput = output doLast { String expectedLicense - if (project.name.contains('oss-')) { - expectedLicense = "ASL-2.0" - } else { - expectedLicense = "Elastic-License" - } + expectedLicense = "Elastic-License" final Pattern pattern = Pattern.compile("\\s*License: (.+)") final String info = output.toString('UTF-8') final String[] actualLines = info.split("\n") @@ -581,11 +510,7 @@ subprojects { doLast { String license = output.toString('UTF-8') String expectedLicense - if (project.name.contains('oss-')) { - expectedLicense = "ASL 2.0" - } else { - expectedLicense = "Elastic License" - } + expectedLicense = "Elastic License" if (license != expectedLicense) { throw new GradleException("expected license [${expectedLicense}] for [${-> buildDist.get().outputs.files.singleFile}] but was [${license}]") } diff --git a/distribution/packages/no-jdk-deb/build.gradle b/distribution/packages/no-jdk-deb/build.gradle deleted file mode 100644 index 4a6dde5fc0c92..0000000000000 --- a/distribution/packages/no-jdk-deb/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. diff --git a/distribution/packages/no-jdk-rpm/build.gradle b/distribution/packages/no-jdk-rpm/build.gradle deleted file mode 100644 index 4a6dde5fc0c92..0000000000000 --- a/distribution/packages/no-jdk-rpm/build.gradle +++ /dev/null @@ -1,2 +0,0 @@ -// This file is intentionally blank. All configuration of the -// distribution is done in the parent project. 
diff --git a/distribution/packages/no-jdk-rpm/src/main/resources/logo/elastic.gif b/distribution/packages/no-jdk-rpm/src/main/resources/logo/elastic.gif deleted file mode 100755 index e3b20998d5300..0000000000000 Binary files a/distribution/packages/no-jdk-rpm/src/main/resources/logo/elastic.gif and /dev/null differ diff --git a/docs/changelog/70635.yaml b/docs/changelog/70635.yaml index d877a7bbba0d5..e118ae31c27cb 100644 --- a/docs/changelog/70635.yaml +++ b/docs/changelog/70635.yaml @@ -3,6 +3,3 @@ summary: Tighten up write permissions in Docker image area: Packaging type: enhancement issues: [] -versions: - - v8.0.0 - - v7.15.0 diff --git a/docs/changelog/75981.yaml b/docs/changelog/75981.yaml index 8b7d8a03136d6..ac1018f8ecb0c 100644 --- a/docs/changelog/75981.yaml +++ b/docs/changelog/75981.yaml @@ -3,7 +3,3 @@ summary: Bump bundled JDK to 16.0.2 area: Packaging type: upgrade issues: [] -versions: - - v8.0.0 - - v7.14.1 - - v7.15.0 diff --git a/docs/changelog/76192.yaml b/docs/changelog/76192.yaml index 6d0f3d7262065..4639d84e122e6 100644 --- a/docs/changelog/76192.yaml +++ b/docs/changelog/76192.yaml @@ -5,6 +5,3 @@ type: enhancement issues: - 76148 - 74327 -versions: - - v8.0.0 - - v7.15.0 diff --git a/docs/changelog/77128.yaml b/docs/changelog/77128.yaml new file mode 100644 index 0000000000000..63133b6c76acc --- /dev/null +++ b/docs/changelog/77128.yaml @@ -0,0 +1,7 @@ +pr: 77128 +summary: Handle cgroups v2 in `OsProbe` +area: Infra/Core +type: enhancement +issues: + - 77126 + - 76812 diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index c8db57f52598d..babc7681be636 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -219,7 +219,7 @@ transportClient.delete(request, new ActionListener() { // <2> }); -------------------------------------------------- <1> Create the `DeleteRequest` using its constructor -<2> Execute the `DeleteRequest` by passing the request and a +<2> Execute the `DeleteRequest` by passing the request and an `ActionListener` that gets called on execution completion or failure. This method does not block and returns immediately. <3> The `onResponse()` method is called when the response is @@ -234,7 +234,7 @@ The same request asynchronously executed using the high-level client is: include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-request-async-execution] -------------------------------------------------- <1> Create the `DeleteRequest` using its constructor -<2> Execute the `DeleteRequest` by passing the request and a +<2> Execute the `DeleteRequest` by passing the request and an `ActionListener` that gets called on execution completion or failure. This method does not block and returns immediately. <3> The `onResponse()` method is called when the response is diff --git a/docs/reference/aggregations/metrics/rate-aggregation.asciidoc b/docs/reference/aggregations/metrics/rate-aggregation.asciidoc index ffc9d30c4bf91..cb1f903f6443c 100644 --- a/docs/reference/aggregations/metrics/rate-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/rate-aggregation.asciidoc @@ -6,10 +6,12 @@ Rate ++++ -A `rate` metrics aggregation can be used only inside a `date_histogram` and calculates a rate of documents or a field in each -`date_histogram` bucket. The field values can be generated extracted from specific numeric or +A `rate` metrics aggregation can be used only inside a `date_histogram` or `composite` aggregation. 
It calculates a rate of documents +or a field in each bucket. The field values can be generated extracted from specific numeric or <> in the documents. +NOTE: For `composite` aggregations, there must be exactly one `date_histogram` source for the `rate` aggregation to be supported. + ==== Syntax A `rate` aggregation looks like this in isolation: @@ -167,6 +169,142 @@ The response will contain the average daily sale prices for each month. -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] +You can also take advantage of `composite` aggregations to calculate the average daily sale price for each item in +your inventory + +[source,console] +-------------------------------------------------- +GET sales/_search?filter_path=aggregations&size=0 +{ + "aggs": { + "buckets": { + "composite": { <1> + "sources": [ + { + "month": { + "date_histogram": { <2> + "field": "date", + "calendar_interval": "month" + } + } + }, + { + "type": { <3> + "terms": { + "field": "type" + } + } + } + ] + }, + "aggs": { + "avg_price": { + "rate": { + "field": "price", <4> + "unit": "day" <5> + } + } + } + } + } +} +-------------------------------------------------- +// TEST[setup:sales] +<1> Composite aggregation with a date histogram source + and a source for the item type. +<2> The date histogram source grouping monthly +<3> The terms source grouping for each sale item type +<4> Calculate sum of all sale prices, per month and item +<5> Convert to average daily sales per item + +The response will contain the average daily sale prices for each month per item. + +[source,console-result] +-------------------------------------------------- +{ + "aggregations" : { + "buckets" : { + "after_key" : { + "month" : 1425168000000, + "type" : "t-shirt" + }, + "buckets" : [ + { + "key" : { + "month" : 1420070400000, + "type" : "bag" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 4.838709677419355 + } + }, + { + "key" : { + "month" : 1420070400000, + "type" : "hat" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 6.451612903225806 + } + }, + { + "key" : { + "month" : 1420070400000, + "type" : "t-shirt" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 6.451612903225806 + } + }, + { + "key" : { + "month" : 1422748800000, + "type" : "hat" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 1.7857142857142858 + } + }, + { + "key" : { + "month" : 1422748800000, + "type" : "t-shirt" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 0.35714285714285715 + } + }, + { + "key" : { + "month" : 1425168000000, + "type" : "hat" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 6.451612903225806 + } + }, + { + "key" : { + "month" : 1425168000000, + "type" : "t-shirt" + }, + "doc_count" : 1, + "avg_price" : { + "value" : 5.645161290322581 + } + } + ] + } + } +} +-------------------------------------------------- + By adding the `mode` parameter with the value `value_count`, we can change the calculation from `sum` to the number of values of the field: [source,console] diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 304181792cb4f..71b60f9e3d402 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -214,6 +214,37 @@ instead. When using this method, the `source_content_type` parameter should also be passed with a media type value that indicates the format of the source, such as `application/json`. 
+[discrete] +[[api-compatibility]] +=== REST API version compatibility + +Major version upgrades often include a number of breaking changes +that impact how you interact with {es}. +While we recommend that you monitor the deprecation logs and +update applications before upgrading {es}, +having to coordinate the necessary changes can be an impediment to upgrading. + +You can enable an existing application to function without modification after +an upgrade by including API compatibility headers, which tell {es} you are still +using the previous version of the REST API. Using these headers allows the +structure of requests and responses to remain the same; it does not guarantee +the same behavior. + + +You set version compatibility on a per-request basis in the `Content-Type` and `Accept` headers. +Setting `compatible-with` to the same major version as +the version you're running has no impact, +but ensures that the request will still work after {es} is upgraded. + +To tell {es} 8.0 you are using the 7.x request and response format, +set `compatible-with=7`: + +[source,sh] +---------------------------------------------------------------------- +Content-Type: application/vnd.elasticsearch+json; compatible-with=7 +Accept: application/vnd.elasticsearch+json; compatible-with=7 +---------------------------------------------------------------------- + [discrete] [[api-url-access-control]] === URL-based access control diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index 500ac99ec129f..d125c557323e9 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[index-lifecycle-error-handling]] -== Resolve lifecycle policy execution errors +== Troubleshooting {ilm} errors When {ilm-init} executes a lifecycle policy, it's possible for errors to occur while performing the necessary index operations for a step. @@ -147,3 +147,69 @@ POST /my-index-000001/_ilm/retry {ilm-init} subsequently attempts to re-run the step that failed. You can use the <> to monitor the progress. + +[discrete] +=== Common {ilm-init} errors + +Here's how to resolve the most common errors reported in the `ERROR` step. + +TIP: Problems with rollover aliases are a common cause of errors. +Consider using <> instead of managing rollover with aliases. + +[discrete] +==== Rollover alias [x] can point to multiple indices, found duplicated alias [x] in index template [z] + +The target rollover alias is specified in an index template's `index.lifecycle.rollover_alias` setting. +You need to explicitly configure this alias _one time_ when you +<>. +The rollover action then manages setting and updating the alias to +<> to each subsequent index. + +Do not explicitly configure this same alias in the aliases section of an index template. + +[discrete] +==== index.lifecycle.rollover_alias [x] does not point to index [y] + +Either the index is using the wrong alias or the alias does not exist. + +Check the `index.lifecycle.rollover_alias` <>. +To see what aliases are configured, use <>. + +[discrete] +==== Setting [index.lifecycle.rollover_alias] for index [y] is empty or not defined + +The `index.lifecycle.rollover_alias` setting must be configured for the rollover action to work. + +Update the index settings to set `index.lifecycle.rollover_alias`. + +[discrete] +==== Alias [x] has more than one write index [y,z] + +Only one index can be designated as the write index for a particular alias. 
+ +Use the <> API to set `is_write_index:false` for all but one index. + +[discrete] +==== index name [x] does not match pattern ^.*-\d+ + +The index name must match the regex pattern `^.*-\d+` for the rollover action to work. +The most common problem is that the index name does not contain trailing digits. +For example, `my-index` does not match the pattern requirement. + +Append a numeric value to the index name, for example `my-index-000001`. + +[discrete] +==== CircuitBreakingException: [x] data too large, data for [y] + +This indicates that the cluster is hitting resource limits. + +Before continuing to set up {ilm-init}, you'll need to take steps to alleviate the resource issues. +For more information, see <>. + +[discrete] +==== High disk watermark [x] exceeded on [y] + +This indicates that the cluster is running out of disk space. +This can happen when you don't have {ilm} set up to roll over from hot to warm nodes. + +Consider adding nodes, upgrading your hardware, or deleting unneeded indices. \ No newline at end of file diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 63932c4d5a72a..4f31b7f7c3d1f 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -112,13 +112,6 @@ to incomplete history on the leader. Defaults to `12h`. Indicates whether <> are pre-loaded for nested queries. Possible values are `true` (default) and `false`. -[[index-hidden]] `index.hidden`:: - - Indicates whether the index should be hidden by default. Hidden indices are not - returned by default when using a wildcard expression. This behavior is controlled - per request through the use of the `expand_wildcards` parameter. Possible values are - `true` and `false` (default). - [[index-shard-check-on-startup]] `index.shard.check_on_startup`:: + ==== @@ -327,11 +320,14 @@ Defaults to `*`, which matches all fields eligible for [[index-final-pipeline]] `index.final_pipeline`:: - The final <> pipeline for this index. Index requests - will fail if the final pipeline is set and the pipeline does not exist. - The final pipeline always runs after the request pipeline (if specified) and - the default pipeline (if it exists). The special pipeline name `_none` - indicates no ingest pipeline will run. +The final <> pipeline for this index. Indexing requests +will fail if the final pipeline is set and the pipeline does not exist. +The final pipeline always runs after the request pipeline (if specified) and +the default pipeline (if it exists). The special pipeline name `_none` +indicates no ingest pipeline will run. ++ +NOTE: You can't use a final pipeline to change the `_index` field. If the +pipeline attempts to change the `_index` field, the indexing request will fail. [[index-mapping-dimension-fields-limit]] `index.mapping.dimension_fields.limit`:: @@ -340,6 +336,13 @@ the index. Defaults to `16`. + You can mark a field as a dimension using the `dimension` mapping parameter. +[[index-hidden]] `index.hidden`:: + + Indicates whether the index should be hidden by default. Hidden indices are not + returned by default when using a wildcard expression. This behavior is controlled + per request through the use of the `expand_wildcards` parameter. Possible values are + `true` and `false` (default). 
+ [discrete] === Settings in other index modules diff --git a/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc b/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc index f7bc9680db623..e7b285b4fae57 100644 --- a/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc +++ b/docs/reference/ingest/apis/enrich/enrich-stats.asciidoc @@ -85,6 +85,36 @@ that enrich processors have executed since node startup. -- +`cache_stats`:: ++ +-- +(Array of objects) +Objects containing information about the enrich +cache stats on each ingest node. + +Returned parameters include: + +`node_id`:: +(String) +ID of the ingest node with an enrich cache. + +`count`:: +(Integer) +Number of cached entries. + +`hits`:: +(Integer) +The number of enrich lookups served from cache. + +`misses`:: +(Integer) +The number of times enrich lookups couldn't be +served from cache. + +`evictions`:: +(Integer) +The number of cache entries evicted from the cache. +-- [[enrich-stats-api-example]] ==== {api-examples-title} @@ -126,6 +156,15 @@ The API returns the following response: "remote_requests_total": 0, "executed_searches_total": 0 } + ], + "cache_stats": [ + { + "node_id": "1sFM8cmSROZYhPxVsiWew", + "count": 0, + "hits": 0, + "misses": 0, + "evictions": 0 + } ] } ---- @@ -133,3 +172,7 @@ The API returns the following response: // TESTRESPONSE[s/"node_id": "1sFM8cmSROZYhPxVsiWew"/"node_id" : $body.coordinator_stats.0.node_id/] // TESTRESPONSE[s/"remote_requests_total": 0/"remote_requests_total" : $body.coordinator_stats.0.remote_requests_total/] // TESTRESPONSE[s/"executed_searches_total": 0/"executed_searches_total" : $body.coordinator_stats.0.executed_searches_total/] +// TESTRESPONSE[s/"node_id": "1sFM8cmSROZYhPxVsiWew"/"node_id" : $body.cache_stats.0.node_id/] +// TESTRESPONSE[s/"count": 0/"count" : $body.cache_stats.0.count/] +// TESTRESPONSE[s/"misses": 0/"misses" : $body.cache_stats.0.misses/] +// TESTRESPONSE[s/"evictions": 0/"evictions" : $body.cache_stats.0.evictions/] diff --git a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc index 35e9b9e69b59c..a213146a49afe 100644 --- a/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/execute-enrich-policy.asciidoc @@ -102,8 +102,8 @@ it may take a while to return a response. (Required, string) Enrich policy to execute. -[[execute-enrich-policy-api-request-body]] -==== {api-request-body-title} +[[execute-enrich-policy-api-query-params]] +==== {api-query-parms-title} `wait_for_completion`:: (Required, Boolean) diff --git a/docs/reference/ingest/processors/foreach.asciidoc b/docs/reference/ingest/processors/foreach.asciidoc index 7a8c29ff24a89..3cdf0319aabc9 100644 --- a/docs/reference/ingest/processors/foreach.asciidoc +++ b/docs/reference/ingest/processors/foreach.asciidoc @@ -4,39 +4,70 @@ Foreach ++++ -Processes elements in an array of unknown length. +Runs an ingest processor on each element of an array or object. -All processors can operate on elements inside an array, but if all elements of an array need to -be processed in the same way, defining a processor for each element becomes cumbersome and tricky -because it is likely that the number of elements in an array is unknown. For this reason the `foreach` -processor exists. By specifying the field holding array elements and a processor that -defines what should happen to each element, array fields can easily be preprocessed. 
+All ingest processors can run on array or object elements. However, if the +number of elements is unknown, it can be cumbersome to process each one in the +same way. -A processor inside the foreach processor works in the array element context and puts that in the ingest metadata -under the `_ingest._value` key. If the array element is a json object it holds all immediate fields of that json object. -and if the nested object is a value is `_ingest._value` just holds that value. Note that if a processor prior to the -`foreach` processor used `_ingest._value` key then the specified value will not be available to the processor inside -the `foreach` processor. The `foreach` processor does restore the original value, so that value is available to processors -after the `foreach` processor. - -Note that any other field from the document are accessible and modifiable like with all other processors. This processor -just puts the current array element being read into `_ingest._value` ingest metadata attribute, so that it may be -pre-processed. - -If the `foreach` processor fails to process an element inside the array, and no `on_failure` processor has been specified, -then it aborts the execution and leaves the array unmodified. +The `foreach` processor lets you specify a `field` containing array or object +values and a `processor` to run on each element in the field. [[foreach-options]] .Foreach Options [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | The array field -| `processor` | yes | - | The processor to execute against each field -| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +| `field` | yes | - | Field containing array or object +values. +| `processor` | yes | - | Ingest processor to run on each +element. +| `ignore_missing` | no | false | If `true`, the processor silently +exits without changing the document if the `field` is `null` or missing. include::common-options.asciidoc[] |====== +[discrete] +[[foreach-keys-values]] +==== Access keys and values + +When iterating through an array or object, the `foreach` processor stores the +current element's value in the `_ingest._value` <> field. `_ingest._value` contains the entire element value, including +any child fields. You can access child field values using dot notation on the +`_ingest._value` field. + +When iterating through an object, the `foreach` processor also stores the +current element's key as a string in `_ingest._key`. + +You can access and change `_ingest._key` and `_ingest._value` in the +`processor`. For an example, see the <>. + +[discrete] +[[foreach-failure-handling]] +==== Failure handling + +If the `foreach` processor fails to process an element and no `on_failure` +processor is specified, the `foreach` processor silently exits. This leaves the +entire array or object value unchanged. 
+ +[discrete] +[[foreach-examples]] +==== Examples + +The following examples show how you can use the `foreach` processor with +different data types and options: + +* <> +* <> +* <> +* <> + +[discrete] +[[foreach-array-ex]] +===== Array + Assume the following document: [source,js] @@ -64,7 +95,7 @@ When this `foreach` processor operates on this sample document: -------------------------------------------------- // NOTCONSOLE -Then the document will look like this after preprocessing: +Then the document will look like this after processing: [source,js] -------------------------------------------------- @@ -74,7 +105,11 @@ Then the document will look like this after preprocessing: -------------------------------------------------- // NOTCONSOLE -Let's take a look at another example: +[discrete] +[[foreach-array-objects-ex]] +===== Array of objects + +Assume the following document: [source,js] -------------------------------------------------- @@ -111,7 +146,7 @@ so the following `foreach` processor is used: -------------------------------------------------- // NOTCONSOLE -After preprocessing the result is: +After processing the result is: [source,js] -------------------------------------------------- @@ -128,6 +163,130 @@ After preprocessing the result is: -------------------------------------------------- // NOTCONSOLE +For another array of objects example, see +{plugins}/ingest-attachment-with-arrays.html[attachment processor +documentation]. + +[discrete] +[[foreach-object-ex]] +===== Object + +You can also use the `foreach` processor on object fields. For example, +the following document contains a `products` field with object values. + +[source,js] +-------------------------------------------------- +{ + "products" : { + "widgets" : { + "total_sales" : 50, + "unit_price": 1.99, + "display_name": "" + }, + "sprockets" : { + "total_sales" : 100, + "unit_price": 9.99, + "display_name": "Super Sprockets" + }, + "whizbangs" : { + "total_sales" : 200, + "unit_price": 19.99, + "display_name": "Wonderful Whizbangs" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +The following `foreach` processor changes the value of `products.display_name` +to uppercase. + +[source,js] +-------------------------------------------------- +{ + "foreach": { + "field": "products", + "processor": { + "uppercase": { + "field": "_ingest._value.display_name" + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +When run on the document, the `foreach` processor returns: + +[source,js] +-------------------------------------------------- +{ + "products" : { + "widgets" : { + "total_sales" : 50, + "unit_price" : 1.99, + "display_name" : "" + }, + "sprockets" : { + "total_sales" : 100, + "unit_price" : 9.99, + "display_name" : "SUPER SPROCKETS" + }, + "whizbangs" : { + "total_sales" : 200, + "unit_price" : 19.99, + "display_name" : "WONDERFUL WHIZBANGS" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +The following `foreach` processor sets each element's key to the +value of `products.display_name`. If `products.display_name` contains an empty string, +the processor deletes the element. 
+ +[source,js] +-------------------------------------------------- +{ + "foreach": { + "field": "products", + "processor": { + "set": { + "field": "_ingest._key", + "value": "{{_ingest._value.display_name}}" + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +When run on the previous document, the `foreach` processor returns: + +[source,js] +-------------------------------------------------- +{ + "products" : { + "Wonderful Whizbangs" : { + "total_sales" : 200, + "unit_price" : 19.99, + "display_name" : "Wonderful Whizbangs" + }, + "Super Sprockets" : { + "total_sales" : 100, + "unit_price" : 9.99, + "display_name" : "Super Sprockets" + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +[discrete] +[[failure-handling-ex]] +===== Failure handling + The wrapped processor can have a `on_failure` definition. For example, the `id` field may not exist on all person objects. Instead of failing the index request, you can use an `on_failure` @@ -159,5 +318,3 @@ block to send the document to the 'failure_index' index for later inspection: In this example, if the `remove` processor does fail, then the array elements that have been processed thus far will be updated. - -Another advanced example can be found in the {plugins}/ingest-attachment-with-arrays.html[attachment processor documentation]. diff --git a/docs/reference/mapping/runtime.asciidoc b/docs/reference/mapping/runtime.asciidoc index 2c084b3501041..97d15d9d39d03 100644 --- a/docs/reference/mapping/runtime.asciidoc +++ b/docs/reference/mapping/runtime.asciidoc @@ -87,12 +87,12 @@ your data, but can impact search performance based on the computation defined in the runtime script. To balance search performance and flexibility, index fields that you'll -commonly search for and filter on, such as a timestamp. {es} automatically uses -these indexed fields first when running a query, resulting in a fast response -time. You can then use runtime fields to limit the number of fields that {es} -needs to calculate values for. Using indexed fields in tandem with runtime -fields provides flexibility in the data that you index and how you define -queries for other fields. +frequently search for and filter on, such as a timestamp. {es} automatically +uses these indexed fields first when running a query, resulting in a fast +response time. You can then use runtime fields to limit the number of fields +that {es} needs to calculate values for. Using indexed fields in tandem with +runtime fields provides flexibility in the data that you index and how you +define queries for other fields. Use the <> to run searches that include runtime fields. This method of search helps to offset the performance impacts @@ -810,8 +810,14 @@ can define runtime fields in the <> of an index mapping. If you decide to index a runtime field for greater performance, just move the full runtime field definition (including the script) to the context of an index -mapping. This capability means you can write a script only once, and apply -it to any context that supports runtime fields. +mapping. {es} automatically uses these indexed fields to drive queries, +resulting in a fast response time. This capability means you can write a +script only once, and apply it to any context that supports runtime fields. + +You can then use runtime fields to limit the number of fields that {es} needs +to calculate values for. 
Using indexed fields in tandem with runtime fields +provides flexibility in the data that you index and how you define queries for +other fields. IMPORTANT: After indexing a runtime field, you cannot update the included script. If you need to change the script, create a new field with the updated diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index d10a83a5972aa..c9687b559b56c 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -7,10 +7,6 @@ ++++ Closes one or more {anomaly-jobs}. -A job can be opened and closed multiple times throughout its lifecycle. - -A closed job cannot receive data or perform analysis -operations, but you can still explore and navigate results. [[ml-close-job-request]] == {api-request-title} @@ -30,13 +26,19 @@ operations, but you can still explore and navigate results. [[ml-close-job-desc]] == {api-description-title} +A job can be opened and closed multiple times throughout its lifecycle. + +A closed job cannot receive data or perform analysis operations, but you can +still explore and navigate results. + You can close multiple {anomaly-jobs} in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the ``. -If you close an {anomaly-job} whose {dfeed} is running, the request will first -attempt to stop the {dfeed}, as though <> was called with -the same `timeout` and `force` parameters as the close request. +If you close an {anomaly-job} whose {dfeed} is running, the request first tries +to stop the {dfeed}. This behavior is equivalent to calling +<> with the same `timeout` and `force` parameters +as the close job request. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. diff --git a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc index 316bbd287a9d9..585463a409457 100644 --- a/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/delete-job.asciidoc @@ -34,9 +34,10 @@ are granted to anyone over the `.ml-*` indices. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. -If you delete a job that has a {dfeed}, the request will first attempt to -delete the {dfeed}, as though <> was called with the same -`timeout` and `force` parameters as this delete request. +If you delete a job that has a {dfeed}, the request first tries to delete the +{dfeed}. This behavior is equivalent to calling +<> with the same `timeout` and `force` +parameters as the delete job request. [[ml-delete-job-path-parms]] == {api-path-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 4786dfcef2300..d57c31b8c7c36 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -36,10 +36,10 @@ each interval. See {ml-docs}/ml-delayed-data-detection.html[Handling delayed dat [IMPORTANT] ==== -* You must use {kib} or this API to create a {dfeed}. Do not add a -{dfeed} directly to the `.ml-config` index using the {es} index API. 
If {es} -{security-features} are enabled, do not give users `write` privileges on the -`.ml-config` index. +* You must use {kib}, this API, or the <> +to create a {dfeed}. Do not add a {dfeed} directly to the `.ml-config` index +using the {es} index API. If {es} {security-features} are enabled, do not give +users `write` privileges on the `.ml-config` index. * When {es} {security-features} are enabled, your {dfeed} remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide diff --git a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc index 87a1cc6f8f36f..dad8471ced53f 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-job.asciidoc @@ -19,13 +19,24 @@ Instantiates an {anomaly-job}. Requires the `manage_ml` cluster privilege. This privilege is included in the `machine_learning_admin` built-in role. +If you include a `datafeed_config`, you must also have `read` index privileges +on the source index. + [[ml-put-job-desc]] == {api-description-title} -IMPORTANT: You must use {kib} or this API to create an {anomaly-job}. Do not put +[IMPORTANT] +==== +* You must use {kib} or this API to create an {anomaly-job}. Do not put a job directly to the `.ml-config` index using the {es} index API. If {es} {security-features} are enabled, do not give users `write` privileges on the `.ml-config` index. +* If you include a `datafeed_config` and {es} {security-features} are enabled, +your {dfeed} remembers which roles the user who created it had at the time of +creation and runs the query using those same roles. If you provide +<>, those +credentials are used instead. +==== [[ml-put-job-path-parms]] == {api-path-parms-title} @@ -250,7 +261,9 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=data-description] //End data_description `datafeed_config`:: -(object) The {dfeed} configured for the current {anomaly-job}. +(Optional, object) The {ml-docs}/ml-dfeeds.html[{dfeed}], which retrieves data +from {es} for analysis by the job. You can associate only one {dfeed} with each +{anomaly-job}. + .Properties of `datafeed` [%collapsible%open] diff --git a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc index 2dba8a32f75b3..362f697ad3956 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc @@ -71,8 +71,8 @@ For example, JSON data might contain the following transaction coordinates: In {es}, location data is likely to be stored in `geo_point` fields. For more information, see {ref}/geo-point.html[`geo_point` data type]. This data type is -supported natively in {ml-features}. Specifically, {dfeed} when pulling data from -a `geo_point` field, will transform the data into the appropriate `lat,lon` string -format before sending to the {anomaly-job}. +supported natively in {ml-features}. Specifically, when pulling data from a +`geo_point` field, a {dfeed} will transform the data into the appropriate +`lat,lon` string format before sending to the {anomaly-job}. For more information, see <>. 
diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index 85de5d1254133..702fb10b2f4f8 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -78,48 +78,30 @@ PUT _ml/anomaly_detectors/farequote }, "data_description": { "time_field":"time" <1> - } -} ----------------------------------- -// TEST[skip:setup:farequote_data] - -<1> The `airline`, `responsetime`, and `time` fields are aggregations. Only the -aggregated fields defined in the `analysis_config` object are analyzed by the -{anomaly-job}. - -NOTE: When the `summary_count_field_name` property is set to a non-null value, -the job expects to receive aggregated input. The property must be set to the -name of the field that contains the count of raw data points that have been -aggregated. It applies to all detectors in the job. - -The aggregations are defined in the {dfeed} as follows: - -[source,console] ----------------------------------- -PUT _ml/datafeeds/datafeed-farequote -{ - "job_id":"farequote", - "indices": ["farequote"], - "aggregations": { - "buckets": { - "date_histogram": { - "field": "time", - "fixed_interval": "360s", - "time_zone": "UTC" - }, - "aggregations": { - "time": { <1> - "max": {"field": "time"} + }, + "datafeed_config":{ + "indices": ["farequote"], + "aggregations": { + "buckets": { + "date_histogram": { + "field": "time", + "fixed_interval": "360s", + "time_zone": "UTC" }, - "airline": { <2> - "terms": { - "field": "airline", - "size": 100 + "aggregations": { + "time": { <2> + "max": {"field": "time"} }, - "aggregations": { - "responsetime": { <3> - "avg": { - "field": "responsetime" + "airline": { <3> + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { <4> + "avg": { + "field": "responsetime" + } } } } @@ -129,19 +111,27 @@ PUT _ml/datafeeds/datafeed-farequote } } ---------------------------------- -// TEST[skip:setup:farequote_job] +// TEST[skip:setup:farequote_data] -<1> The aggregations have names that match the fields that they operate on. The +<1> The `airline`, `responsetime`, and `time` fields are aggregations. Only the +aggregated fields defined in the `analysis_config` object are analyzed by the +{anomaly-job}. +<2> The aggregations have names that match the fields that they operate on. The `max` aggregation is named `time` and its field also needs to be `time`. -<2> The `term` aggregation is named `airline` and its field is also named +<3> The `term` aggregation is named `airline` and its field is also named `airline`. -<3> The `avg` aggregation is named `responsetime` and its field is also named +<4> The `avg` aggregation is named `responsetime` and its field is also named `responsetime`. +When the `summary_count_field_name` property is set to a non-null value, the job +expects to receive aggregated input. The property must be set to the name of the +field that contains the count of raw data points that have been aggregated. It +applies to all detectors in the job. + TIP: If you are using a `term` aggregation to gather influencer or partition field information, consider using a `composite` aggregation. It performs -better than a `date_histogram` with a nested `term` aggregation and also includes -all the values of the field instead of the top values per bucket. 
+better than a `date_histogram` with a nested `term` aggregation and also +includes all the values of the field instead of the top values per bucket. [discrete] [[aggs-using-composite]] @@ -153,15 +143,17 @@ For `composite` aggregation support, there must be exactly one `date_histogram` source. That value source must not be sorted in descending order. Additional `composite` aggregation value sources are allowed, such as `terms`. -NOTE: A {dfeed} that uses composite aggregations may not be as performant as datafeeds that use scrolling or -date histogram aggregations. Composite aggregations are optimized -for queries that are either `match_all` or `range` filters. Other types of +NOTE: A {dfeed} that uses composite aggregations may not be as performant as +{dfeeds} that use scrolling or date histogram aggregations. Composite +aggregations are optimized for queries that are either `match_all` or `range` +filters. Other types of queries may cause the `composite` aggregation to be ineffecient. Here is an example that uses a `composite` aggregation instead of a `date_histogram`. -Assuming the same job configuration as above. +This is an example of a job with a {dfeed} that uses a `composite` aggregation +to bucket the metrics based on time and terms: [source,console] ---------------------------------- @@ -178,54 +170,42 @@ PUT _ml/anomaly_detectors/farequote-composite }, "data_description": { "time_field":"time" - } -} ----------------------------------- -// TEST[skip:setup:farequote_data] - -This is an example of a datafeed that uses a `composite` aggregation to bucket -the metrics based on time and terms: - -[source,console] ----------------------------------- -PUT _ml/datafeeds/datafeed-farequote-composite -{ - "job_id": "farequote-composite", - "indices": [ - "farequote" - ], - "aggregations": { - "buckets": { - "composite": { - "size": 1000, <1> - "sources": [ - { - "time_bucket": { <2> - "date_histogram": { - "field": "time", - "fixed_interval": "360s", - "time_zone": "UTC" + }, + "datafeed_config":{ + "indices": ["farequote"], + "aggregations": { + "buckets": { + "composite": { + "size": 1000, <1> + "sources": [ + { + "time_bucket": { <2> + "date_histogram": { + "field": "time", + "fixed_interval": "360s", + "time_zone": "UTC" + } } - } - }, - { - "airline": { <3> - "terms": { - "field": "airline" + }, + { + "airline": { <3> + "terms": { + "field": "airline" + } } } - } - ] - }, - "aggregations": { - "time": { <4> - "max": { - "field": "time" - } + ] }, - "responsetime": { <5> - "avg": { - "field": "responsetime" + "aggregations": { + "time": { <4> + "max": { + "field": "time" + } + }, + "responsetime": { <5> + "avg": { + "field": "responsetime" + } } } } @@ -233,10 +213,8 @@ PUT _ml/datafeeds/datafeed-farequote-composite } } ---------------------------------- -// TEST[skip:setup:farequote_job] - <1> Provide the `size` to the composite agg to control how many resources -are used when aggregating the data. A larger `size` means a faster datafeed but +are used when aggregating the data. A larger `size` means a faster {dfeed} but more cluster resources are used when searching. <2> The required `date_histogram` composite aggregation source. Make sure it is named differently than your desired time field. @@ -364,7 +342,7 @@ When using a `date_histogram` aggregation to bucket by time: "bucket_agg": { ... 
}, - "aggregations": {] + "aggregations": { "data_histogram_aggregation": { "date_histogram": { "field": "time", diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index cebb40ac0bbcd..6dd13006f4601 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -5,42 +5,61 @@ beta::[] {kib} {alert-features} include support for {ml} rules, which run scheduled -checks on an {anomaly-job} or a group of jobs to detect anomalies with certain -conditions. If an anomaly meets the conditions, an alert is created and the -associated action is triggered. For example, you can create a rule to check an -{anomaly-job} every fifteen minutes for critical anomalies and to notify you in -an email. To learn more about {kib} {alert-features}, refer to +checks for anomalies in one or more {anomaly-jobs} or check the +health of the job with certain conditions. If the conditions of the rule are met, an +alert is created and the associated action is triggered. For example, you can +create a rule to check an {anomaly-job} every fifteen minutes for critical +anomalies and to notify you in an email. To learn more about {kib} +{alert-features}, refer to {kibana-ref}/alerting-getting-started.html#alerting-getting-started[Alerting]. +The following {ml} rules are available: -[[creating-anomaly-alert-rules]] +{anomaly-detect-cap} alert:: + Checks if the {anomaly-job} results contain anomalies that match the rule + conditions. + +{anomaly-jobs-cap} health:: + Monitors job health and alerts if an operational issue occurred that may + prevent the job from detecting anomalies. + +TIP: If you have created rules for specific {anomaly-jobs} and you want to +monitor whether these jobs work as expected, {anomaly-jobs} health rules are +ideal for this purpose. + + +[[creating-ml-rules]] == Creating a rule You can create {ml} rules in the {anomaly-job} wizard after you start the job, -from the job list, or under **{stack-manage-app} > {alerts-ui}**. On the *Create -rule* window, select *{anomaly-detect-cap} alert* under the {ml} section, then -give a name to the rule and optionally provide tags. - -Specify the time interval for the rule to check detected anomalies. It is -recommended to select an interval that is close to the bucket span of the -associated job. You can also select a notification option by using the _Notify_ -selector. An alert remains active as long as anomalies are found for a -particular {anomaly-job} during the check interval. When there is no anomaly -found in the next interval, the `Recovered` action group is invoked and the -status of the alert changes to `OK`. For more details, refer to the -documentation of +from the job list, or under **{stack-manage-app} > {alerts-ui}**. + +On the *Create rule* window, give a name to the rule and optionally provide +tags. Specify the time interval for the rule to check detected anomalies or job +health changes. It is recommended to select an interval that is close to the +bucket span of the job. You can also select a notification option with the +_Notify_ selector. An alert remains active as long as the configured conditions +are met during the check interval. When there is no matching condition in the +next interval, the `Recovered` action group is invoked and the status of the +alert changes to `OK`. 
For more details, refer to the documentation of {kibana-ref}/create-and-manage-rules.html#defining-rules-general-details[general rule details]. - + +Select the rule type you want to create under the {ml} section and continue to +configure it depending on whether it is an +<> or an +<> rule. + [role="screenshot"] -image::images/ml-anomaly-alert-type.jpg["Creating a rule for an anomaly detection alert"] - -Select the {anomaly-job} or the group of {anomaly-jobs} that is checked against -the rule. If you assign additional jobs to the group, the new jobs are -automatically checked the next time the conditions are checked. +image::images/ml-rule.jpg["Creating a new machine learning rule"] -You can select the result type of the {anomaly-job} that is checked against the -rule. In particular, you can create rules based on bucket, record, or influencer -results. + +[[creating-anomaly-alert-rules]] +=== {anomaly-detect-cap} alert + +Select the job that the rule applies to. + +You must select a type of {ml} result. In particular, you can create rules based +on bucket, record, or influencer results. [role="screenshot"] image::images/ml-anomaly-alert-severity.jpg["Selecting result type, severity, and test interval"] @@ -72,14 +91,61 @@ the sample results by providing a valid interval for your data. The generated preview contains the number of potentially created alerts during the relative time range you defined. +As the last step in the rule creation process, +<> that occur when the conditions +are met. + + +[[creating-anomaly-jobs-health-rules]] +=== {anomaly-jobs-cap} health + +Select the job or group that +the rule applies to. If you assign more jobs to the group, they are +included the next time the rule conditions are checked. + +You can also use a special character (`*`) to apply the rule to all your jobs. +Jobs created after the rule are automatically included. You can exclude jobs +that are not critically important by using the _Exclude_ field. + +Enable the health check types that you want to apply. All checks are enabled by +default. At least one check needs to be enabled to create the rule. The +following health checks are available: + +_Datafeed is not started_:: + Notifies if the corresponding {dfeed} of the job is not started but the job is + in an opened state. The notification message recommends the necessary + actions to solve the error. +_Model memory limit reached_:: + Notifies if the model memory status of the job reaches the soft or hard model + memory limit. Optimize your job by following + <> or consider + <>. +_Data delay has occurred_:: + Notifies when the job missed some data. You can define the threshold for the + amount of missing documents you get alerted on by setting + _Number of documents_. You can control the lookback interval for checking + delayed data with _Time interval_. Refer to the + <> page to see what to do about delayed data. +_Errors in job messages_:: + Notifies when the job messages contain error messages. Review the + notification; it contains the error messages, the corresponding job IDs and + recommendations on how to fix the issue. This check looks for job errors + that occur after the rule is created; it does not look at historic behavior. + +[role="screenshot"] +image::images/ml-health-check-config.jpg["Selecting health checkers"] + +As the last step in the rule creation process, +<> that occur when the conditions +are met. 
+ [[defining-actions]] == Defining actions -As a next step, connect your rule to actions that use supported built-in -integrations by selecting a connector type. Connectors are {kib} services or -third-party integrations that perform an action when the rule conditions are -met. +Connect your rule to actions that use supported built-in integrations by +selecting a connector type. Connectors are {kib} services or third-party +integrations that perform an action when the rule conditions are met. [role="screenshot"] image::images/ml-anomaly-alert-actions.jpg["Selecting connector type"] @@ -88,7 +154,10 @@ For example, you can choose _Slack_ as a connector type and configure it to send a message to a channel you selected. You can also create an index connector that writes the JSON object you configure to a specific index. It's also possible to customize the notification messages. A list of variables is available to include -in the message, like job ID, anomaly score, time, or top influencers. +in the message, like job ID, anomaly score, time, top influencers, {dfeed} ID, +memory status and so on based on the selected rule type. Refer to +<> to see the full list of available variables by rule type. + [role="screenshot"] image::images/ml-anomaly-alert-messages.jpg["Customizing your message"] @@ -101,3 +170,205 @@ The name of an alert is always the same as the job ID of the associated {anomaly-job} that triggered it. You can mute the notifications for a particular {anomaly-job} on the page of the rule that lists the individual alerts. You can open it via *{alerts-ui}* by selecting the rule name. + + +[[action-variables]] +== Action variables + +You can add different variables to your action. The following variables are +specific to the {ml} rule types. + + +[[anomaly-alert-action-variables]] +=== {anomaly-detect-cap} alert action variables + +Every {anomaly-detect} alert has the following action variables: + +`context`.`anomalyExplorerUrl`:: +URL to open in the Anomaly Explorer. + +`context`.`isInterim`:: +Indicates if top hits contain interim results. + +`context`.`jobIds`:: +List of job IDs that triggered the alert. + +`context`.`message`:: +A preconstructed message for the alert. + +`context`.`score`:: +Anomaly score at the time of the notification action. + +`context`.`timestamp`:: +The bucket timestamp of the anomaly. + +`context`.`timestampIso8601`:: +The bucket timestamp of the anomaly in ISO8601 format. + +`context`.`topInfluencers`:: +The list of top influencers. ++ +.Properties of `context.topInfluencers` +[%collapsible%open] +==== +`influencer_field_name`::: +The field name of the influencer. + +`influencer_field_value`::: +The entity that influenced, contributed to, or was to blame for the anomaly. + +`score`::: +The influencer score. A normalized score between 0-100 which shows the +influencer's overall contribution to the anomalies. +==== + +`context`.`topRecords`:: +The list of top records. ++ +.Properties of `context.topRecords` +[%collapsible%open] +==== +`by_field_value`::: +The value of the by field. + +`field_name`::: +Certain functions require a field to operate on, for example, `sum()`. For those +functions, this value is the name of the field to be analyzed. + +`function`::: +The function in which the anomaly occurs, as specified in the detector +configuration. For example, `max`. + +`over_field_name`::: +The field used to split the data. + +`partition_field_value`::: +The field used to segment the analysis. 
+ +`score`::: +A normalized score between 0-100, which is based on the probability of the +anomalousness of this record. +==== + +[[anomaly-jobs-health-action-variables]] +=== {anomaly-jobs-cap} health action variables + +Every health check has two main variables: `context.message` and +`context.results`. The properties of `context.results` may vary based on the +type of check. You can find the possible properties for all the checks below. + +==== _Datafeed is not started_ + +`context.message`:: +A preconstructed message for the alert. + +`context.results`:: +Contains the following properties: ++ +.Properties of `context.results` +[%collapsible%open] +==== +`datafeed_id`::: +The {dfeed} identifier. + +`datafeed_state`::: +The state of the {dfeed}. It can be `starting`, `started`, +`stopping`, `stopped`. + +`job_id`::: +The job identifier. + +`job_state`::: +The state of the job. It can be `opening`, `opened`, `closing`, +`closed`, or `failed`. +==== + +==== _Model memory limit reached_ + +`context.message`:: +A preconstructed message for the rule. + +`context.results`:: +Contains the following properties: ++ +.Properties of `context.results` +[%collapsible%open] +==== +`job_id`::: +The job identifier. + +`memory_status`::: +The status of the mathematical model. It can have one of the following values: + +* `soft_limit`: The model used more than 60% of the configured memory limit and + older unused models will be pruned to free up space. In categorization jobs no + further category examples will be stored. +* `hard_limit`: The model used more space than the configured memory limit. As a + result, not all incoming data was processed. + +`model_bytes`::: +The number of bytes of memory used by the models. + +`model_bytes_exceeded`::: +The number of bytes over the high limit for memory usage at the last allocation +failure. + +`model_bytes_memory_limit`::: +The upper limit for model memory usage. + +`log_time`::: +The timestamp of the model size statistics according to server time. Time +formatting is based on the {kib} settings. + +`peak_model_bytes`::: +The peak number of bytes of memory ever used by the model. +==== + +==== _Data delay has occurred_ + +`context.message`:: +A preconstructed message for the rule. + +`context.results`:: +Contains the following properties: ++ +.Properties of `context.results` +[%collapsible%open] +==== +`annotation`::: +The annotation corresponding to the data delay in the job. + +`end_timestamp`::: +Timestamp of the latest finalized buckets with missing documents. Time +formatting is based on the {kib} settings. + +`job_id`::: +The job identifier. + +`missed_docs_count`::: +The number of missed documents. +==== + +==== _Error in job messages_ + +`context.message`:: +A preconstructed message for the rule. + +`context.results`:: +Contains the following properties: ++ +.Properties of `context.results` +[%collapsible%open] +==== +`timestamp`::: +Timestamp of the latest finalized buckets with missing documents. + +`job_id`::: +The job identifier. + +`message`::: +The error message. + +`node_name`::: +The name of the node that runs the job. 
+==== \ No newline at end of file diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc index 9d02c3d011eac..fdaffa92bae2e 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc @@ -107,20 +107,16 @@ PUT _ml/anomaly_detectors/test1 }, "data_description": { "time_field":"@timestamp" - } -} - -PUT _ml/datafeeds/datafeed-test1 -{ - "job_id": "test1", - "indices": [ - "my-index-000001" - ], - "runtime_mappings": { - "total_error_count": { <2> - "type": "long", - "script": { - "source": "emit(doc['error_count'].value + doc['aborted_count'].value)" + }, + "datafeed_config":{ + "datafeed_id": "datafeed-test1", + "indices": ["my-index-000001"], + "runtime_mappings": { + "total_error_count": { <2> + "type": "long", + "script": { + "source": "emit(doc['error_count'].value + doc['aborted_count'].value)" + } } } } @@ -193,18 +189,16 @@ PUT _ml/anomaly_detectors/test2 }, "data_description": { "time_field":"@timestamp" - } -} - -PUT _ml/datafeeds/datafeed-test2 -{ - "job_id": "test2", - "indices": ["my-index-000001"], - "runtime_mappings": { - "my_runtime_field": { - "type": "keyword", - "script": { - "source": "emit(doc['some_field'].value + '_' + doc['another_field'].value)" <2> + }, + "datafeed_config":{ + "datafeed_id": "datafeed-test2", + "indices": ["my-index-000001"], + "runtime_mappings": { + "my_runtime_field": { + "type": "keyword", + "script": { + "source": "emit(doc['some_field'].value + '_' + doc['another_field'].value)" <2> + } } } } @@ -392,7 +386,7 @@ POST _ml/datafeeds/datafeed-test2/_update "my_runtime_field": { "type": "keyword", "script": { - "source": "emit(def m = /(.*)-bar-([0-9][0-9])/.matcher(doc['tokenstring3'].value); return m.find() ? m.group(1) + '_' + m.group(2) : '';)" <1> + "source": "def m = /(.*)-bar-([0-9][0-9])/.matcher(doc['tokenstring3'].value); emit(m.find() ? 
m.group(1) + '_' + m.group(2) : '');" <1> } } } @@ -438,18 +432,16 @@ PUT _ml/anomaly_detectors/test3 }, "data_description": { "time_field":"@timestamp" - } -} - -PUT _ml/datafeeds/datafeed-test3 -{ - "job_id": "test3", - "indices": ["my-index-000001"], - "runtime_mappings": { - "my_coordinates": { - "type": "keyword", - "script": { - "source": "emit(doc['coords.lat'].value + ',' + doc['coords.lon'].value)" + }, + "datafeed_config":{ + "datafeed_id": "datafeed-test3", + "indices": ["my-index-000001"], + "runtime_mappings": { + "my_coordinates": { + "type": "keyword", + "script": { + "source": "emit(doc['coords.lat'].value + ',' + doc['coords.lon'].value)" + } } } } @@ -501,19 +493,17 @@ PUT _ml/anomaly_detectors/test4 }, "data_description": { "time_field":"@timestamp" - } -} - -PUT _ml/datafeeds/datafeed-test4 -{ - "job_id": "test4", - "indices": ["my-index-000001"], - "script_fields":{ - "sub":{ - "script":"return domainSplit(doc['query'].value).get(0);" - }, - "hrd":{ - "script":"return domainSplit(doc['query'].value).get(1);" + }, + "datafeed_config":{ + "datafeed_id": "datafeed-test4", + "indices": ["my-index-000001"], + "script_fields":{ + "sub":{ + "script":"return domainSplit(doc['query'].value).get(0);" + }, + "hrd":{ + "script":"return domainSplit(doc['query'].value).get(1);" + } } } } diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-url.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-url.asciidoc index bd46bbd01e98b..032533df06ee0 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-url.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-url.asciidoc @@ -21,7 +21,7 @@ image::images/ml-customurl-edit.gif["Add a custom URL in {kib}",width=75%] For each custom URL, you must supply the URL and a label, which is the link text that appears in the anomalies table. You can also optionally supply a time range. When you link to *Discover* or a {kib} dashboard, you'll have additional -options for specifying the pertinent index pattern or dashboard name and query +options for specifying the pertinent {data-source} or dashboard name and query entities. [discrete] @@ -90,7 +90,7 @@ your web browser so that it does not block pop-up windows or create an exception for your {kib} URL. * When creating a link to a {kib} dashboard, the URLs for dashboards can be very long. Be careful of typos, end of line characters, and URL encoding. Also ensure -you use the appropriate index ID for the target {kib} index pattern. +you use the appropriate index ID for the target {kib} {data-source}. * If you use an influencer name for string substitution, keep in mind that it might not always be available in the analysis results and the URL is invalid in those cases. There is not always a statistically significant influencer for each diff --git a/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc index 9e1ce728d553d..a08e963bd340d 100644 --- a/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/infer-trained-model-deployment.asciidoc @@ -59,8 +59,8 @@ The input text for evaluation. [[infer-trained-model-deployment-example]] == {api-examples-title} -The response depends on the task the model is trained for. If it is a -sentiment analysis task, the response is the score. For example: +The response depends on the task the model is trained for. 
If it is a +text classification task, the response is the score. For example: [source,console] -------------------------------------------------- @@ -77,7 +77,7 @@ The API returns scores in this case, for example: ---- { "positive" : 0.9998062667902223, - "negative" : 1.9373320977752957E-4 + "negative" : 1.9373320977752957E-4 } ---- // NOTCONSOLE diff --git a/docs/reference/ml/df-analytics/apis/put-trained-models.asciidoc b/docs/reference/ml/df-analytics/apis/put-trained-models.asciidoc index 9f4b63d1283d4..b8d6c8ced2d09 100644 --- a/docs/reference/ml/df-analytics/apis/put-trained-models.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-trained-models.asciidoc @@ -24,7 +24,7 @@ WARNING: Models created in version 7.8.0 are not backwards compatible [[ml-put-trained-models-prereq]] == {api-prereq-title} -Requires the `manage_ml` cluster privilege. This privilege is included in the +Requires the `manage_ml` cluster privilege. This privilege is included in the `machine_learning_admin` built-in role. @@ -42,6 +42,17 @@ created by {dfanalytics}. (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] +[[ml-put-trained-models-query-params]] +== {api-query-parms-title} + +`defer_definition_decompression`:: +(Optional, boolean) +If set to `true` and a `compressed_definition` is provided, the request defers +definition decompression and skips relevant validations. +This deferral is useful for systems or users that know a good JVM heap size estimate for their +model and know that their model is valid and likely won't fail during inference. + + [role="child_attributes"] [[ml-put-trained-models-request-body]] == {api-request-body-title} diff --git a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc index 957cbc09f1ec1..12d41818231d7 100644 --- a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc @@ -36,19 +36,19 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] (Optional, time) Controls the amount of time to wait for the model to deploy. Defaults to 20 seconds. 
+ //// [role="child_attributes"] [[start-trained-model-deployment-results]] == {api-response-body-title} - //// + //// -[[ml-get-trained-models-response-codes]] +[[start-trained-models-response-codes]] == {api-response-codes-title} - //// + //// [[start-trained-model-deployment-example]] == {api-examples-title} - //// \ No newline at end of file diff --git a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc index 6302fa8257697..05a03a64ed706 100644 --- a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc @@ -15,13 +15,13 @@ //// [[stop-trained-model-deployment-prereq]] == {api-prereq-title} - //// + //// [[stop-trained-model-deployment-desc]] == {api-description-title} - //// + [[stop-trained-model-deployment-path-params]] == {api-path-parms-title} @@ -33,13 +33,15 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] [[stop-trained-model-deployment-query-params]] == {api-query-parms-title} //// + //// [role="child_attributes"] [[stop-trained-model-deployment-results]] == {api-response-body-title} //// + //// -[[ml-get-trained-models-response-codes]] +[[stop-trained-models-response-codes]] == {api-response-codes-title} //// diff --git a/docs/reference/ml/images/ml-anomaly-alert-type.jpg b/docs/reference/ml/images/ml-anomaly-alert-type.jpg deleted file mode 100644 index eff726155c327..0000000000000 Binary files a/docs/reference/ml/images/ml-anomaly-alert-type.jpg and /dev/null differ diff --git a/docs/reference/ml/images/ml-health-check-config.jpg b/docs/reference/ml/images/ml-health-check-config.jpg new file mode 100644 index 0000000000000..c235d79984525 Binary files /dev/null and b/docs/reference/ml/images/ml-health-check-config.jpg differ diff --git a/docs/reference/ml/images/ml-rule.jpg b/docs/reference/ml/images/ml-rule.jpg new file mode 100644 index 0000000000000..44973e785401c Binary files /dev/null and b/docs/reference/ml/images/ml-rule.jpg differ diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 6575c138747b7..e307a84f27499 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -44,7 +44,7 @@ There are several thread pools, but the important ones include: `snapshot_meta`:: For snapshot repository metadata read operations. Thread pool type is `scaling` with a keep-alive of `5m` and a max of `min(50, (`<>` pass:[ * ]3))`. + `# of allocated processors`>>`* 3))`. `warmer`:: For segment warm-up operations. Thread pool type is `scaling` with a diff --git a/docs/reference/query-dsl/combined-fields-query.asciidoc b/docs/reference/query-dsl/combined-fields-query.asciidoc index 42d1f45b0368b..9ea16bf8ef0a7 100644 --- a/docs/reference/query-dsl/combined-fields-query.asciidoc +++ b/docs/reference/query-dsl/combined-fields-query.asciidoc @@ -5,14 +5,14 @@ ++++ The `combined_fields` query supports searching multiple text fields as if their -contents had been indexed into one combined field. It takes a term-centric -view of the query: first it analyzes the query string into individual terms, +contents had been indexed into one combined field. The query takes a term-centric +view of the input string: first it analyzes the query string into individual terms, then looks for each term in any of the fields. 
This query is particularly useful when a match could span multiple text fields, for example the `title`, -`abstract` and `body` of an article: +`abstract`, and `body` of an article: [source,console] --------------------------------------------------- +---- GET /_search { "query": { @@ -23,31 +23,36 @@ GET /_search } } } --------------------------------------------------- +---- The `combined_fields` query takes a principled approach to scoring based on the simple BM25F formula described in http://www.staff.city.ac.uk/~sb317/papers/foundations_bm25_review.pdf[The Probabilistic Relevance Framework: BM25 and Beyond]. When scoring matches, the query combines term and collection statistics across -fields. This allows it to score each match as if the specified fields had been -indexed into a single combined field. (Note that this is a best attempt -- -`combined_fields` makes some approximations and scores will not obey this -model perfectly.) +fields to score each match as if the specified fields had been indexed into a +single, combined field. This scoring is a best attempt; `combined_fields` makes +some approximations and scores will not obey the BM25F model perfectly. +// tag::max-clause-limit[] [WARNING] .Field number limit =================================================== -There is a limit on the number of fields times terms that can be queried at -once. It is defined by the `indices.query.bool.max_clause_count` -<> which defaults to 4096. +By default, there is a limit to the number of clauses a query can contain. This +limit is defined by the +<> +setting, which defaults to `4096`. For `combined_fields` queries, the number of +clauses is calculated as the number of fields multiplied by the number of terms. =================================================== +// end::max-clause-limit[] ==== Per-field boosting -Individual fields can be boosted with the caret (`^`) notation: +Field boosts are interpreted according to the combined field model. For example, +if the `title` field has a boost of 2, the score is calculated as if each term +in the title appeared twice in the synthetic combined field. [source,console] --------------------------------------------------- +---- GET /_search { "query": { @@ -57,11 +62,8 @@ GET /_search } } } --------------------------------------------------- - -Field boosts are interpreted according to the combined field model. For example, -if the `title` field has a boost of 2, the score is calculated as if each term -in the title appeared twice in the synthetic combined field. +---- +<1> Individual fields can be boosted with the caret (`^`) notation. NOTE: The `combined_fields` query requires that field boosts are greater than or equal to 1.0. Field boosts are allowed to be fractional. @@ -149,7 +151,7 @@ term-centric: `operator` and `minimum_should_match` are applied per-term, instead of per-field. Concretely, a query like [source,console] --------------------------------------------------- +---- GET /_search { "query": { @@ -160,12 +162,15 @@ GET /_search } } } --------------------------------------------------- +---- -is executed as +is executed as: - +(combined("database", fields:["title" "abstract"])) - +(combined("systems", fields:["title", "abstract"])) +[source,txt] +---- ++(combined("database", fields:["title" "abstract"])) ++(combined("systems", fields:["title", "abstract"])) +---- In other words, each term must be present in at least one field for a document to match. @@ -178,8 +183,8 @@ to scoring based on the BM25F algorithm. 
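Because `minimum_should_match` is likewise applied per term, a request can require a minimum number of terms to be found across the listed fields. The following is an illustrative sketch (field names and query text are made up, not taken from this change) that requires at least two of the three terms to match:

[source,console]
----
GET /_search
{
  "query": {
    "combined_fields": {
      "query": "distributed database systems",
      "fields": [ "title", "abstract" ],
      "minimum_should_match": "2"
    }
  }
}
----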
[NOTE] .Custom similarities =================================================== -The `combined_fields` query currently only supports the `BM25` similarity -(which is the default unless a <> -is configured). <> are also not allowed. +The `combined_fields` query currently only supports the BM25 similarity, +which is the default unless a <> +is configured. <> are also not allowed. Using `combined_fields` in either of these cases will result in an error. =================================================== diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 0b38b25ad80e1..bf9a4721a34cb 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -67,9 +67,7 @@ index settings, which in turn defaults to `*`. `*` extracts all fields in the ma are eligible to term queries and filters the metadata fields. All extracted fields are then combined to build a query. -WARNING: There is a limit on the number of fields times terms that can be queried -at once. It is defined by the `indices.query.bool.max_clause_count` <> -which defaults to 4096. +include::combined-fields-query.asciidoc[tag=max-clause-limit] [[multi-match-types]] [discrete] diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 5e253d39e1ef1..4a0f33d4063fc 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -3,6 +3,11 @@ The following pages have moved or been deleted. +[role="exclude",id="modules-scripting-other-layers"] +=== Other security layers + +Refer to <>. + [role="exclude",id="grok-basics"] === Grok basics diff --git a/docs/reference/scripting/security.asciidoc b/docs/reference/scripting/security.asciidoc index db81f57a7d754..2726938cb179d 100644 --- a/docs/reference/scripting/security.asciidoc +++ b/docs/reference/scripting/security.asciidoc @@ -1,114 +1,63 @@ [[modules-scripting-security]] == Scripting and security +Painless and {es} implement layers of security to build a defense in depth +strategy for running scripts safely. -While Elasticsearch contributors make every effort to prevent scripts from -running amok, security is something best done in -{wikipedia}/Defense_in_depth_(computing)[layers] because -all software has bugs and it is important to minimize the risk of failure in -any security layer. Find below rules of thumb for how to keep Elasticsearch -from being a vulnerability. +Painless uses a fine-grained allowlist. Anything that is not part of the +allowlist results in a compilation error. This capability is the first layer of +security in a defense in depth strategy for scripting. -[discrete] -=== Do not run as root -First and foremost, never run Elasticsearch as the `root` user as this would -allow any successful effort to circumvent the other security layers to do -*anything* on your server. Elasticsearch will refuse to start if it detects -that it is running as `root` but this is so important that it is worth double -and triple checking. - -[discrete] -=== Do not expose Elasticsearch directly to users -Do not expose Elasticsearch directly to users, instead have an application -make requests on behalf of users. If this is not possible, have an application -to sanitize requests from users. If *that* is not possible then have some -mechanism to track which users did what. Understand that it is quite possible -to write a <> that overwhelms Elasticsearch and brings down -the cluster. 
All such searches should be considered bugs and the Elasticsearch -contributors make an effort to prevent this but they are still possible. - -[discrete] -=== Do not expose Elasticsearch directly to the Internet -Do not expose Elasticsearch to the Internet, instead have an application -make requests on behalf of the Internet. Do not entertain the thought of having -an application "sanitize" requests to Elasticsearch. Understand that it is -possible for a sufficiently determined malicious user to write searches that -overwhelm the Elasticsearch cluster and bring it down. For example: - -Good: - -* Users type text into a search box and the text is sent directly to a -<>, <>, -<>, or any of the <>. -* Running a script with any of the above queries that was written as part of -the application development process. -* Running a script with `params` provided by users. -* User actions makes documents with a fixed structure. +The second layer of security is the https://www.oracle.com/java/technologies/javase/seccodeguide.html[Java Security Manager]. As part of its startup +sequence, {es} enables the Java Security Manager to limit the actions that +portions of the code can take. <> uses +the Java Security Manager as an additional layer of defense to prevent scripts +from doing things like writing files and listening to sockets. -Bad: - -* Users can write arbitrary scripts, queries, `_search` requests. -* User actions make documents with structure defined by users. - -[discrete] -[[modules-scripting-other-layers]] -=== Other security layers -In addition to user privileges and script sandboxing Elasticsearch uses the -https://www.oracle.com/java/technologies/javase/seccodeguide.html[Java Security Manager] -and native security tools as additional layers of security. - -As part of its startup sequence Elasticsearch enables the Java Security Manager -which limits the actions that can be taken by portions of the code. Painless -uses this to limit the actions that generated Painless scripts can take, -preventing them from being able to do things like write files and listen to -sockets. - -Elasticsearch uses +{es} uses {wikipedia}/Seccomp[seccomp] in Linux, https://www.chromium.org/developers/design-documents/sandbox/osx-sandboxing-design[Seatbelt] in macOS, and https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147[ActiveProcessLimit] -on Windows to prevent Elasticsearch from forking or executing other processes. +on Windows as additional security layers to prevent {es} from forking or +running other processes. -Below this we describe the security settings for scripts and how you can -change from the defaults described above. You should be very, very careful -when allowing more than the defaults. Any extra permissions weakens the total -security of the Elasticsearch deployment. +You can modify the following script settings to restrict the type of scripts +that are allowed to run, and control the available +{painless}/painless-contexts.html[contexts] that scripts can run in. To +implement additional layers in your defense in depth strategy, follow the +<>. [[allowed-script-types-setting]] [discrete] === Allowed script types setting -Elasticsearch supports two script types: `inline` and `stored` (<>). -By default, {es} is configured to run both types of scripts. -To limit what type of scripts are run, set `script.allowed_types` to `inline` or `stored`. -To prevent any scripts from running, set `script.allowed_types` to `none`. +{es} supports two script types: `inline` and `stored`. 
By default, {es} is +configured to run both types of scripts. To limit what type of scripts are run, +set `script.allowed_types` to `inline` or `stored`. To prevent any scripts from +running, set `script.allowed_types` to `none`. IMPORTANT: If you use {kib}, set `script.allowed_types` to `both` or `inline`. Some {kib} features rely on inline scripts and do not function as expected if {es} does not allow inline scripts. -For example, to run `inline` scripts but not `stored` scripts, specify: +For example, to run `inline` scripts but not `stored` scripts: [source,yaml] ---- -script.allowed_types: inline <1> +script.allowed_types: inline ---- -<1> This will allow only inline scripts to be executed but not stored scripts -(or any other types). - [[allowed-script-contexts-setting]] [discrete] === Allowed script contexts setting -By default all script contexts are allowed to be executed. This can be modified using the -setting `script.allowed_contexts`. Only the contexts specified as part of the setting will -be allowed to be executed. To specify no contexts are allowed, set `script.allowed_contexts` -to be `none`. +By default, all script contexts are permitted. Use the `script.allowed_contexts` +setting to specify the contexts that are allowed. To specify that no contexts +are allowed, set `script.allowed_contexts` to `none`. + +For example, to allow scripts to run only in `scoring` and `update` contexts: [source,yaml] ---- -script.allowed_contexts: score, update <1> +script.allowed_contexts: score, update ---- -<1> This will allow only scoring and update scripts to be executed but not -aggs or plugin scripts (or any other contexts). diff --git a/docs/reference/search/terms-enum.asciidoc b/docs/reference/search/terms-enum.asciidoc index daa542c6ffd4a..fdf4f201aa3a4 100644 --- a/docs/reference/search/terms-enum.asciidoc +++ b/docs/reference/search/terms-enum.asciidoc @@ -98,8 +98,8 @@ Defaults to false. query rewrites to `match_none`. [[terms-enum-search_after-param]] -`string`:: +`search_after`:: (Optional, string) The string after which terms in the index should be returned. Allows for a form of -pagination if the last result from one request is passed as the search_after +pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. diff --git a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc index 60cf9b34d53c9..a708a20bd8b24 100644 --- a/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/get-snapshot-api.asciidoc @@ -118,8 +118,17 @@ Allows setting a sort order for the result. Defaults to `start_time`, i.e. sorti `name`:: Sort snapshots by their name. +`repository`:: + Sort snapshots by their repository name and break ties by snapshot name. + `index_count`:: Sort snapshots by the number of indices they contain and break ties by snapshot name. + +`shard_count`:: + Sort snapshots by the number of shards they contain and break ties by snapshot name. + +`failed_shard_count`:: + Sort snapshots by the number of shards that they failed to snapshot and break ties by snapshot name. 
==== `size`:: diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index c43d926514fe7..f4727640d8a07 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -53,6 +53,20 @@ s|Description | `INTERVAL '45:01.23' MINUTES TO SECONDS` | 45 minutes, 1 second and 230000000 nanoseconds |=== +==== Comparison + +Date/time fields can be compared to <> expressions with the equality (`=`) and `IN` operators: + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateMathEquals] +-------------------------------------------------- + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateMathIn] +-------------------------------------------------- + ==== Operators Basic arithmetic operators (`+`, `-`, `*`) support date/time parameters as indicated below: diff --git a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java index cd296e5d5f31b..1ad8724dce2bf 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java @@ -11,17 +11,13 @@ import java.util.concurrent.atomic.AtomicInteger; /** - * A basic RefCounted implementation that is initialized with a - * ref count of 1 and calls {@link #closeInternal()} once it reaches - * a 0 ref count + * A basic {@link RefCounted} implementation that is initialized with a ref count of 1 and calls {@link #closeInternal()} once it reaches + * a 0 ref count. */ public abstract class AbstractRefCounted implements RefCounted { - private final AtomicInteger refCount = new AtomicInteger(1); - private final String name; + public static final String ALREADY_CLOSED_MESSAGE = "already closed, can't increment ref count"; - public AbstractRefCounted(String name) { - this.name = name; - } + private final AtomicInteger refCount = new AtomicInteger(1); @Override public final void incRef() { @@ -63,14 +59,16 @@ public final boolean decRef() { } /** - * Called whenever the ref count is incremented or decremented. Can be implemented by implementations to a record of access to the - * instance for debugging purposes. + * Called whenever the ref count is incremented or decremented. Can be overridden to record access to the instance for debugging + * purposes. */ protected void touch() { } protected void alreadyClosed() { - throw new IllegalStateException(name + " is already closed can't increment refCount current count [" + refCount.get() + "]"); + final int currentRefCount = refCount.get(); + assert currentRefCount == 0 : currentRefCount; + throw new IllegalStateException(ALREADY_CLOSED_MESSAGE); } /** @@ -80,15 +78,21 @@ public int refCount() { return this.refCount.get(); } - - /** gets the name of this instance */ - public String getName() { - return name; - } - /** * Method that is invoked once the reference count reaches zero. * Implementations of this method must handle all exceptions and may not throw any exceptions. */ protected abstract void closeInternal(); + + /** + * Construct an {@link AbstractRefCounted} which runs the given {@link Runnable} when all references are released. 
+ */ + public static AbstractRefCounted of(Runnable onClose) { + return new AbstractRefCounted() { + @Override + protected void closeInternal() { + onClose.run(); + } + }; + } } diff --git a/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java b/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java index e30e4eb4f3301..8579ebddee87c 100644 --- a/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java +++ b/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import java.io.IOException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -20,7 +19,8 @@ import static org.hamcrest.Matchers.is; public class RefCountedTests extends ESTestCase { - public void testRefCount() throws IOException { + + public void testRefCount() { MyRefCounted counted = new MyRefCounted(); int incs = randomIntBetween(1, 100); @@ -56,12 +56,9 @@ public void testRefCount() throws IOException { counted.decRef(); assertFalse(counted.tryIncRef()); - try { - counted.incRef(); - fail(" expected exception"); - } catch (IllegalStateException ex) { - assertThat(ex.getMessage(), equalTo("test is already closed can't increment refCount current count [0]")); - } + assertThat( + expectThrows(IllegalStateException.class, counted::incRef).getMessage(), + equalTo(AbstractRefCounted.ALREADY_CLOSED_MESSAGE)); try { counted.ensureOpen(); @@ -77,29 +74,26 @@ public void testMultiThreaded() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - latch.await(); - for (int j = 0; j < 10000; j++) { - counted.incRef(); - try { - counted.ensureOpen(); - } finally { - counted.decRef(); - } + threads[i] = new Thread(() -> { + try { + latch.await(); + for (int j = 0; j < 10000; j++) { + counted.incRef(); + try { + counted.ensureOpen(); + } finally { + counted.decRef(); } - } catch (Exception e) { - exceptions.add(e); } + } catch (Exception e) { + exceptions.add(e); } - }; + }); threads[i].start(); } latch.countDown(); - for (int i = 0; i < threads.length; i++) { - threads[i].join(); + for (Thread thread : threads) { + thread.join(); } counted.decRef(); try { @@ -110,17 +104,12 @@ public void run() { } assertThat(counted.refCount(), is(0)); assertThat(exceptions, Matchers.emptyIterable()); - } - private final class MyRefCounted extends AbstractRefCounted { + private static final class MyRefCounted extends AbstractRefCounted { private final AtomicBoolean closed = new AtomicBoolean(false); - MyRefCounted() { - super("test"); - } - @Override protected void closeInternal() { this.closed.set(true); diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/GeoHashTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/GeoHashTests.java index 51545337c283c..45e92c850b4c4 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/utils/GeoHashTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/utils/GeoHashTests.java @@ -11,6 +11,11 @@ import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; +import java.util.ArrayList; + +import static 
org.elasticsearch.geometry.utils.Geohash.addNeighbors; +import static org.hamcrest.Matchers.containsInAnyOrder; + /** * Tests for {@link Geohash} */ @@ -103,4 +108,29 @@ public void testInvalidGeohashes() { assertEquals("empty geohash", ex.getMessage()); } + public void testNeighbors() { + // Simple root case + assertThat(addNeighbors("7", new ArrayList<>()), containsInAnyOrder("4", "5", "6", "d", "e", "h", "k", "s")); + + // Root cases (Outer cells) + assertThat(addNeighbors("0", new ArrayList<>()), containsInAnyOrder("1", "2", "3", "p", "r")); + assertThat(addNeighbors("b", new ArrayList<>()), containsInAnyOrder("8", "9", "c", "x", "z")); + assertThat(addNeighbors("p", new ArrayList<>()), containsInAnyOrder("n", "q", "r", "0", "2")); + assertThat(addNeighbors("z", new ArrayList<>()), containsInAnyOrder("8", "b", "w", "x", "y")); + + // Root crossing dateline + assertThat(addNeighbors("2", new ArrayList<>()), containsInAnyOrder("0", "1", "3", "8", "9", "p", "r", "x")); + assertThat(addNeighbors("r", new ArrayList<>()), containsInAnyOrder("0", "2", "8", "n", "p", "q", "w", "x")); + + // level1: simple case + assertThat(addNeighbors("dk", new ArrayList<>()), + containsInAnyOrder("d5", "d7", "de", "dh", "dj", "dm", "ds", "dt")); + + // Level1: crossing cells + assertThat(addNeighbors("d5", new ArrayList<>()), + containsInAnyOrder("d4", "d6", "d7", "dh", "dk", "9f", "9g", "9u")); + assertThat(addNeighbors("d0", new ArrayList<>()), + containsInAnyOrder("d1", "d2", "d3", "9b", "9c", "6p", "6r", "3z")); + } + } diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index 8e26acf58d17e..984c992b765e0 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -22,6 +22,7 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.LinkedHashMap; @@ -31,10 +32,14 @@ import java.util.function.Consumer; public final class Grok { + + public static final String[] ECS_COMPATIBILITY_MODES = {"disabled", "v1"}; + /** * Patterns built in to the grok library. */ - public static final Map BUILTIN_PATTERNS = loadBuiltinPatterns(); + private static Map LEGACY_PATTERNS; + private static Map ECS_V1_PATTERNS; private static final String NAME_GROUP = "name"; private static final String SUBNAME_GROUP = "subname"; @@ -296,16 +301,51 @@ public List captureConfig() { /** * Load built-in patterns. 
*/ - private static Map loadBuiltinPatterns() { - String[] patternNames = new String[] { + public static synchronized Map getBuiltinPatterns(boolean ecsCompatibility) { + if (ecsCompatibility) { + if (ECS_V1_PATTERNS == null) { + ECS_V1_PATTERNS = loadPatterns(ecsCompatibility); + } + return ECS_V1_PATTERNS; + } else { + if (LEGACY_PATTERNS == null) { + LEGACY_PATTERNS = loadPatterns(ecsCompatibility); + } + return LEGACY_PATTERNS; + } + } + + public static Map getBuiltinPatterns(String ecsCompatibility) { + if (isValidEcsCompatibilityMode(ecsCompatibility)) { + return getBuiltinPatterns(ECS_COMPATIBILITY_MODES[1].equals(ecsCompatibility)); + } else { + throw new IllegalArgumentException("unsupported ECS compatibility mode [" + ecsCompatibility + "]"); + } + } + + public static boolean isValidEcsCompatibilityMode(String ecsCompatibility) { + return Arrays.asList(ECS_COMPATIBILITY_MODES).contains(ecsCompatibility); + } + + private static Map loadPatterns(boolean ecsCompatibility) { + String[] legacyPatternNames = { "aws", "bacula", "bind", "bro", "exim", "firewalls", "grok-patterns", "haproxy", "httpd", "java", "junos", "linux-syslog", "maven", "mcollective-patterns", "mongodb", "nagios", "postgresql", "rails", "redis", "ruby", "squid" }; + String[] ecsPatternNames = { + "aws", "bacula", "bind", "bro", "exim", "firewalls", "grok-patterns", "haproxy", + "httpd", "java", "junos", "linux-syslog", "maven", "mcollective", "mongodb", "nagios", + "postgresql", "rails", "redis", "ruby", "squid", "zeek" + }; + + String[] patternNames = ecsCompatibility ? ecsPatternNames : legacyPatternNames; + String directory = ecsCompatibility ? "/patterns/ecs-v1/" : "/patterns/legacy/"; + Map builtinPatterns = new LinkedHashMap<>(); for (String pattern : patternNames) { try { - try(InputStream is = Grok.class.getResourceAsStream("/patterns/" + pattern)) { + try (InputStream is = Grok.class.getResourceAsStream(directory + pattern)) { loadPatterns(builtinPatterns, is); } } catch (IOException e) { diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/aws b/libs/grok/src/main/resources/patterns/ecs-v1/aws new file mode 100644 index 0000000000000..35d1467adce08 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/aws @@ -0,0 +1,28 @@ +S3_REQUEST_LINE (?:%{WORD:http.request.method} %{NOTSPACE:url.original}(?: HTTP/%{NUMBER:http.version})?) + +S3_ACCESS_LOG %{WORD:aws.s3access.bucket_owner} %{NOTSPACE:aws.s3access.bucket} \%{HTTPDATE:timestamp}\ (?:-|%{IP:client.ip}) (?:-|%{NOTSPACE:client.user.id}) %{NOTSPACE:aws.s3access.request_id} %{NOTSPACE:aws.s3access.operation} (?:-|%{NOTSPACE:aws.s3access.key}) (?:-|"%{S3_REQUEST_LINE:aws.s3access.request_uri}") (?:-|%{INT:http.response.status_code:int}) (?:-|%{NOTSPACE:aws.s3access.error_code}) (?:-|%{INT:aws.s3access.bytes_sent:long}) (?:-|%{INT:aws.s3access.object_size:long}) (?:-|%{INT:aws.s3access.total_time:int}) (?:-|%{INT:aws.s3access.turn_around_time:int}) "(?:-|%{DATA:http.request.referrer})" "(?:-|%{DATA:user_agent.original})" (?:-|%{NOTSPACE:aws.s3access.version_id})(?: (?:-|%{NOTSPACE:aws.s3access.host_id}) (?:-|%{NOTSPACE:aws.s3access.signature_version}) (?:-|%{NOTSPACE:tls.cipher}) (?:-|%{NOTSPACE:aws.s3access.authentication_type}) (?:-|%{NOTSPACE:aws.s3access.host_header}) (?:-|%{NOTSPACE:aws.s3access.tls_version}))? +# :long - %{INT:aws.s3access.bytes_sent:int} +# :long - %{INT:aws.s3access.object_size:int} + +ELB_URIHOST %{IPORHOST:url.domain}(?::%{POSINT:url.port:int})? +ELB_URIPATHQUERY %{URIPATH:url.path}(?:\?%{URIQUERY:url.query})? 
+# deprecated - old name: +ELB_URIPATHPARAM %{ELB_URIPATHQUERY} +ELB_URI %{URIPROTO:url.scheme}://(?:%{USER:url.username}(?::^@*)?@)?(?:%{ELB_URIHOST})?(?:%{ELB_URIPATHQUERY})? + +ELB_REQUEST_LINE (?:%{WORD:http.request.method} %{ELB_URI:url.original}(?: HTTP/%{NUMBER:http.version})?) + +# pattern supports 'regular' HTTP ELB format +ELB_V1_HTTP_LOG %{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:aws.elb.name} %{IP:source.ip}:%{INT:source.port:int} (?:-|(?:%{IP:aws.elb.backend.ip}:%{INT:aws.elb.backend.port:int})) (?:-1|%{NUMBER:aws.elb.request_processing_time.sec:float}) (?:-1|%{NUMBER:aws.elb.backend_processing_time.sec:float}) (?:-1|%{NUMBER:aws.elb.response_processing_time.sec:float}) %{INT:http.response.status_code:int} (?:-|%{INT:aws.elb.backend.http.response.status_code:int}) %{INT:http.request.body.bytes:long} %{INT:http.response.body.bytes:long} "%{ELB_REQUEST_LINE}"(?: "(?:-|%{DATA:user_agent.original})" (?:-|%{NOTSPACE:tls.cipher}) (?:-|%{NOTSPACE:aws.elb.ssl_protocol}))? +# :long - %{INT:http.request.body.bytes:int} +# :long - %{INT:http.response.body.bytes:int} + +ELB_ACCESS_LOG %{ELB_V1_HTTP_LOG} + +# pattern used to match a shorted format, that's why we have the optional part (starting with *http.version*) at the end +CLOUDFRONT_ACCESS_LOG (?%{YEAR}-%{MONTHNUM}-%{MONTHDAY}\t%{TIME})\t%{WORD:aws.cloudfront.x_edge_location}\t(?:-|%{INT:destination.bytes:long})\t%{IPORHOST:source.ip}\t%{WORD:http.request.method}\t%{HOSTNAME:url.domain}\t%{NOTSPACE:url.path}\t(?:(?:000)|%{INT:http.response.status_code:int})\t(?:-|%{DATA:http.request.referrer})\t%{DATA:user_agent.original}\t(?:-|%{DATA:url.query})\t(?:-|%{DATA:aws.cloudfront.http.request.cookie})\t%{WORD:aws.cloudfront.x_edge_result_type}\t%{NOTSPACE:aws.cloudfront.x_edge_request_id}\t%{HOSTNAME:aws.cloudfront.http.request.host}\t%{URIPROTO:network.protocol}\t(?:-|%{INT:source.bytes:long})\t%{NUMBER:aws.cloudfront.time_taken:float}\t(?:-|%{IP:network.forwarded_ip})\t(?:-|%{DATA:aws.cloudfront.ssl_protocol})\t(?:-|%{NOTSPACE:tls.cipher})\t%{WORD:aws.cloudfront.x_edge_response_result_type}(?:\t(?:-|HTTP/%{NUMBER:http.version})\t(?:-|%{DATA:aws.cloudfront.fle_status})\t(?:-|%{DATA:aws.cloudfront.fle_encrypted_fields})\t%{INT:source.port:int}\t%{NUMBER:aws.cloudfront.time_to_first_byte:float}\t(?:-|%{DATA:aws.cloudfront.x_edge_detailed_result_type})\t(?:-|%{NOTSPACE:http.request.mime_type})\t(?:-|%{INT:aws.cloudfront.http.request.size:long})\t(?:-|%{INT:aws.cloudfront.http.request.range.start:long})\t(?:-|%{INT:aws.cloudfront.http.request.range.end:long}))? +# :long - %{INT:destination.bytes:int} +# :long - %{INT:source.bytes:int} +# :long - %{INT:aws.cloudfront.http.request.size:int} +# :long - %{INT:aws.cloudfront.http.request.range.start:int} +# :long - %{INT:aws.cloudfront.http.request.range.end:int} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/bacula b/libs/grok/src/main/resources/patterns/ecs-v1/bacula new file mode 100644 index 0000000000000..169defdecea3a --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/bacula @@ -0,0 +1,53 @@ +BACULA_TIMESTAMP %{MONTHDAY}-%{MONTH}(?:-%{YEAR})? %{HOUR}:%{MINUTE} +BACULA_HOST %{HOSTNAME} +BACULA_VOLUME %{USER} +BACULA_DEVICE %{USER} +BACULA_DEVICEPATH %{UNIXPATH} +BACULA_CAPACITY %{INT}{1,3}(,%{INT}{3})* +BACULA_VERSION %{USER} +BACULA_JOB %{USER} + +BACULA_LOG_MAX_CAPACITY User defined maximum volume capacity %{BACULA_CAPACITY:bacula.volume.max_capacity} exceeded on device \"%{BACULA_DEVICE:bacula.volume.device}\" \(%{BACULA_DEVICEPATH:bacula.volume.path}\).? 
+BACULA_LOG_END_VOLUME End of medium on Volume \"%{BACULA_VOLUME:bacula.volume.name}\" Bytes=%{BACULA_CAPACITY:bacula.volume.bytes} Blocks=%{BACULA_CAPACITY:bacula.volume.blocks} at %{BACULA_TIMESTAMP:bacula.timestamp}. +BACULA_LOG_NEW_VOLUME Created new Volume \"%{BACULA_VOLUME:bacula.volume.name}\" in catalog. +BACULA_LOG_NEW_LABEL Labeled new Volume \"%{BACULA_VOLUME:bacula.volume.name}\" on (?:file )?device \"%{BACULA_DEVICE:bacula.volume.device}\" \(%{BACULA_DEVICEPATH:bacula.volume.path}\). +BACULA_LOG_WROTE_LABEL Wrote label to prelabeled Volume \"%{BACULA_VOLUME:bacula.volume.name}\" on device \"%{BACULA_DEVICE:bacula.volume.device}\" \(%{BACULA_DEVICEPATH:bacula.volume.path}\) +BACULA_LOG_NEW_MOUNT New volume \"%{BACULA_VOLUME:bacula.volume.name}\" mounted on device \"%{BACULA_DEVICE:bacula.volume.device}\" \(%{BACULA_DEVICEPATH:bacula.volume.path}\) at %{BACULA_TIMESTAMP:bacula.timestamp}. +BACULA_LOG_NOOPEN \s*Cannot open %{DATA}: ERR=%{GREEDYDATA:error.message} +BACULA_LOG_NOOPENDIR \s*Could not open directory \"?%{DATA:file.path}\"?: ERR=%{GREEDYDATA:error.message} +BACULA_LOG_NOSTAT \s*Could not stat %{DATA:file.path}: ERR=%{GREEDYDATA:error.message} +BACULA_LOG_NOJOBS There are no more Jobs associated with Volume \"%{BACULA_VOLUME:bacula.volume.name}\". Marking it purged. +BACULA_LOG_ALL_RECORDS_PRUNED .*?All records pruned from Volume \"%{BACULA_VOLUME:bacula.volume.name}\"; marking it \"Purged\" +BACULA_LOG_BEGIN_PRUNE_JOBS Begin pruning Jobs older than %{INT} month %{INT} days . +BACULA_LOG_BEGIN_PRUNE_FILES Begin pruning Files. +BACULA_LOG_PRUNED_JOBS Pruned %{INT} Jobs* for client %{BACULA_HOST:bacula.client.name} from catalog. +BACULA_LOG_PRUNED_FILES Pruned Files from %{INT} Jobs* for client %{BACULA_HOST:bacula.client.name} from catalog. +BACULA_LOG_ENDPRUNE End auto prune. +BACULA_LOG_STARTJOB Start Backup JobId %{INT}, Job=%{BACULA_JOB:bacula.job.name} +BACULA_LOG_STARTRESTORE Start Restore Job %{BACULA_JOB:bacula.job.name} +BACULA_LOG_USEDEVICE Using Device \"%{BACULA_DEVICE:bacula.volume.device}\" +BACULA_LOG_DIFF_FS \s*%{UNIXPATH} is a different filesystem. Will not descend from %{UNIXPATH} into it. +BACULA_LOG_JOBEND Job write elapsed time = %{DATA:bacula.job.elapsed_time}, Transfer rate = %{NUMBER} (K|M|G)? Bytes/second +BACULA_LOG_NOPRUNE_JOBS No Jobs found to prune. +BACULA_LOG_NOPRUNE_FILES No Files found to prune. +BACULA_LOG_VOLUME_PREVWRITTEN Volume \"?%{BACULA_VOLUME:bacula.volume.name}\"? previously written, moving to end of data. +BACULA_LOG_READYAPPEND Ready to append to end of Volume \"%{BACULA_VOLUME:bacula.volume.name}\" size=%{INT:bacula.volume.size:long} +# :long - %{INT:bacula.volume.size:int} +BACULA_LOG_CANCELLING Cancelling duplicate JobId=%{INT:bacula.job.other_id}. +BACULA_LOG_MARKCANCEL JobId %{INT:bacula.job.id}, Job %{BACULA_JOB:bacula.job.name} marked to be canceled. +BACULA_LOG_CLIENT_RBJ shell command: run ClientRunBeforeJob \"%{GREEDYDATA:bacula.job.client_run_before_command}\" +BACULA_LOG_VSS (Generate )?VSS (Writer)? +BACULA_LOG_MAXSTART Fatal [eE]rror: Job canceled because max start delay time exceeded. +BACULA_LOG_DUPLICATE Fatal [eE]rror: JobId %{INT:bacula.job.other_id} already running. Duplicate job not allowed. +BACULA_LOG_NOJOBSTAT Fatal [eE]rror: No Job status returned from FD. +BACULA_LOG_FATAL_CONN Fatal [eE]rror: bsock.c:133 Unable to connect to (Client: %{BACULA_HOST:bacula.client.name}|Storage daemon) on %{IPORHOST:client.address}:%{POSINT:client.port:int}. 
ERR=%{GREEDYDATA:error.message} +BACULA_LOG_NO_CONNECT Warning: bsock.c:127 Could not connect to (Client: %{BACULA_HOST:bacula.client.name}|Storage daemon) on %{IPORHOST:client.address}:%{POSINT:client.port:int}. ERR=%{GREEDYDATA:error.message} +BACULA_LOG_NO_AUTH Fatal error: Unable to authenticate with File daemon at \"?%{IPORHOST:client.address}(?::%{POSINT:client.port:int})?\"?. Possible causes: +BACULA_LOG_NOSUIT No prior or suitable Full backup found in catalog. Doing FULL backup. +BACULA_LOG_NOPRIOR No prior Full backup Job record found. + +BACULA_LOG_JOB (Error: )?Bacula %{BACULA_HOST} %{BACULA_VERSION} \(%{BACULA_VERSION}\): + +BACULA_LOG %{BACULA_TIMESTAMP:timestamp} %{BACULA_HOST:host.hostname}(?: JobId %{INT:bacula.job.id})?:? (%{BACULA_LOG_MAX_CAPACITY}|%{BACULA_LOG_END_VOLUME}|%{BACULA_LOG_NEW_VOLUME}|%{BACULA_LOG_NEW_LABEL}|%{BACULA_LOG_WROTE_LABEL}|%{BACULA_LOG_NEW_MOUNT}|%{BACULA_LOG_NOOPEN}|%{BACULA_LOG_NOOPENDIR}|%{BACULA_LOG_NOSTAT}|%{BACULA_LOG_NOJOBS}|%{BACULA_LOG_ALL_RECORDS_PRUNED}|%{BACULA_LOG_BEGIN_PRUNE_JOBS}|%{BACULA_LOG_BEGIN_PRUNE_FILES}|%{BACULA_LOG_PRUNED_JOBS}|%{BACULA_LOG_PRUNED_FILES}|%{BACULA_LOG_ENDPRUNE}|%{BACULA_LOG_STARTJOB}|%{BACULA_LOG_STARTRESTORE}|%{BACULA_LOG_USEDEVICE}|%{BACULA_LOG_DIFF_FS}|%{BACULA_LOG_JOBEND}|%{BACULA_LOG_NOPRUNE_JOBS}|%{BACULA_LOG_NOPRUNE_FILES}|%{BACULA_LOG_VOLUME_PREVWRITTEN}|%{BACULA_LOG_READYAPPEND}|%{BACULA_LOG_CANCELLING}|%{BACULA_LOG_MARKCANCEL}|%{BACULA_LOG_CLIENT_RBJ}|%{BACULA_LOG_VSS}|%{BACULA_LOG_MAXSTART}|%{BACULA_LOG_DUPLICATE}|%{BACULA_LOG_NOJOBSTAT}|%{BACULA_LOG_FATAL_CONN}|%{BACULA_LOG_NO_CONNECT}|%{BACULA_LOG_NO_AUTH}|%{BACULA_LOG_NOSUIT}|%{BACULA_LOG_JOB}|%{BACULA_LOG_NOPRIOR}) +# old (deprecated) name : +BACULA_LOGLINE %{BACULA_LOG} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/bind b/libs/grok/src/main/resources/patterns/ecs-v1/bind new file mode 100644 index 0000000000000..ec212de118ddb --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/bind @@ -0,0 +1,13 @@ +BIND9_TIMESTAMP %{MONTHDAY}[-]%{MONTH}[-]%{YEAR} %{TIME} + +BIND9_DNSTYPE (?:A|AAAA|CAA|CDNSKEY|CDS|CERT|CNAME|CSYNC|DLV|DNAME|DNSKEY|DS|HINFO|LOC|MX|NAPTR|NS|NSEC|NSEC3|OPENPGPKEY|PTR|RRSIG|RP|SIG|SMIMEA|SOA|SRV|TSIG|TXT|URI) +BIND9_CATEGORY (?:queries) + +# dns.question.class is static - only 'IN' is supported by Bind9 +# bind.log.question.name is expected to be a 'duplicate' (same as the dns.question.name capture) +BIND9_QUERYLOGBASE client(:? @0x(?:[0-9A-Fa-f]+))? %{IP:client.ip}#%{POSINT:client.port:int} \(%{GREEDYDATA:bind.log.question.name}\): query: %{GREEDYDATA:dns.question.name} (?IN) %{BIND9_DNSTYPE:dns.question.type}(:? %{DATA:bind.log.question.flags})? 
\(%{IP:server.ip}\) + +# for query-logging category and severity are always fixed as "queries: info: " +BIND9_QUERYLOG %{BIND9_TIMESTAMP:timestamp} %{BIND9_CATEGORY:bing.log.category}: %{LOGLEVEL:log.level}: %{BIND9_QUERYLOGBASE} + +BIND9 %{BIND9_QUERYLOG} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/bro b/libs/grok/src/main/resources/patterns/ecs-v1/bro new file mode 100644 index 0000000000000..dc38d5a7fba59 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/bro @@ -0,0 +1,30 @@ +# supports the 'old' BRO log files, for updated Zeek log format see the patters/ecs-v1/zeek +# https://www.bro.org/sphinx/script-reference/log-files.html + +BRO_BOOL [TF] +BRO_DATA [^\t]+ + +# http.log - old format (before the Zeek rename) : +BRO_HTTP %{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.ip}\t%{INT:source.port:int}\t%{IP:destination.ip}\t%{INT:destination.port:int}\t%{INT:zeek.http.trans_depth:int}\t(?:-|%{WORD:http.request.method})\t(?:-|%{BRO_DATA:url.domain})\t(?:-|%{BRO_DATA:url.original})\t(?:-|%{BRO_DATA:http.request.referrer})\t(?:-|%{BRO_DATA:user_agent.original})\t(?:-|%{NUMBER:http.request.body.bytes:long})\t(?:-|%{NUMBER:http.response.body.bytes:long})\t(?:-|%{POSINT:http.response.status_code:int})\t(?:-|%{DATA:zeek.http.status_msg})\t(?:-|%{POSINT:zeek.http.info_code:int})\t(?:-|%{DATA:zeek.http.info_msg})\t(?:-|%{BRO_DATA:zeek.http.filename})\t(?:\(empty\)|%{BRO_DATA:zeek.http.tags})\t(?:-|%{BRO_DATA:url.username})\t(?:-|%{BRO_DATA:url.password})\t(?:-|%{BRO_DATA:zeek.http.proxied})\t(?:-|%{BRO_DATA:zeek.http.orig_fuids})\t(?:-|%{BRO_DATA:http.request.mime_type})\t(?:-|%{BRO_DATA:zeek.http.resp_fuids})\t(?:-|%{BRO_DATA:http.response.mime_type}) +# :long - %{NUMBER:http.request.body.bytes:int} +# :long - %{NUMBER:http.response.body.bytes:int} + +# dns.log - old format +BRO_DNS %{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.ip}\t%{INT:source.port:int}\t%{IP:destination.ip}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{INT:dns.id:int})\t(?:-|%{BRO_DATA:dns.question.name})\t(?:-|%{INT:zeek.dns.qclass:int})\t(?:-|%{BRO_DATA:zeek.dns.qclass_name})\t(?:-|%{INT:zeek.dns.qtype:int})\t(?:-|%{BRO_DATA:dns.question.type})\t(?:-|%{INT:zeek.dns.rcode:int})\t(?:-|%{BRO_DATA:dns.response_code})\t(?:-|%{BRO_BOOL:zeek.dns.AA})\t(?:-|%{BRO_BOOL:zeek.dns.TC})\t(?:-|%{BRO_BOOL:zeek.dns.RD})\t(?:-|%{BRO_BOOL:zeek.dns.RA})\t(?:-|%{NONNEGINT:zeek.dns.Z:int})\t(?:-|%{BRO_DATA:zeek.dns.answers})\t(?:-|%{DATA:zeek.dns.TTLs})\t(?:-|%{BRO_BOOL:zeek.dns.rejected}) + +# conn.log - old bro, also supports 'newer' format (optional *zeek.connection.local_resp* flag) compared to non-ecs mode +BRO_CONN %{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.ip}\t%{INT:source.port:int}\t%{IP:destination.ip}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{BRO_DATA:network.protocol})\t(?:-|%{NUMBER:zeek.connection.duration:float})\t(?:-|%{INT:zeek.connection.orig_bytes:long})\t(?:-|%{INT:zeek.connection.resp_bytes:long})\t(?:-|%{BRO_DATA:zeek.connection.state})\t(?:-|%{BRO_BOOL:zeek.connection.local_orig})\t(?:(?:-|%{BRO_BOOL:zeek.connection.local_resp})\t)?(?:-|%{INT:zeek.connection.missed_bytes:long})\t(?:-|%{BRO_DATA:zeek.connection.history})\t(?:-|%{INT:source.packets:long})\t(?:-|%{INT:source.bytes:long})\t(?:-|%{INT:destination.packets:long})\t(?:-|%{INT:destination.bytes:long})\t(?:\(empty\)|%{BRO_DATA:zeek.connection.tunnel_parents}) +# :long - %{INT:zeek.connection.orig_bytes:int} +# :long - 
%{INT:zeek.connection.resp_bytes:int} +# :long - %{INT:zeek.connection.missed_bytes:int} +# :long - %{INT:source.packets:int} +# :long - %{INT:source.bytes:int} +# :long - %{INT:destination.packets:int} +# :long - %{INT:destination.bytes:int} + +# files.log - old format +BRO_FILES %{NUMBER:timestamp}\t%{NOTSPACE:zeek.files.fuid}\t(?:-|%{IP:server.ip})\t(?:-|%{IP:client.ip})\t(?:-|%{BRO_DATA:zeek.files.session_ids})\t(?:-|%{BRO_DATA:zeek.files.source})\t(?:-|%{INT:zeek.files.depth:int})\t(?:-|%{BRO_DATA:zeek.files.analyzers})\t(?:-|%{BRO_DATA:file.mime_type})\t(?:-|%{BRO_DATA:file.name})\t(?:-|%{NUMBER:zeek.files.duration:float})\t(?:-|%{BRO_DATA:zeek.files.local_orig})\t(?:-|%{BRO_BOOL:zeek.files.is_orig})\t(?:-|%{INT:zeek.files.seen_bytes:long})\t(?:-|%{INT:file.size:long})\t(?:-|%{INT:zeek.files.missing_bytes:long})\t(?:-|%{INT:zeek.files.overflow_bytes:long})\t(?:-|%{BRO_BOOL:zeek.files.timedout})\t(?:-|%{BRO_DATA:zeek.files.parent_fuid})\t(?:-|%{BRO_DATA:file.hash.md5})\t(?:-|%{BRO_DATA:file.hash.sha1})\t(?:-|%{BRO_DATA:file.hash.sha256})\t(?:-|%{BRO_DATA:zeek.files.extracted}) +# :long - %{INT:zeek.files.seen_bytes:int} +# :long - %{INT:file.size:int} +# :long - %{INT:zeek.files.missing_bytes:int} +# :long - %{INT:zeek.files.overflow_bytes:int} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/exim b/libs/grok/src/main/resources/patterns/ecs-v1/exim new file mode 100644 index 0000000000000..dba79503c0097 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/exim @@ -0,0 +1,26 @@ +EXIM_MSGID [0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2} +# <= message arrival +# => normal message delivery +# -> additional address in same delivery +# *> delivery suppressed by -N +# ** delivery failed; address bounced +# == delivery deferred; temporary problem +EXIM_FLAGS (?:<=|=>|->|\*>|\*\*|==|<>|>>) +EXIM_DATE (:?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME}) +EXIM_PID \[%{POSINT:process.pid:int}\] +EXIM_QT ((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?) +EXIM_EXCLUDE_TERMS (Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message) +EXIM_REMOTE_HOST (H=(%{NOTSPACE:source.address} )?(\(%{NOTSPACE:exim.log.remote_address}\) )?\%{IP:source.ip}\](?::%{POSINT:source.port:int})?) +EXIM_INTERFACE (I=\[%{IP:destination.ip}\](?::%{NUMBER:destination.port:int})) +EXIM_PROTOCOL (P=%{NOTSPACE:network.protocol}) +EXIM_MSG_SIZE (S=%{NUMBER:exim.log.message.size:int}) +EXIM_HEADER_ID (id=%{NOTSPACE:exim.log.header_id}) +EXIM_QUOTED_CONTENT (?:\\.|[^\\"])* +EXIM_SUBJECT (T="%{EXIM_QUOTED_CONTENT:exim.log.message.subject}") + +EXIM_UNKNOWN_FIELD (?:[A-Za-z0-9]{1,4}=(?:%{QUOTEDSTRING}|%{NOTSPACE})) +EXIM_NAMED_FIELDS (?: (?:%{EXIM_REMOTE_HOST}|%{EXIM_INTERFACE}|%{EXIM_PROTOCOL}|%{EXIM_MSG_SIZE}|%{EXIM_HEADER_ID}|%{EXIM_SUBJECT}|%{EXIM_UNKNOWN_FIELD}))* + +EXIM_MESSAGE_ARRIVAL %{EXIM_DATE:timestamp} (?:%{EXIM_PID} )?%{EXIM_MSGID:exim.log.message.id} (?<=) (?[a-z:] )?%{EMAILADDRESS:exim.log.sender.email}%{EXIM_NAMED_FIELDS}(?:(?: from ?)? for %{EMAILADDRESS:exim.log.recipient.email})? 
+ +EXIM %{EXIM_MESSAGE_ARRIVAL} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/firewalls b/libs/grok/src/main/resources/patterns/ecs-v1/firewalls new file mode 100644 index 0000000000000..892b3a506825d --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/firewalls @@ -0,0 +1,111 @@ +# NetScreen firewall logs +NETSCREENSESSIONLOG %{SYSLOGTIMESTAMP:timestamp} %{IPORHOST:observer.hostname} %{NOTSPACE:observer.name}: (?NetScreen) device_id=%{WORD:netscreen.device_id} .*?(system-\w+-%{NONNEGINT:event.code}\(%{WORD:netscreen.session.type}\))?: start_time="%{DATA:netscreen.session.start_time}" duration=%{INT:netscreen.session.duration:int} policy_id=%{INT:netscreen.policy_id} service=%{DATA:netscreen.service} proto=%{INT:netscreen.protocol_number:int} src zone=%{WORD:observer.ingress.zone} dst zone=%{WORD:observer.egress.zone} action=%{WORD:event.action} sent=%{INT:source.bytes:long} rcvd=%{INT:destination.bytes:long} src=%{IPORHOST:source.address} dst=%{IPORHOST:destination.address}(?: src_port=%{INT:source.port:int} dst_port=%{INT:destination.port:int})?(?: src-xlated ip=%{IP:source.nat.ip} port=%{INT:source.nat.port:int} dst-xlated ip=%{IP:destination.nat.ip} port=%{INT:destination.nat.port:int})?(?: session_id=%{INT:netscreen.session.id} reason=%{GREEDYDATA:netscreen.session.reason})? +# :long - %{INT:source.bytes:int} +# :long - %{INT:destination.bytes:int} + +#== Cisco ASA == +CISCO_TAGGED_SYSLOG ^<%{POSINT:log.syslog.priority:int}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:host.hostname})? ?: %%{CISCOTAG:cisco.asa.tag}: +CISCOTIMESTAMP %{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME} +CISCOTAG [A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+) +# Common Particles +CISCO_ACTION Built|Teardown|Deny|Denied|denied|requested|permitted|denied by ACL|discarded|est-allowed|Dropping|created|deleted +CISCO_REASON Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)* +CISCO_DIRECTION Inbound|inbound|Outbound|outbound +CISCO_INTERVAL first hit|%{INT}-second interval +CISCO_XLATE_TYPE static|dynamic +# helpers +CISCO_HITCOUNT_INTERVAL hit-cnt %{INT:cisco.asa.hit_count:int} (?:first hit|%{INT:cisco.asa.interval:int}-second interval) +CISCO_SRC_IP_USER %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.ip}(?:\(%{DATA:source.user.name}\))? +CISCO_DST_IP_USER %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.ip}(?:\(%{DATA:destination.user.name}\))? +CISCO_SRC_HOST_PORT_USER %{NOTSPACE:observer.ingress.interface.name}:(?:(?:%{IP:source.ip})|(?:%{HOSTNAME:source.address}))(?:/%{INT:source.port:int})?(?:\(%{DATA:source.user.name}\))? +CISCO_DST_HOST_PORT_USER %{NOTSPACE:observer.egress.interface.name}:(?:(?:%{IP:destination.ip})|(?:%{HOSTNAME:destination.address}))(?:/%{INT:destination.port:int})?(?:\(%{DATA:destination.user.name}\))? +# ASA-1-104001 +CISCOFW104001 \((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:event.reason} +# ASA-1-104002 +CISCOFW104002 \((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:event.reason} +# ASA-1-104003 +CISCOFW104003 \((?:Primary|Secondary)\) Switching to FAILED\. +# ASA-1-104004 +CISCOFW104004 \((?:Primary|Secondary)\) Switching to OK\. 
+# ASA-1-105003 +CISCOFW105003 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{NOTSPACE:network.interface.name} waiting +# ASA-1-105004 +CISCOFW105004 \((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{NOTSPACE:network.interface.name} normal +# ASA-1-105005 +CISCOFW105005 \((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{NOTSPACE:network.interface.name} +# ASA-1-105008 +CISCOFW105008 \((?:Primary|Secondary)\) Testing [Ii]nterface %{NOTSPACE:network.interface.name} +# ASA-1-105009 +CISCOFW105009 \((?:Primary|Secondary)\) Testing on [Ii]nterface %{NOTSPACE:network.interface.name} (?:Passed|Failed) +# ASA-2-106001 +CISCOFW106001 %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} connection %{CISCO_ACTION:cisco.asa.outcome} from %{IP:source.ip}/%{INT:source.port:int} to %{IP:destination.ip}/%{INT:destination.port:int} flags %{DATA:cisco.asa.tcp_flags} on interface %{NOTSPACE:observer.egress.interface.name} +# ASA-2-106006, ASA-2-106007, ASA-2-106010 +CISCOFW106006_106007_106010 %{CISCO_ACTION:cisco.asa.outcome} %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} (?:from|src) %{IP:source.ip}/%{INT:source.port:int}(?:\(%{DATA:source.user.name}\))? (?:to|dst) %{IP:destination.ip}/%{INT:destination.port:int}(?:\(%{DATA:destination.user.name}\))? (?:(?:on interface %{NOTSPACE:observer.egress.interface.name})|(?:due to %{CISCO_REASON:event.reason})) +# ASA-3-106014 +CISCOFW106014 %{CISCO_ACTION:cisco.asa.outcome} %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} src %{CISCO_SRC_IP_USER} dst %{CISCO_DST_IP_USER}\s?\(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\) +# ASA-6-106015 +CISCOFW106015 %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} \(%{DATA:cisco.asa.rule_name}\) from %{IP:source.ip}/%{INT:source.port:int} to %{IP:destination.ip}/%{INT:destination.port:int} flags %{DATA:cisco.asa.tcp_flags} on interface %{NOTSPACE:observer.egress.interface.name} +# ASA-1-106021 +CISCOFW106021 %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} reverse path check from %{IP:source.ip} to %{IP:destination.ip} on interface %{NOTSPACE:observer.egress.interface.name} +# ASA-4-106023 +CISCOFW106023 %{CISCO_ACTION:cisco.asa.outcome}(?: protocol)? %{WORD:cisco.asa.network.transport} src %{CISCO_SRC_HOST_PORT_USER} dst %{CISCO_DST_HOST_PORT_USER}( \(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\))? by access-group "?%{DATA:cisco.asa.rule_name}"? \%{DATA:[@metadata.cisco.asa.hashcode1}, %{DATA:@metadata.cisco.asa.hashcode2}\] +# ASA-4-106100, ASA-4-106102, ASA-4-106103 +CISCOFW106100_2_3 access-list %{NOTSPACE:cisco.asa.rule_name} %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} for user '%{DATA:user.name}' %{DATA:observer.ingress.interface.name}/%{IP:source.ip}\(%{INT:source.port:int}\) -> %{DATA:observer.egress.interface.name}/%{IP:destination.ip}\(%{INT:destination.port:int}\) %{CISCO_HITCOUNT_INTERVAL} \%{DATA:[@metadata.cisco.asa.hashcode1}, %{DATA:@metadata.cisco.asa.hashcode2}\] +# ASA-5-106100 +CISCOFW106100 access-list %{NOTSPACE:cisco.asa.rule_name} %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} %{DATA:observer.ingress.interface.name}/%{IP:source.ip}\(%{INT:source.port:int}\)(?:\(%{DATA:source.user.name}\))? -> %{DATA:observer.egress.interface.name}/%{IP:destination.ip}\(%{INT:destination.port:int}\)(?:\(%{DATA:source.user.name}\))? 
hit-cnt %{INT:cisco.asa.hit_count:int} %{CISCO_INTERVAL} \%{DATA:[@metadata.cisco.asa.hashcode1}, %{DATA:@metadata.cisco.asa.hashcode2}\] +# ASA-5-304001 +CISCOFW304001 %{IP:source.ip}(?:\(%{DATA:source.user.name}\))? Accessed URL %{IP:destination.ip}:%{GREEDYDATA:url.original} +# ASA-6-110002 +CISCOFW110002 %{CISCO_REASON:event.reason} for %{WORD:cisco.asa.network.transport} from %{DATA:observer.ingress.interface.name}:%{IP:source.ip}/%{INT:source.port:int} to %{IP:destination.ip}/%{INT:destination.port:int} +# ASA-6-302010 +CISCOFW302010 %{INT:cisco.asa.connections.in_use:int} in use, %{INT:cisco.asa.connections.most_used:int} most used +# ASA-6-302013, ASA-6-302014, ASA-6-302015, ASA-6-302016 +CISCOFW302013_302014_302015_302016 %{CISCO_ACTION:cisco.asa.outcome}(?: %{CISCO_DIRECTION:cisco.asa.network.direction})? %{WORD:cisco.asa.network.transport} connection %{INT:cisco.asa.connection_id} for %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.ip}/%{INT:source.port:int}(?: \(%{IP:source.nat.ip}/%{INT:source.nat.port:int}\))?(?:\(%{DATA:source.user.name?}\))? to %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.ip}/%{INT:destination.port:int}( \(%{IP:destination.nat.ip}/%{INT:destination.nat.port:int}\))?(?:\(%{DATA:destination.user.name}\))?( duration %{TIME:cisco.asa.duration} bytes %{INT:network.bytes:long})?(?: %{CISCO_REASON:event.reason})?(?: \(%{DATA:user.name}\))? +# :long - %{INT:network.bytes:int} +# ASA-6-302020, ASA-6-302021 +CISCOFW302020_302021 %{CISCO_ACTION:cisco.asa.outcome}(?: %{CISCO_DIRECTION:cisco.asa.network.direction})? %{WORD:cisco.asa.network.transport} connection for faddr %{IP:destination.ip}/%{INT:cisco.asa.icmp_seq:int}(?:\(%{DATA:destination.user.name}\))? gaddr %{IP:source.nat.ip}/%{INT:cisco.asa.icmp_type:int} laddr %{IP:source.ip}/%{INT}(?: \(%{DATA:source.user.name}\))? +# ASA-6-305011 +CISCOFW305011 %{CISCO_ACTION:cisco.asa.outcome} %{CISCO_XLATE_TYPE} %{WORD:cisco.asa.network.transport} translation from %{DATA:observer.ingress.interface.name}:%{IP:source.ip}(/%{INT:source.port:int})?(?:\(%{DATA:source.user.name}\))? to %{DATA:observer.egress.interface.name}:%{IP:destination.ip}/%{INT:destination.port:int} +# ASA-3-313001, ASA-3-313004, ASA-3-313008 +CISCOFW313001_313004_313008 %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} type=%{INT:cisco.asa.icmp_type:int}, code=%{INT:cisco.asa.icmp_code:int} from %{IP:source.ip} on interface %{NOTSPACE:observer.egress.interface.name}(?: to %{IP:destination.ip})? +# ASA-4-313005 +CISCOFW313005 %{CISCO_REASON:event.reason} for %{WORD:cisco.asa.network.transport} error message: %{WORD} src %{CISCO_SRC_IP_USER} dst %{CISCO_DST_IP_USER} \(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\) on %{NOTSPACE} interface\.\s+Original IP payload: %{WORD:cisco.asa.original_ip_payload.network.transport} src %{IP:cisco.asa.original_ip_payload.source.ip}/%{INT:cisco.asa.original_ip_payload.source.port:int}(?:\(%{DATA:cisco.asa.original_ip_payload.source.user.name}\))? dst %{IP:cisco.asa.original_ip_payload.destination.ip}/%{INT:cisco.asa.original_ip_payload.destination.port:int}(?:\(%{DATA:cisco.asa.original_ip_payload.destination.user.name}\))? +# ASA-5-321001 +CISCOFW321001 Resource '%{DATA:cisco.asa.resource.name}' limit of %{POSINT:cisco.asa.resource.limit:int} reached for system +# ASA-4-402117 +CISCOFW402117 %{WORD:cisco.asa.network.type}: Received a non-IPSec packet \(protocol=\s?%{WORD:cisco.asa.network.transport}\) from %{IP:source.ip} to %{IP:destination.ip}\.? 
+# ASA-4-402119 +CISCOFW402119 %{WORD:cisco.asa.network.type}: Received an %{WORD:cisco.asa.ipsec.protocol} packet \(SPI=\s?%{DATA:cisco.asa.ipsec.spi}, sequence number=\s?%{DATA:cisco.asa.ipsec.seq_num}\) from %{IP:source.ip} \(user=\s?%{DATA:source.user.name}\) to %{IP:destination.ip} that failed anti-replay checking\.? +# ASA-4-419001 +CISCOFW419001 %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} packet from %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.ip}/%{INT:source.port:int} to %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.ip}/%{INT:destination.port:int}, reason: %{GREEDYDATA:event.reason} +# ASA-4-419002 +CISCOFW419002 %{CISCO_REASON:event.reason} from %{DATA:observer.ingress.interface.name}:%{IP:source.ip}/%{INT:source.port:int} to %{DATA:observer.egress.interface.name}:%{IP:destination.ip}/%{INT:destination.port:int} with different initial sequence number +# ASA-4-500004 +CISCOFW500004 %{CISCO_REASON:event.reason} for protocol=%{WORD:cisco.asa.network.transport}, from %{IP:source.ip}/%{INT:source.port:int} to %{IP:destination.ip}/%{INT:destination.port:int} +# ASA-6-602303, ASA-6-602304 +CISCOFW602303_602304 %{WORD:cisco.asa.network.type}: An %{CISCO_DIRECTION:cisco.asa.network.direction} %{DATA:cisco.asa.ipsec.tunnel_type} SA \(SPI=\s?%{DATA:cisco.asa.ipsec.spi}\) between %{IP:source.ip} and %{IP:destination.ip} \(user=\s?%{DATA:source.user.name}\) has been %{CISCO_ACTION:cisco.asa.outcome} +# ASA-7-710001, ASA-7-710002, ASA-7-710003, ASA-7-710005, ASA-7-710006 +CISCOFW710001_710002_710003_710005_710006 %{WORD:cisco.asa.network.transport} (?:request|access) %{CISCO_ACTION:cisco.asa.outcome} from %{IP:source.ip}/%{INT:source.port:int} to %{DATA:observer.egress.interface.name}:%{IP:destination.ip}/%{INT:destination.port:int} +# ASA-6-713172 +CISCOFW713172 Group = %{DATA:cisco.asa.source.group}, IP = %{IP:source.ip}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:@metadata.cisco.asa.remote_nat}\s*behind a NAT device\s+This\s+end\s*%{DATA:@metadata.cisco.asa.local_nat}\s*behind a NAT device +# ASA-4-733100 +CISCOFW733100 \[\s*%{DATA:cisco.asa.burst.object}\s*\] drop %{DATA:cisco.asa.burst.id} exceeded. Current burst rate is %{INT:cisco.asa.burst.current_rate:int} per second, max configured rate is %{INT:cisco.asa.burst.configured_rate:int}; Current average rate is %{INT:cisco.asa.burst.avg_rate:int} per second, max configured rate is %{INT:cisco.asa.burst.configured_avg_rate:int}; Cumulative total count is %{INT:cisco.asa.burst.cumulative_count:int} +#== End Cisco ASA == + + +IPTABLES_TCP_FLAGS (CWR |ECE |URG |ACK |PSH |RST |SYN |FIN )* +IPTABLES_TCP_PART (?:SEQ=%{INT:iptables.tcp.seq:int}\s+)?(?:ACK=%{INT:iptables.tcp.ack:int}\s+)?WINDOW=%{INT:iptables.tcp.window:int}\s+RES=0x%{BASE16NUM:iptables.tcp_reserved_bits}\s+%{IPTABLES_TCP_FLAGS:iptables.tcp.flags} + +IPTABLES4_FRAG (?:(?<= )(?:CE|DF|MF))* +IPTABLES4_PART SRC=%{IPV4:source.ip}\s+DST=%{IPV4:destination.ip}\s+LEN=(?:%{INT:iptables.length:int})?\s+TOS=(?:0|0x%{BASE16NUM:iptables.tos})?\s+PREC=(?:0x%{BASE16NUM:iptables.precedence_bits})?\s+TTL=(?:%{INT:iptables.ttl:int})?\s+ID=(?:%{INT:iptables.id})?\s+(?:%{IPTABLES4_FRAG:iptables.fragment_flags})?(?:\s+FRAG: %{INT:iptables.fragment_offset:int})? +IPTABLES6_PART SRC=%{IPV6:source.ip}\s+DST=%{IPV6:destination.ip}\s+LEN=(?:%{INT:iptables.length:int})?\s+TC=(?:0|0x%{BASE16NUM:iptables.tos})?\s+HOPLIMIT=(?:%{INT:iptables.ttl:int})?\s+FLOWLBL=(?:%{INT:iptables.flow_label})?
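The `IPTABLES_TCP_PART`, `IPTABLES4_PART` and `IPTABLES6_PART` building blocks above are designed to be embedded into larger expressions, which is exactly what the `IPTABLES`, `SHOREWALL` and `SFW2` definitions that follow do. A rough sketch of the same composition from user code, assuming the builtin bank returned by `Grok.getBuiltinPatterns` is a `Map<String, String>` that can be copied into a mutable map; the `MY_KERNEL_LINE` name and the sample fragment are made up for illustration.

```java
import org.elasticsearch.grok.Grok;

import java.util.HashMap;
import java.util.Map;

public class IptablesCompositionSketch {
    public static void main(String[] args) {
        // Start from the ECS-compatible builtin bank and layer a custom composite pattern on top,
        // mirroring how SHOREWALL and SFW2 reuse %{IPTABLES} internally.
        Map<String, String> bank = new HashMap<>(Grok.getBuiltinPatterns(true));
        bank.put("MY_KERNEL_LINE", "kernel: %{IPTABLES4_PART}");

        Grok grok = new Grok(bank, "%{MY_KERNEL_LINE}", System.err::println);

        // Illustrative netfilter fragment; captured fields use the ECS-style dotted keys above.
        String line = "kernel: SRC=192.0.2.1 DST=198.51.100.7 LEN=60 TOS=0x00 PREC=0x00 TTL=64 ID=54321 DF";
        Map<String, Object> captures = grok.captures(line);
        if (captures != null) {
            System.out.println(captures.get("source.ip"));    // the SRC= address
            System.out.println(captures.get("iptables.ttl")); // coerced to a number via :int
        }
    }
}
```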
+ +IPTABLES IN=(?:%{NOTSPACE:observer.ingress.interface.name})?\s+OUT=(?:%{NOTSPACE:observer.egress.interface.name})?\s+(?:MAC=(?:%{COMMONMAC:destination.mac})?(?::%{COMMONMAC:source.mac})?(?::A-Fa-f0-9{2}:A-Fa-f0-9{2})?\s+)?(:?%{IPTABLES4_PART}|%{IPTABLES6_PART}).*?PROTO=(?:%{WORD:network.transport})?\s+SPT=(?:%{INT:source.port:int})?\s+DPT=(?:%{INT:destination.port:int})?\s+(?:%{IPTABLES_TCP_PART})? + +# Shorewall firewall logs +SHOREWALL (?:%{SYSLOGTIMESTAMP:timestamp}) (?:%{WORD:observer.hostname}) .*Shorewall:(?:%{WORD:shorewall.firewall.type})?:(?:%{WORD:shorewall.firewall.action})?.*%{IPTABLES} +#== End Shorewall +#== SuSE Firewall 2 == +SFW2_LOG_PREFIX SFW2\-INext\-%{NOTSPACE:suse.firewall.action} +SFW2 ((?:%{SYSLOGTIMESTAMP:timestamp})|(?:%{TIMESTAMP_ISO8601:timestamp}))\s*%{HOSTNAME:observer.hostname}.*?%{SFW2_LOG_PREFIX:suse.firewall.log_prefix}\s*%{IPTABLES} +#== End SuSE == diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/grok-patterns b/libs/grok/src/main/resources/patterns/ecs-v1/grok-patterns new file mode 100644 index 0000000000000..6f58f3ff4750d --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/grok-patterns @@ -0,0 +1,95 @@ +USERNAME [a-zA-Z0-9._-]+ +USER %{USERNAME} +EMAILLOCALPART [a-zA-Z0-9!#$%&'*+\-/=?^_`{|}~]{1,64}(?:\.[a-zA-Z0-9!#$%&'*+\-/=?^_`{|}~]{1,62}){0,63} +EMAILADDRESS %{EMAILLOCALPART}@%{HOSTNAME} +INT (?:[+-]?(?:[0-9]+)) +BASE10NUM (?[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+))) +NUMBER (?:%{BASE10NUM}) +BASE16NUM (?(?"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``)) +UUID [A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12} +# URN, allowing use of RFC 2141 section 2.3 reserved characters +URN urn:[0-9A-Za-z][0-9A-Za-z-]{0,31}:(?:%[0-9a-fA-F]{2}|[0-9A-Za-z()+,.:=@;$_!*'/?#-])+ + +# Networking +MAC (?:%{CISCOMAC}|%{WINDOWSMAC}|%{COMMONMAC}) +CISCOMAC (?:(?:[A-Fa-f0-9]{4}\.){2}[A-Fa-f0-9]{4}) +WINDOWSMAC (?:(?:[A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2}) +COMMONMAC (?:(?:[A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2}) +IPV6 ((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)? +IPV4 (?[A-Za-z]+:|\\)(?:\\[^\\?*]*)+ +URIPROTO [A-Za-z]([A-Za-z0-9+\-.]+)+ +URIHOST %{IPORHOST}(?::%{POSINT})? +# uripath comes loosely from RFC1738, but mostly from what Firefox doesn't turn into %XX +URIPATH (?:/[A-Za-z0-9$.+!*'(){},~:;=@#%&_\-]*)+ +URIQUERY [A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]<>]* +# deprecated (kept due compatibility): +URIPARAM \?%{URIQUERY} +URIPATHPARAM %{URIPATH}(?:\?%{URIQUERY})? 
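The numeric primitives near the top of this file (`INT`, `BASE10NUM`, `NUMBER`) are what the `:int` and `:float` type suffixes used throughout these ECS pattern files build on; the `# :long - ...` notes sprinkled through the files relate to the same typing mechanism. A small sketch of that coercion, reusing the `Grok.getBuiltinPatterns(true)` accessor and `captures` call introduced by this patch; the field names here are invented for the example.

```java
import org.elasticsearch.grok.Grok;

import java.util.Map;

public class TypedCaptureSketch {
    public static void main(String[] args) {
        // ":float" and ":int" suffixes coerce the captured text into numbers instead of strings.
        Grok grok = new Grok(
            Grok.getBuiltinPatterns(true),
            "took %{NUMBER:example.took:float}ms and %{INT:example.bytes:int} bytes",
            System.err::println
        );

        Map<String, Object> captures = grok.captures("took 28.4ms and 2326 bytes");
        if (captures != null) {
            System.out.println(captures.get("example.took"));  // 28.4 as a Float, not a String
            System.out.println(captures.get("example.bytes")); // 2326 as an Integer
        }
    }
}
```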
+URI %{URIPROTO}://(?:%{USER}(?::[^@]*)?@)?(?:%{URIHOST})?(?:%{URIPATH}(?:\?%{URIQUERY})?)? + +# Months: January, Feb, 3, 03, 12, December +MONTH \b(?:[Jj]an(?:uary|uar)?|[Ff]eb(?:ruary|ruar)?|[Mm](?:a|ä)?r(?:ch|z)?|[Aa]pr(?:il)?|[Mm]a(?:y|i)?|[Jj]un(?:e|i)?|[Jj]ul(?:y|i)?|[Aa]ug(?:ust)?|[Ss]ep(?:tember)?|[Oo](?:c|k)?t(?:ober)?|[Nn]ov(?:ember)?|[Dd]e(?:c|z)(?:ember)?)\b +MONTHNUM (?:0?[1-9]|1[0-2]) +MONTHNUM2 (?:0[1-9]|1[0-2]) +MONTHDAY (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]) + +# Days: Monday, Tue, Thu, etc... +DAY (?:Mon(?:day)?|Tue(?:sday)?|Wed(?:nesday)?|Thu(?:rsday)?|Fri(?:day)?|Sat(?:urday)?|Sun(?:day)?) + +# Years? +YEAR (?>\d\d){1,2} +HOUR (?:2[0123]|[01]?[0-9]) +MINUTE (?:[0-5][0-9]) +# '60' is a leap second in most time standards and thus is valid. +SECOND (?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) +TIME (?!<[0-9])%{HOUR}:%{MINUTE}(?::%{SECOND})(?![0-9]) +# datestamp is YYYY/MM/DD-HH:MM:SS.UUUU (or something like it) +DATE_US %{MONTHNUM}[/-]%{MONTHDAY}[/-]%{YEAR} +DATE_EU %{MONTHDAY}[./-]%{MONTHNUM}[./-]%{YEAR} +ISO8601_TIMEZONE (?:Z|[+-]%{HOUR}(?::?%{MINUTE})) +ISO8601_SECOND %{SECOND} +TIMESTAMP_ISO8601 %{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE}? +DATE %{DATE_US}|%{DATE_EU} +DATESTAMP %{DATE}[- ]%{TIME} +TZ (?:[APMCE][SD]T|UTC) +DATESTAMP_RFC822 %{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{TIME} %{TZ} +DATESTAMP_RFC2822 %{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{TIME} %{ISO8601_TIMEZONE} +DATESTAMP_OTHER %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{TZ} %{YEAR} +DATESTAMP_EVENTLOG %{YEAR}%{MONTHNUM2}%{MONTHDAY}%{HOUR}%{MINUTE}%{SECOND} + +# Syslog Dates: Month Day HH:MM:SS +SYSLOGTIMESTAMP %{MONTH} +%{MONTHDAY} %{TIME} +PROG [\x21-\x5a\x5c\x5e-\x7e]+ +SYSLOGPROG %{PROG:process.name}(?:\[%{POSINT:process.pid:int}\])? +SYSLOGHOST %{IPORHOST} +SYSLOGFACILITY <%{NONNEGINT:log.syslog.facility.code:int}.%{NONNEGINT:log.syslog.priority:int}> +HTTPDATE %{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT} + +# Shortcuts +QS %{QUOTEDSTRING} + +# Log formats +SYSLOGBASE %{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:host.hostname} %{SYSLOGPROG}: + +# Log Levels +LOGLEVEL ([Aa]lert|ALERT|[Tt]race|TRACE|[Dd]ebug|DEBUG|[Nn]otice|NOTICE|[Ii]nfo?(?:rmation)?|INFO?(?:RMATION)?|[Ww]arn?(?:ing)?|WARN?(?:ING)?|[Ee]rr?(?:or)?|ERR?(?:OR)?|[Cc]rit?(?:ical)?|CRIT?(?:ICAL)?|[Ff]atal|FATAL|[Ss]evere|SEVERE|EMERG(?:ENCY)?|[Ee]merg(?:ency)?) diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/haproxy b/libs/grok/src/main/resources/patterns/ecs-v1/haproxy new file mode 100644 index 0000000000000..f46d4ba945bb3 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/haproxy @@ -0,0 +1,40 @@ + +HAPROXYTIME (?!<[0-9])%{HOUR}:%{MINUTE}(?::%{SECOND})(?![0-9]) +HAPROXYDATE %{MONTHDAY}/%{MONTH}/%{YEAR}:%{HAPROXYTIME}.%{INT} + +# Override these default patterns to parse out what is captured in your haproxy.cfg +HAPROXYCAPTUREDREQUESTHEADERS %{DATA:haproxy.http.request.captured_headers} +HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:haproxy.http.response.captured_headers} + +# Example: +# These haproxy config lines will add data to the logs that are captured +# by the patterns below. Place them in your custom patterns directory to +# override the defaults. 
+# +# capture request header Host len 40 +# capture request header X-Forwarded-For len 50 +# capture request header Accept-Language len 50 +# capture request header Referer len 200 +# capture request header User-Agent len 200 +# +# capture response header Content-Type len 30 +# capture response header Content-Encoding len 10 +# capture response header Cache-Control len 200 +# capture response header Last-Modified len 200 +# +# HAPROXYCAPTUREDREQUESTHEADERS %{DATA:haproxy.http.request.host}\|%{DATA:haproxy.http.request.x_forwarded_for}\|%{DATA:haproxy.http.request.accept_language}\|%{DATA:http.request.referrer}\|%{DATA:user_agent.original} +# HAPROXYCAPTUREDRESPONSEHEADERS %{DATA:http.response.mime_type}\|%{DATA:haproxy.http.response.encoding}\|%{DATA:haproxy.http.response.cache_control}\|%{DATA:haproxy.http.response.last_modified} + +HAPROXYURI (?:%{URIPROTO:url.scheme}://)?(?:%{USER:url.username}(?::[^@]*)?@)?(?:%{IPORHOST:url.domain}(?::%{POSINT:url.port:int})?)?(?:%{URIPATH:url.path}(?:\?%{URIQUERY:url.query})?)? + +HAPROXYHTTPREQUESTLINE (?:|(?:%{WORD:http.request.method} %{HAPROXYURI:url.original}(?: HTTP/%{NUMBER:http.version})?)) + +# parse a haproxy 'httplog' line +HAPROXYHTTPBASE %{IP:source.address}:%{INT:source.port:int} \[%{HAPROXYDATE:haproxy.request_date}\] %{NOTSPACE:haproxy.frontend_name} %{NOTSPACE:haproxy.backend_name}/(?:|%{NOTSPACE:haproxy.server_name}) (?:-1|%{INT:haproxy.http.request.time_wait_ms:int})/(?:-1|%{INT:haproxy.total_waiting_time_ms:int})/(?:-1|%{INT:haproxy.connection_wait_time_ms:int})/(?:-1|%{INT:haproxy.http.request.time_wait_without_data_ms:int})/%{NOTSPACE:haproxy.total_time_ms} %{INT:http.response.status_code:int} %{INT:source.bytes:long} (?:-|%{DATA:haproxy.http.request.captured_cookie}) (?:-|%{DATA:haproxy.http.response.captured_cookie}) %{NOTSPACE:haproxy.termination_state} %{INT:haproxy.connections.active:int}/%{INT:haproxy.connections.frontend:int}/%{INT:haproxy.connections.backend:int}/%{INT:haproxy.connections.server:int}/%{INT:haproxy.connections.retries:int} %{INT:haproxy.server_queue:int}/%{INT:haproxy.backend_queue:int}(?: \{%{HAPROXYCAPTUREDREQUESTHEADERS}\}(?: \{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?)?(?: "%{HAPROXYHTTPREQUESTLINE}"?)? 
+# :long - %{INT:source.bytes:int} + +HAPROXYHTTP (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp}) %{IPORHOST:host.hostname} %{SYSLOGPROG}: %{HAPROXYHTTPBASE} + +# parse a haproxy 'tcplog' line +HAPROXYTCP (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp}) %{IPORHOST:host.hostname} %{SYSLOGPROG}: %{IP:source.address}:%{INT:source.port:int} \[%{HAPROXYDATE:haproxy.request_date}\] %{NOTSPACE:haproxy.frontend_name} %{NOTSPACE:haproxy.backend_name}/(?:|%{NOTSPACE:haproxy.server_name}) (?:-1|%{INT:haproxy.total_waiting_time_ms:int})/(?:-1|%{INT:haproxy.connection_wait_time_ms:int})/%{NOTSPACE:haproxy.total_time_ms} %{INT:source.bytes:long} %{NOTSPACE:haproxy.termination_state} %{INT:haproxy.connections.active:int}/%{INT:haproxy.connections.frontend:int}/%{INT:haproxy.connections.backend:int}/%{INT:haproxy.connections.server:int}/%{INT:haproxy.connections.retries:int} %{INT:haproxy.server_queue:int}/%{INT:haproxy.backend_queue:int} +# :long - %{INT:source.bytes:int} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/httpd b/libs/grok/src/main/resources/patterns/ecs-v1/httpd new file mode 100644 index 0000000000000..9b58e5096ad38 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/httpd @@ -0,0 +1,17 @@ +HTTPDUSER %{EMAILADDRESS}|%{USER} +HTTPDERROR_DATE %{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR} + +# Log formats +HTTPD_COMMONLOG %{IPORHOST:source.address} (?:-|%{HTTPDUSER:apache.access.user.identity}) (?:-|%{HTTPDUSER:user.name}) \[%{HTTPDATE:timestamp}\] "(?:%{WORD:http.request.method} %{NOTSPACE:url.original}(?: HTTP/%{NUMBER:http.version})?|%{DATA})" (?:-|%{INT:http.response.status_code:int}) (?:-|%{INT:http.response.body.bytes:long}) +# :long - %{INT:http.response.body.bytes:int} +HTTPD_COMBINEDLOG %{HTTPD_COMMONLOG} "(?:-|%{DATA:http.request.referrer})" "(?:-|%{DATA:user_agent.original})" + +# Error logs +HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:log.level}\] (?:\[client %{IPORHOST:source.address}\] )?%{GREEDYDATA:message} +HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[(?:%{WORD:apache.error.module})?:%{LOGLEVEL:log.level}\] \[pid %{POSINT:process.pid:long}(:tid %{INT:process.thread.id:int})?\](?: \(%{POSINT:apache.error.proxy.error.code?}\)%{DATA:apache.error.proxy.error.message}:)?(?: \[client %{IPORHOST:source.address}(?::%{POSINT:source.port:int})?\])?(?: %{DATA:error.code}:)? %{GREEDYDATA:message} +# :long - %{INT:process.thread.id:int} +HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} + +# Deprecated +COMMONAPACHELOG %{HTTPD_COMMONLOG} +COMBINEDAPACHELOG %{HTTPD_COMBINEDLOG} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/java b/libs/grok/src/main/resources/patterns/ecs-v1/java new file mode 100644 index 0000000000000..8dd539f6c0283 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/java @@ -0,0 +1,34 @@ +JAVACLASS (?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]* +#Space is an allowed character to match special cases like 'Native Method' or 'Unknown Source' +JAVAFILE (?:[a-zA-Z$_0-9. 
-]+) +#Allow special , methods +JAVAMETHOD (?:(<(?:cl)?init>)|[a-zA-Z$_][a-zA-Z$_0-9]*) +#Line number is optional in special cases 'Native method' or 'Unknown source' +JAVASTACKTRACEPART %{SPACE}at %{JAVACLASS:java.log.origin.class.name}\.%{JAVAMETHOD:log.origin.function}\(%{JAVAFILE:log.origin.file.name}(?::%{INT:log.origin.file.line:int})?\) +# Java Logs +JAVATHREAD (?:[A-Z]{2}-Processor[\d]+) +JAVALOGMESSAGE (?:.*) + +# MMM dd, yyyy HH:mm:ss eg: Jan 9, 2014 7:13:13 AM +# matches default logging configuration in Tomcat 4.1, 5.0, 5.5, 6.0, 7.0 +CATALINA7_DATESTAMP %{MONTH} %{MONTHDAY}, %{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} (?:AM|PM) +CATALINA7_LOG %{CATALINA7_DATESTAMP:timestamp} %{JAVACLASS:java.log.origin.class.name}(?: %{JAVAMETHOD:log.origin.function})?\s*(?:%{LOGLEVEL:log.level}:)? %{JAVALOGMESSAGE:message} + +# 31-Jul-2020 16:40:38.578 in Tomcat 8.5/9.0 +CATALINA8_DATESTAMP %{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} +CATALINA8_LOG %{CATALINA8_DATESTAMP:timestamp} %{LOGLEVEL:log.level} \[%{DATA:java.log.origin.thread.name}\] %{JAVACLASS:java.log.origin.class.name}\.(?:%{JAVAMETHOD:log.origin.function})? %{JAVALOGMESSAGE:message} + +CATALINA_DATESTAMP (?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP}) +CATALINALOG (?:%{CATALINA8_LOG})|(?:%{CATALINA7_LOG}) + +# in Tomcat 5.5, 6.0, 7.0 it is the same as catalina.out logging format +TOMCAT7_LOG %{CATALINA7_LOG} +TOMCAT8_LOG %{CATALINA8_LOG} + +# NOTE: a weird log we started with - not sure what TC version this should match out of the box (due the | delimiters) +TOMCATLEGACY_DATESTAMP %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND}(?: %{ISO8601_TIMEZONE})? +TOMCATLEGACY_LOG %{TOMCATLEGACY_DATESTAMP:timestamp} \| %{LOGLEVEL:log.level} \| %{JAVACLASS:java.log.origin.class.name} - %{JAVALOGMESSAGE:message} + +TOMCAT_DATESTAMP (?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP})|(?:%{TOMCATLEGACY_DATESTAMP}) + +TOMCATLOG (?:%{TOMCAT8_LOG})|(?:%{TOMCAT7_LOG})|(?:%{TOMCATLEGACY_LOG}) diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/junos b/libs/grok/src/main/resources/patterns/ecs-v1/junos new file mode 100644 index 0000000000000..d23d45502aa19 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/junos @@ -0,0 +1,13 @@ +# JUNOS 11.4 RT_FLOW patterns +RT_FLOW_TAG (?:RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY) +# deprecated legacy name: +RT_FLOW_EVENT RT_FLOW_TAG + +RT_FLOW1 %{RT_FLOW_TAG:juniper.srx.tag}: %{GREEDYDATA:juniper.srx.reason}: %{IP:source.ip}/%{INT:source.port:int}->%{IP:destination.ip}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{IP:source.nat.ip}/%{INT:source.nat.port:int}->%{IP:destination.nat.ip}/%{INT:destination.nat.port:int} (?:(?:None)|(?:%{DATA:juniper.srx.src_nat_rule_name})) (?:(?:None)|(?:%{DATA:juniper.srx.dst_nat_rule_name})) %{INT:network.iana_number} %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} %{INT:juniper.srx.session_id} \d+\(%{INT:source.bytes:long}\) \d+\(%{INT:destination.bytes:long}\) %{INT:juniper.srx.elapsed_time:int} .* +# :long - %{INT:source.bytes:int} +# :long - %{INT:destination.bytes:int} + +RT_FLOW2 %{RT_FLOW_TAG:juniper.srx.tag}: session created %{IP:source.ip}/%{INT:source.port:int}->%{IP:destination.ip}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{IP:source.nat.ip}/%{INT:source.nat.port:int}->%{IP:destination.nat.ip}/%{INT:destination.nat.port:int} (?:(?:None)|(?:%{DATA:juniper.srx.src_nat_rule_name})) (?:(?:None)|(?:%{DATA:juniper.srx.dst_nat_rule_name})) 
%{INT:network.iana_number} %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} %{INT:juniper.srx.session_id} .* + +RT_FLOW3 %{RT_FLOW_TAG:juniper.srx.tag}: session denied %{IP:source.ip}/%{INT:source.port:int}->%{IP:destination.ip}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{INT:network.iana_number}\(\d\) %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} .* + diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/linux-syslog b/libs/grok/src/main/resources/patterns/ecs-v1/linux-syslog new file mode 100644 index 0000000000000..f2582f506c099 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/linux-syslog @@ -0,0 +1,16 @@ +SYSLOG5424PRINTASCII [!-~]+ + +SYSLOGBASE2 (?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp})(?: %{SYSLOGFACILITY})?(?: %{SYSLOGHOST:host.hostname})?(?: %{SYSLOGPROG}:)? +SYSLOGPAMSESSION %{SYSLOGBASE} (?=%{GREEDYDATA:message})%{WORD:system.auth.pam.module}\(%{DATA:system.auth.pam.origin}\): session %{WORD:system.auth.pam.session_state} for user %{USERNAME:user.name}(?: by %{GREEDYDATA})? + +CRON_ACTION [A-Z ]+ +CRONLOG %{SYSLOGBASE} \(%{USER:user.name}\) %{CRON_ACTION:system.cron.action} \(%{DATA:message}\) + +SYSLOGLINE %{SYSLOGBASE2} %{GREEDYDATA:message} + +# IETF 5424 syslog(8) format (see http://www.rfc-editor.org/info/rfc5424) +SYSLOG5424PRI <%{NONNEGINT:log.syslog.priority:int}> +SYSLOG5424SD \[%{DATA}\]+ +SYSLOG5424BASE %{SYSLOG5424PRI}%{NONNEGINT:system.syslog.version} +(?:-|%{TIMESTAMP_ISO8601:timestamp}) +(?:-|%{IPORHOST:host.hostname}) +(?:-|%{SYSLOG5424PRINTASCII:process.name}) +(?:-|%{POSINT:process.pid:int}) +(?:-|%{SYSLOG5424PRINTASCII:event.code}) +(?:-|%{SYSLOG5424SD:system.syslog.structured_data})? + +SYSLOG5424LINE %{SYSLOG5424BASE} +%{GREEDYDATA:message} diff --git a/libs/grok/src/main/resources/patterns/maven b/libs/grok/src/main/resources/patterns/ecs-v1/maven similarity index 100% rename from libs/grok/src/main/resources/patterns/maven rename to libs/grok/src/main/resources/patterns/ecs-v1/maven diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/mcollective b/libs/grok/src/main/resources/patterns/ecs-v1/mcollective new file mode 100644 index 0000000000000..f797cbde8a2bd --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/mcollective @@ -0,0 +1,4 @@ +# Remember, these can be multi-line events. +MCOLLECTIVE ., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:process.pid:int}\]%{SPACE}%{LOGLEVEL:log.level} + +MCOLLECTIVEAUDIT %{TIMESTAMP_ISO8601:timestamp}: diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/mongodb b/libs/grok/src/main/resources/patterns/ecs-v1/mongodb new file mode 100644 index 0000000000000..7f1c03de61f21 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/mongodb @@ -0,0 +1,7 @@ +MONGO_LOG %{SYSLOGTIMESTAMP:timestamp} \[%{WORD:mongodb.component}\] %{GREEDYDATA:message} +MONGO_QUERY \{ (?<={ ).*(?= } ntoreturn:) \} +MONGO_SLOWQUERY %{WORD:mongodb.profile.op} %{MONGO_WORDDASH:mongodb.database}\.%{MONGO_WORDDASH:mongodb.collection} %{WORD}: %{MONGO_QUERY:mongodb.query.original} ntoreturn:%{NONNEGINT:mongodb.profile.ntoreturn:int} ntoskip:%{NONNEGINT:mongodb.profile.ntoskip:int} nscanned:%{NONNEGINT:mongodb.profile.nscanned:int}.*? nreturned:%{NONNEGINT:mongodb.profile.nreturned:int}.*? 
%{INT:mongodb.profile.duration:int}ms +MONGO_WORDDASH \b[\w-]+\b +MONGO3_SEVERITY \w +MONGO3_COMPONENT %{WORD} +MONGO3_LOG %{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:log.level} (?:-|%{MONGO3_COMPONENT:mongodb.component})%{SPACE}(?:\[%{DATA:mongodb.context}\])? %{GREEDYDATA:message} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/nagios b/libs/grok/src/main/resources/patterns/ecs-v1/nagios new file mode 100644 index 0000000000000..d0a3b423b14f7 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/nagios @@ -0,0 +1,124 @@ +################################################################################## +################################################################################## +# Chop Nagios log files to smithereens! +# +# A set of GROK filters to process logfiles generated by Nagios. +# While it does not, this set intends to cover all possible Nagios logs. +# +# Some more work needs to be done to cover all External Commands: +# http://old.nagios.org/developerinfo/externalcommands/commandlist.php +# +# If you need some support on these rules please contact: +# Jelle Smet http://smetj.net +# +################################################################################# +################################################################################# + +NAGIOSTIME \[%{NUMBER:timestamp}\] + +############################################### +######## Begin nagios log types +############################################### +NAGIOS_TYPE_CURRENT_SERVICE_STATE CURRENT SERVICE STATE +NAGIOS_TYPE_CURRENT_HOST_STATE CURRENT HOST STATE + +NAGIOS_TYPE_SERVICE_NOTIFICATION SERVICE NOTIFICATION +NAGIOS_TYPE_HOST_NOTIFICATION HOST NOTIFICATION + +NAGIOS_TYPE_SERVICE_ALERT SERVICE ALERT +NAGIOS_TYPE_HOST_ALERT HOST ALERT + +NAGIOS_TYPE_SERVICE_FLAPPING_ALERT SERVICE FLAPPING ALERT +NAGIOS_TYPE_HOST_FLAPPING_ALERT HOST FLAPPING ALERT + +NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT SERVICE DOWNTIME ALERT +NAGIOS_TYPE_HOST_DOWNTIME_ALERT HOST DOWNTIME ALERT + +NAGIOS_TYPE_PASSIVE_SERVICE_CHECK PASSIVE SERVICE CHECK +NAGIOS_TYPE_PASSIVE_HOST_CHECK PASSIVE HOST CHECK + +NAGIOS_TYPE_SERVICE_EVENT_HANDLER SERVICE EVENT HANDLER +NAGIOS_TYPE_HOST_EVENT_HANDLER HOST EVENT HANDLER + +NAGIOS_TYPE_EXTERNAL_COMMAND EXTERNAL COMMAND +NAGIOS_TYPE_TIMEPERIOD_TRANSITION TIMEPERIOD TRANSITION +############################################### +######## End nagios log types +############################################### + +############################################### +######## Begin external check types +############################################### +NAGIOS_EC_DISABLE_SVC_CHECK DISABLE_SVC_CHECK +NAGIOS_EC_ENABLE_SVC_CHECK ENABLE_SVC_CHECK +NAGIOS_EC_DISABLE_HOST_CHECK DISABLE_HOST_CHECK +NAGIOS_EC_ENABLE_HOST_CHECK ENABLE_HOST_CHECK +NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT PROCESS_SERVICE_CHECK_RESULT +NAGIOS_EC_PROCESS_HOST_CHECK_RESULT PROCESS_HOST_CHECK_RESULT +NAGIOS_EC_SCHEDULE_SERVICE_DOWNTIME SCHEDULE_SERVICE_DOWNTIME +NAGIOS_EC_SCHEDULE_HOST_DOWNTIME SCHEDULE_HOST_DOWNTIME +NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS DISABLE_HOST_SVC_NOTIFICATIONS +NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS ENABLE_HOST_SVC_NOTIFICATIONS +NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS DISABLE_HOST_NOTIFICATIONS +NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS ENABLE_HOST_NOTIFICATIONS +NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS DISABLE_SVC_NOTIFICATIONS +NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS ENABLE_SVC_NOTIFICATIONS +############################################### +######## End external check types 
+############################################### +NAGIOS_WARNING Warning:%{SPACE}%{GREEDYDATA:message} + +NAGIOS_CURRENT_SERVICE_STATE %{NAGIOS_TYPE_CURRENT_SERVICE_STATE:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{DATA:nagios.log.state_type};%{INT:nagios.log.attempt:int};%{GREEDYDATA:message} +NAGIOS_CURRENT_HOST_STATE %{NAGIOS_TYPE_CURRENT_HOST_STATE:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.state};%{DATA:nagios.log.state_type};%{INT:nagios.log.attempt:int};%{GREEDYDATA:message} + +NAGIOS_SERVICE_NOTIFICATION %{NAGIOS_TYPE_SERVICE_NOTIFICATION:nagios.log.type}: %{DATA:user.name};%{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{DATA:nagios.log.notification_command};%{GREEDYDATA:message} +NAGIOS_HOST_NOTIFICATION %{NAGIOS_TYPE_HOST_NOTIFICATION:nagios.log.type}: %{DATA:user.name};%{DATA:host.hostname};%{DATA:service.state};%{DATA:nagios.log.notification_command};%{GREEDYDATA:message} + +NAGIOS_SERVICE_ALERT %{NAGIOS_TYPE_SERVICE_ALERT:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{DATA:nagios.log.state_type};%{INT:nagios.log.attempt:int};%{GREEDYDATA:message} +NAGIOS_HOST_ALERT %{NAGIOS_TYPE_HOST_ALERT:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.state};%{DATA:nagios.log.state_type};%{INT:nagios.log.attempt:int};%{GREEDYDATA:message} + +NAGIOS_SERVICE_FLAPPING_ALERT %{NAGIOS_TYPE_SERVICE_FLAPPING_ALERT:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{GREEDYDATA:message} +NAGIOS_HOST_FLAPPING_ALERT %{NAGIOS_TYPE_HOST_FLAPPING_ALERT:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.state};%{GREEDYDATA:message} + +NAGIOS_SERVICE_DOWNTIME_ALERT %{NAGIOS_TYPE_SERVICE_DOWNTIME_ALERT:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{GREEDYDATA:nagios.log.comment} +NAGIOS_HOST_DOWNTIME_ALERT %{NAGIOS_TYPE_HOST_DOWNTIME_ALERT:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.state};%{GREEDYDATA:nagios.log.comment} + +NAGIOS_PASSIVE_SERVICE_CHECK %{NAGIOS_TYPE_PASSIVE_SERVICE_CHECK:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{GREEDYDATA:nagios.log.comment} +NAGIOS_PASSIVE_HOST_CHECK %{NAGIOS_TYPE_PASSIVE_HOST_CHECK:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.state};%{GREEDYDATA:nagios.log.comment} + +NAGIOS_SERVICE_EVENT_HANDLER %{NAGIOS_TYPE_SERVICE_EVENT_HANDLER:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{DATA:nagios.log.state_type};%{DATA:nagios.log.event_handler_name} +NAGIOS_HOST_EVENT_HANDLER %{NAGIOS_TYPE_HOST_EVENT_HANDLER:nagios.log.type}: %{DATA:host.hostname};%{DATA:service.state};%{DATA:nagios.log.state_type};%{DATA:nagios.log.event_handler_name} + +NAGIOS_TIMEPERIOD_TRANSITION %{NAGIOS_TYPE_TIMEPERIOD_TRANSITION:nagios.log.type}: %{DATA:service.name};%{NUMBER:nagios.log.period_from:int};%{NUMBER:nagios.log.period_to:int} + +#################### +#### External checks +#################### + +#Disable host & service check +NAGIOS_EC_LINE_DISABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_DISABLE_SVC_CHECK:nagios.log.command};%{DATA:host.hostname};%{DATA:service.name} +NAGIOS_EC_LINE_DISABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_DISABLE_HOST_CHECK:nagios.log.command};%{DATA:host.hostname} + +#Enable host & service check +NAGIOS_EC_LINE_ENABLE_SVC_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: 
%{NAGIOS_EC_ENABLE_SVC_CHECK:nagios.log.command};%{DATA:host.hostname};%{DATA:service.name} +NAGIOS_EC_LINE_ENABLE_HOST_CHECK %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_ENABLE_HOST_CHECK:nagios.log.command};%{DATA:host.hostname} + +#Process host & service check +NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_PROCESS_SERVICE_CHECK_RESULT:nagios.log.command};%{DATA:host.hostname};%{DATA:service.name};%{DATA:service.state};%{GREEDYDATA:nagios.log.check_result} +NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_PROCESS_HOST_CHECK_RESULT:nagios.log.command};%{DATA:host.hostname};%{DATA:service.state};%{GREEDYDATA:nagios.log.check_result} + +#Disable host & service notifications +NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_DISABLE_HOST_SVC_NOTIFICATIONS:nagios.log.command};%{GREEDYDATA:host.hostname} +NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_DISABLE_HOST_NOTIFICATIONS:nagios.log.command};%{GREEDYDATA:host.hostname} +NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_DISABLE_SVC_NOTIFICATIONS:nagios.log.command};%{DATA:host.hostname};%{GREEDYDATA:service.name} + +#Enable host & service notifications +NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_ENABLE_HOST_SVC_NOTIFICATIONS:nagios.log.command};%{GREEDYDATA:host.hostname} +NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_ENABLE_HOST_NOTIFICATIONS:nagios.log.command};%{GREEDYDATA:host.hostname} +NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_ENABLE_SVC_NOTIFICATIONS:nagios.log.command};%{DATA:host.hostname};%{GREEDYDATA:service.name} + +#Schedule host & service downtime +NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME %{NAGIOS_TYPE_EXTERNAL_COMMAND:nagios.log.type}: %{NAGIOS_EC_SCHEDULE_HOST_DOWNTIME:nagios.log.command};%{DATA:host.hostname};%{NUMBER:nagios.log.start_time};%{NUMBER:nagios.log.end_time};%{NUMBER:nagios.log.fixed};%{NUMBER:nagios.log.trigger_id};%{NUMBER:nagios.log.duration:int};%{DATA:user.name};%{DATA:nagios.log.comment} + +#End matching line +NAGIOSLOGLINE %{NAGIOSTIME} (?:%{NAGIOS_WARNING}|%{NAGIOS_CURRENT_SERVICE_STATE}|%{NAGIOS_CURRENT_HOST_STATE}|%{NAGIOS_SERVICE_NOTIFICATION}|%{NAGIOS_HOST_NOTIFICATION}|%{NAGIOS_SERVICE_ALERT}|%{NAGIOS_HOST_ALERT}|%{NAGIOS_SERVICE_FLAPPING_ALERT}|%{NAGIOS_HOST_FLAPPING_ALERT}|%{NAGIOS_SERVICE_DOWNTIME_ALERT}|%{NAGIOS_HOST_DOWNTIME_ALERT}|%{NAGIOS_PASSIVE_SERVICE_CHECK}|%{NAGIOS_PASSIVE_HOST_CHECK}|%{NAGIOS_SERVICE_EVENT_HANDLER}|%{NAGIOS_HOST_EVENT_HANDLER}|%{NAGIOS_TIMEPERIOD_TRANSITION}|%{NAGIOS_EC_LINE_DISABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_ENABLE_SVC_CHECK}|%{NAGIOS_EC_LINE_DISABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_ENABLE_HOST_CHECK}|%{NAGIOS_EC_LINE_PROCESS_HOST_CHECK_RESULT}|%{NAGIOS_EC_LINE_PROCESS_SERVICE_CHECK_RESULT}|%{NAGIOS_EC_LINE_SCHEDULE_HOST_DOWNTIME}|%{NAGIOS_EC_LINE_DISABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_HOST_NOTIFICATIONS}|%{NAGIOS_EC_LINE_DISABLE_SVC_NOTIFICATIONS}|%{NAGIOS_EC_LINE_ENABLE_SVC_NOTIFICATIONS}) diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/postgresql 
b/libs/grok/src/main/resources/patterns/ecs-v1/postgresql new file mode 100644 index 0000000000000..cbfd5a690c4d2 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/postgresql @@ -0,0 +1,2 @@ +# Default postgresql pg_log format pattern +POSTGRESQL %{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/rails b/libs/grok/src/main/resources/patterns/ecs-v1/rails new file mode 100644 index 0000000000000..81717d9b8ffbe --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/rails @@ -0,0 +1,13 @@ +RUUID \h{32} +# rails controller with action +RCONTROLLER (?[^#]+)#(?\w+) + +# this will often be the only line: +RAILS3HEAD (?m)Started %{WORD:http.request.method} "%{URIPATHPARAM:url.original}" for %{IPORHOST:source.address} at (?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE}) +# for some a strange reason, params are stripped of {} - not sure that's a good idea. +RPROCESSING \W*Processing by %{RCONTROLLER} as (?\S+)(?:\W*Parameters: {%{DATA:rails.request.params}}\W*)? +RAILS3FOOT Completed %{POSINT:http.response.status_code:int}%{DATA} in %{NUMBER:rails.request.duration.total:float}ms %{RAILS3PROFILE}%{GREEDYDATA} +RAILS3PROFILE (?:\(Views: %{NUMBER:rails.request.duration.view:float}ms \| ActiveRecord: %{NUMBER:rails.request.duration.active_record:float}ms|\(ActiveRecord: %{NUMBER:rails.request.duration.active_record:float}ms)? + +# putting it all together +RAILS3 %{RAILS3HEAD}(?:%{RPROCESSING})?(?(?:%{DATA}\n)*)(?:%{RAILS3FOOT})? diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/redis b/libs/grok/src/main/resources/patterns/ecs-v1/redis new file mode 100644 index 0000000000000..063290ed80dd9 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/redis @@ -0,0 +1,3 @@ +REDISTIMESTAMP %{MONTHDAY} %{MONTH} %{TIME} +REDISLOG \[%{POSINT:process.pid:int}\] %{REDISTIMESTAMP:timestamp} \* +REDISMONLOG %{NUMBER:timestamp} \[%{INT:redis.database.id} %{IP:client.ip}:%{POSINT:client.port:int}\] "%{WORD:redis.command.name}"\s?%{GREEDYDATA:redis.command.args} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/ruby b/libs/grok/src/main/resources/patterns/ecs-v1/ruby new file mode 100644 index 0000000000000..2c9a7cedd5146 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/ruby @@ -0,0 +1,2 @@ +RUBY_LOGLEVEL (?:DEBUG|FATAL|ERROR|WARN|INFO) +RUBY_LOGGER [DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:process.pid:int}\] *%{RUBY_LOGLEVEL:log.level} -- +%{DATA:process.name}: %{GREEDYDATA:message} diff --git a/libs/grok/src/main/resources/patterns/ecs-v1/squid b/libs/grok/src/main/resources/patterns/ecs-v1/squid new file mode 100644 index 0000000000000..dfff4f623f095 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/squid @@ -0,0 +1,6 @@ +# Pattern squid3 +# Documentation of squid3 logs formats can be found at the following link: +# http://wiki.squid-cache.org/Features/LogFormat +SQUID3_STATUS (?:%{POSINT:http.response.status_code:int}|0|000) +SQUID3 %{NUMBER:timestamp}\s+%{NUMBER:squid.request.duration:int}\s%{IP:source.ip}\s%{WORD:event.action}/%{SQUID3_STATUS}\s%{INT:http.response.bytes:long}\s%{WORD:http.request.method}\s%{NOTSPACE:url.original}\s(?:-|%{NOTSPACE:user.name})\s%{WORD:squid.hierarchy_code}/(?:-|%{IPORHOST:destination.address})\s(?:-|%{NOTSPACE:http.response.mime_type}) +# :long - %{INT:http.response.bytes:int} diff --git 
a/libs/grok/src/main/resources/patterns/ecs-v1/zeek b/libs/grok/src/main/resources/patterns/ecs-v1/zeek new file mode 100644 index 0000000000000..397e84aa17c35 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/ecs-v1/zeek @@ -0,0 +1,33 @@ +# updated Zeek log matching, for legacy matching see the patters/ecs-v1/bro + +ZEEK_BOOL [TF] +ZEEK_DATA [^\t]+ + +# http.log - the 'new' format (compared to BRO_HTTP) +# has *version* and *origin* fields added and *filename* replaced with *orig_filenames* + *resp_filenames* +ZEEK_HTTP %{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.ip}\t%{INT:source.port:int}\t%{IP:destination.ip}\t%{INT:destination.port:int}\t%{INT:zeek.http.trans_depth:int}\t(?:-|%{WORD:http.request.method})\t(?:-|%{ZEEK_DATA:url.domain})\t(?:-|%{ZEEK_DATA:url.original})\t(?:-|%{ZEEK_DATA:http.request.referrer})\t(?:-|%{NUMBER:http.version})\t(?:-|%{ZEEK_DATA:user_agent.original})\t(?:-|%{ZEEK_DATA:zeek.http.origin})\t(?:-|%{NUMBER:http.request.body.bytes:long})\t(?:-|%{NUMBER:http.response.body.bytes:long})\t(?:-|%{POSINT:http.response.status_code:int})\t(?:-|%{DATA:zeek.http.status_msg})\t(?:-|%{POSINT:zeek.http.info_code:int})\t(?:-|%{DATA:zeek.http.info_msg})\t(?:\(empty\)|%{ZEEK_DATA:zeek.http.tags})\t(?:-|%{ZEEK_DATA:url.username})\t(?:-|%{ZEEK_DATA:url.password})\t(?:-|%{ZEEK_DATA:zeek.http.proxied})\t(?:-|%{ZEEK_DATA:zeek.http.orig_fuids})\t(?:-|%{ZEEK_DATA:zeek.http.orig_filenames})\t(?:-|%{ZEEK_DATA:http.request.mime_type})\t(?:-|%{ZEEK_DATA:zeek.http.resp_fuids})\t(?:-|%{ZEEK_DATA:zeek.http.resp_filenames})\t(?:-|%{ZEEK_DATA:http.response.mime_type}) +# :long - %{NUMBER:http.request.body.bytes:int} +# :long - %{NUMBER:http.response.body.bytes:int} + +# dns.log - 'updated' BRO_DNS format (added *zeek.dns.rtt*) +ZEEK_DNS %{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.ip}\t%{INT:source.port:int}\t%{IP:destination.ip}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{INT:dns.id:int})\t(?:-|%{NUMBER:zeek.dns.rtt:float})\t(?:-|%{ZEEK_DATA:dns.question.name})\t(?:-|%{INT:zeek.dns.qclass:int})\t(?:-|%{ZEEK_DATA:zeek.dns.qclass_name})\t(?:-|%{INT:zeek.dns.qtype:int})\t(?:-|%{ZEEK_DATA:dns.question.type})\t(?:-|%{INT:zeek.dns.rcode:int})\t(?:-|%{ZEEK_DATA:dns.response_code})\t%{ZEEK_BOOL:zeek.dns.AA}\t%{ZEEK_BOOL:zeek.dns.TC}\t%{ZEEK_BOOL:zeek.dns.RD}\t%{ZEEK_BOOL:zeek.dns.RA}\t%{NONNEGINT:zeek.dns.Z:int}\t(?:-|%{ZEEK_DATA:zeek.dns.answers})\t(?:-|%{DATA:zeek.dns.TTLs})\t(?:-|%{ZEEK_BOOL:zeek.dns.rejected}) + +# conn.log - the 'new' format (requires *zeek.connection.local_resp*, handles `(empty)` as `-` for tunnel_parents, and optional mac adresses) +ZEEK_CONN %{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.ip}\t%{INT:source.port:int}\t%{IP:destination.ip}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{ZEEK_DATA:network.protocol})\t(?:-|%{NUMBER:zeek.connection.duration:float})\t(?:-|%{INT:zeek.connection.orig_bytes:long})\t(?:-|%{INT:zeek.connection.resp_bytes:long})\t(?:-|%{ZEEK_DATA:zeek.connection.state})\t(?:-|%{ZEEK_BOOL:zeek.connection.local_orig})\t(?:-|%{ZEEK_BOOL:zeek.connection.local_resp})\t(?:-|%{INT:zeek.connection.missed_bytes:long})\t(?:-|%{ZEEK_DATA:zeek.connection.history})\t(?:-|%{INT:source.packets:long})\t(?:-|%{INT:source.bytes:long})\t(?:-|%{INT:destination.packets:long})\t(?:-|%{INT:destination.bytes:long})\t(?:-|%{ZEEK_DATA:zeek.connection.tunnel_parents})(?:\t(?:-|%{COMMONMAC:source.mac})\t(?:-|%{COMMONMAC:destination.mac}))? 
+# :long - %{INT:zeek.connection.orig_bytes:int} +# :long - %{INT:zeek.connection.resp_bytes:int} +# :long - %{INT:zeek.connection.missed_bytes:int} +# :long - %{INT:source.packets:int} +# :long - %{INT:source.bytes:int} +# :long - %{INT:destination.packets:int} +# :long - %{INT:destination.bytes:int} + +# files.log - updated BRO_FILES format (2 new fields added at the end) +ZEEK_FILES_TX_HOSTS (?:-|%{IP:server.ip})|(?%{IP:server.ip}(?:\s,%{IP})+) +ZEEK_FILES_RX_HOSTS (?:-|%{IP:client.ip})|(?%{IP:client.ip}(?:\s,%{IP})+) +ZEEK_FILES %{NUMBER:timestamp}\t%{NOTSPACE:zeek.files.fuid}\t%{ZEEK_FILES_TX_HOSTS}\t%{ZEEK_FILES_RX_HOSTS}\t(?:-|%{ZEEK_DATA:zeek.files.session_ids})\t(?:-|%{ZEEK_DATA:zeek.files.source})\t(?:-|%{INT:zeek.files.depth:int})\t(?:-|%{ZEEK_DATA:zeek.files.analyzers})\t(?:-|%{ZEEK_DATA:file.mime_type})\t(?:-|%{ZEEK_DATA:file.name})\t(?:-|%{NUMBER:zeek.files.duration:float})\t(?:-|%{ZEEK_DATA:zeek.files.local_orig})\t(?:-|%{ZEEK_BOOL:zeek.files.is_orig})\t(?:-|%{INT:zeek.files.seen_bytes:long})\t(?:-|%{INT:file.size:long})\t(?:-|%{INT:zeek.files.missing_bytes:long})\t(?:-|%{INT:zeek.files.overflow_bytes:long})\t(?:-|%{ZEEK_BOOL:zeek.files.timedout})\t(?:-|%{ZEEK_DATA:zeek.files.parent_fuid})\t(?:-|%{ZEEK_DATA:file.hash.md5})\t(?:-|%{ZEEK_DATA:file.hash.sha1})\t(?:-|%{ZEEK_DATA:file.hash.sha256})\t(?:-|%{ZEEK_DATA:zeek.files.extracted})(?:\t(?:-|%{ZEEK_BOOL:zeek.files.extracted_cutoff})\t(?:-|%{INT:zeek.files.extracted_size:long}))? +# :long - %{INT:zeek.files.seen_bytes:int} +# :long - %{INT:file.size:int} +# :long - %{INT:zeek.files.missing_bytes:int} +# :long - %{INT:zeek.files.overflow_bytes:int} +# :long - %{INT:zeek.files.extracted_size:int} diff --git a/libs/grok/src/main/resources/patterns/aws b/libs/grok/src/main/resources/patterns/legacy/aws similarity index 100% rename from libs/grok/src/main/resources/patterns/aws rename to libs/grok/src/main/resources/patterns/legacy/aws diff --git a/libs/grok/src/main/resources/patterns/bacula b/libs/grok/src/main/resources/patterns/legacy/bacula similarity index 100% rename from libs/grok/src/main/resources/patterns/bacula rename to libs/grok/src/main/resources/patterns/legacy/bacula diff --git a/libs/grok/src/main/resources/patterns/bind b/libs/grok/src/main/resources/patterns/legacy/bind similarity index 100% rename from libs/grok/src/main/resources/patterns/bind rename to libs/grok/src/main/resources/patterns/legacy/bind diff --git a/libs/grok/src/main/resources/patterns/bro b/libs/grok/src/main/resources/patterns/legacy/bro similarity index 100% rename from libs/grok/src/main/resources/patterns/bro rename to libs/grok/src/main/resources/patterns/legacy/bro diff --git a/libs/grok/src/main/resources/patterns/exim b/libs/grok/src/main/resources/patterns/legacy/exim similarity index 100% rename from libs/grok/src/main/resources/patterns/exim rename to libs/grok/src/main/resources/patterns/legacy/exim diff --git a/libs/grok/src/main/resources/patterns/firewalls b/libs/grok/src/main/resources/patterns/legacy/firewalls similarity index 100% rename from libs/grok/src/main/resources/patterns/firewalls rename to libs/grok/src/main/resources/patterns/legacy/firewalls diff --git a/libs/grok/src/main/resources/patterns/grok-patterns b/libs/grok/src/main/resources/patterns/legacy/grok-patterns similarity index 100% rename from libs/grok/src/main/resources/patterns/grok-patterns rename to libs/grok/src/main/resources/patterns/legacy/grok-patterns diff --git a/libs/grok/src/main/resources/patterns/haproxy 
b/libs/grok/src/main/resources/patterns/legacy/haproxy similarity index 100% rename from libs/grok/src/main/resources/patterns/haproxy rename to libs/grok/src/main/resources/patterns/legacy/haproxy diff --git a/libs/grok/src/main/resources/patterns/httpd b/libs/grok/src/main/resources/patterns/legacy/httpd similarity index 100% rename from libs/grok/src/main/resources/patterns/httpd rename to libs/grok/src/main/resources/patterns/legacy/httpd diff --git a/libs/grok/src/main/resources/patterns/java b/libs/grok/src/main/resources/patterns/legacy/java similarity index 100% rename from libs/grok/src/main/resources/patterns/java rename to libs/grok/src/main/resources/patterns/legacy/java diff --git a/libs/grok/src/main/resources/patterns/junos b/libs/grok/src/main/resources/patterns/legacy/junos similarity index 100% rename from libs/grok/src/main/resources/patterns/junos rename to libs/grok/src/main/resources/patterns/legacy/junos diff --git a/libs/grok/src/main/resources/patterns/linux-syslog b/libs/grok/src/main/resources/patterns/legacy/linux-syslog similarity index 100% rename from libs/grok/src/main/resources/patterns/linux-syslog rename to libs/grok/src/main/resources/patterns/legacy/linux-syslog diff --git a/libs/grok/src/main/resources/patterns/legacy/maven b/libs/grok/src/main/resources/patterns/legacy/maven new file mode 100644 index 0000000000000..f1dc808871026 --- /dev/null +++ b/libs/grok/src/main/resources/patterns/legacy/maven @@ -0,0 +1 @@ +MAVEN_VERSION (?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)(?:[.-](RELEASE|SNAPSHOT))? diff --git a/libs/grok/src/main/resources/patterns/mcollective-patterns b/libs/grok/src/main/resources/patterns/legacy/mcollective-patterns similarity index 100% rename from libs/grok/src/main/resources/patterns/mcollective-patterns rename to libs/grok/src/main/resources/patterns/legacy/mcollective-patterns diff --git a/libs/grok/src/main/resources/patterns/mongodb b/libs/grok/src/main/resources/patterns/legacy/mongodb similarity index 100% rename from libs/grok/src/main/resources/patterns/mongodb rename to libs/grok/src/main/resources/patterns/legacy/mongodb diff --git a/libs/grok/src/main/resources/patterns/nagios b/libs/grok/src/main/resources/patterns/legacy/nagios similarity index 100% rename from libs/grok/src/main/resources/patterns/nagios rename to libs/grok/src/main/resources/patterns/legacy/nagios diff --git a/libs/grok/src/main/resources/patterns/postgresql b/libs/grok/src/main/resources/patterns/legacy/postgresql similarity index 100% rename from libs/grok/src/main/resources/patterns/postgresql rename to libs/grok/src/main/resources/patterns/legacy/postgresql diff --git a/libs/grok/src/main/resources/patterns/rails b/libs/grok/src/main/resources/patterns/legacy/rails similarity index 100% rename from libs/grok/src/main/resources/patterns/rails rename to libs/grok/src/main/resources/patterns/legacy/rails diff --git a/libs/grok/src/main/resources/patterns/redis b/libs/grok/src/main/resources/patterns/legacy/redis similarity index 100% rename from libs/grok/src/main/resources/patterns/redis rename to libs/grok/src/main/resources/patterns/legacy/redis diff --git a/libs/grok/src/main/resources/patterns/ruby b/libs/grok/src/main/resources/patterns/legacy/ruby similarity index 100% rename from libs/grok/src/main/resources/patterns/ruby rename to libs/grok/src/main/resources/patterns/legacy/ruby diff --git a/libs/grok/src/main/resources/patterns/squid b/libs/grok/src/main/resources/patterns/legacy/squid similarity index 100% rename from 
libs/grok/src/main/resources/patterns/squid rename to libs/grok/src/main/resources/patterns/legacy/squid diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java index df43b84b23dfb..41a56ab5b86f9 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java @@ -8,10 +8,12 @@ package org.elasticsearch.grok; +import org.elasticsearch.core.Tuple; import org.elasticsearch.grok.GrokCaptureConfig.NativeExtracterMap; import org.elasticsearch.test.ESTestCase; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -27,6 +29,7 @@ import java.util.function.IntConsumer; import java.util.function.LongConsumer; +import static org.elasticsearch.core.Tuple.tuple; import static org.elasticsearch.grok.GrokCaptureType.BOOLEAN; import static org.elasticsearch.grok.GrokCaptureType.DOUBLE; import static org.elasticsearch.grok.GrokCaptureType.FLOAT; @@ -40,15 +43,26 @@ public class GrokTests extends ESTestCase { + public void testMatchWithoutCaptures() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "value", logger::warn); + testMatchWithoutCaptures(false); + testMatchWithoutCaptures(true); + } + + private void testMatchWithoutCaptures(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "value", logger::warn); assertThat(grok.captures("value"), equalTo(Map.of())); assertThat(grok.captures("prefix_value"), equalTo(Map.of())); assertThat(grok.captures("no_match"), nullValue()); } - public void testCaputuresBytes() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{NUMBER:n:int}", logger::warn); + public void testCapturesBytes() { + testCapturesBytes(false); + testCapturesBytes(true); + } + + private void testCapturesBytes(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{NUMBER:n:int}", logger::warn); byte[] utf8 = "10".getBytes(StandardCharsets.UTF_8); assertThat(captureBytes(grok, utf8, 0, utf8.length), equalTo(Map.of("n", 10))); assertThat(captureBytes(grok, utf8, 0, 1), equalTo(Map.of("n", 1))); @@ -72,79 +86,171 @@ public void testNoMatchingPatternInDictionary() { } public void testSimpleSyslogLine() { - String line = "Mar 16 00:01:25 evita postfix/smtpd[1713]: connect from camomile.cloud9.net[168.100.1.3]"; - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{SYSLOGLINE}", logger::warn); - assertCaptureConfig( - grok, - Map.ofEntries( - Map.entry("facility", STRING), - Map.entry("logsource", STRING), - Map.entry("message", STRING), - Map.entry("pid", STRING), - Map.entry("priority", STRING), - Map.entry("program", STRING), - Map.entry("timestamp", STRING), - Map.entry("timestamp8601", STRING) - ) + final String logSource = "evita"; + final String timestamp = "Mar 16 00:01:25"; + final String message = "connect from camomile.cloud9.net[168.100.1.3]"; + final String program = "postfix/smtpd"; + + testSimpleSyslogLine( + false, + tuple(Map.entry("facility", STRING), null), + tuple(Map.entry("logsource", STRING), logSource), + tuple(Map.entry("message", STRING), message), + tuple(Map.entry("pid", STRING), "1713"), + tuple(Map.entry("priority", STRING), null), + tuple(Map.entry("program", STRING), program), + tuple(Map.entry("timestamp", STRING), timestamp), + tuple(Map.entry("timestamp8601", STRING), null), + List.of() ); + + testSimpleSyslogLine( + true, + 
tuple(Map.entry("log.syslog.facility.code", INTEGER), null), + tuple(Map.entry("host.hostname", STRING), logSource), + tuple(Map.entry("message", STRING), message), + tuple(Map.entry("process.pid", INTEGER), 1713), + tuple(Map.entry("log.syslog.priority", INTEGER), null), + tuple(Map.entry("process.name", STRING), program), + tuple(Map.entry("timestamp", STRING), timestamp), + null, + List.of("timestamp") + ); + } + + private void testSimpleSyslogLine( + boolean ecsCompatibility, + Tuple, Object> facility, + Tuple, Object> logSource, + Tuple, Object> message, + Tuple, Object> pid, + Tuple, Object> priority, + Tuple, Object> program, + Tuple, Object> timestamp, + Tuple, Object> timestamp8601, + List acceptedDuplicates + ) { + String line = "Mar 16 00:01:25 evita postfix/smtpd[1713]: connect from camomile.cloud9.net[168.100.1.3]"; + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{SYSLOGLINE}", logger::warn); + + Map captureTypes = new HashMap<>(); + captureTypes.put(facility.v1().getKey(), facility.v1().getValue()); + captureTypes.put(logSource.v1().getKey(), logSource.v1().getValue()); + captureTypes.put(message.v1().getKey(), message.v1().getValue()); + captureTypes.put(pid.v1().getKey(), pid.v1().getValue()); + captureTypes.put(priority.v1().getKey(), priority.v1().getValue()); + captureTypes.put(program.v1().getKey(), program.v1().getValue()); + captureTypes.put(timestamp.v1().getKey(), timestamp.v1().getValue()); + if (timestamp8601 != null) { + captureTypes.put(timestamp8601.v1().getKey(), timestamp8601.v1().getValue()); + } + + assertCaptureConfig(grok, captureTypes, acceptedDuplicates); Map matches = grok.captures(line); - assertEquals("evita", matches.get("logsource")); - assertEquals("Mar 16 00:01:25", matches.get("timestamp")); - assertEquals("connect from camomile.cloud9.net[168.100.1.3]", matches.get("message")); - assertEquals("postfix/smtpd", matches.get("program")); - assertEquals("1713", matches.get("pid")); + assertEquals(logSource.v2(), matches.get(logSource.v1().getKey())); + assertEquals(timestamp.v2(), matches.get(timestamp.v1().getKey())); + assertEquals(message.v2(), matches.get(message.v1().getKey())); + assertEquals(program.v2(), matches.get(program.v1().getKey())); + assertEquals(pid.v2(), matches.get(pid.v1().getKey())); String[] logsource = new String[1]; - GrokCaptureExtracter logsourceExtracter = namedConfig(grok, "logsource").nativeExtracter(new ThrowingNativeExtracterMap() { - @Override - public GrokCaptureExtracter forString(Function, GrokCaptureExtracter> buildExtracter) { - return buildExtracter.apply(str -> logsource[0] = str); - } - }); + GrokCaptureExtracter logsourceExtracter = + namedConfig(grok, logSource.v1().getKey()) + .nativeExtracter(new ThrowingNativeExtracterMap() { + @Override + public GrokCaptureExtracter forString(Function, GrokCaptureExtracter> buildExtracter) { + return buildExtracter.apply(str -> logsource[0] = str); + } + }); assertThat(specificCapture(grok, line, logsourceExtracter), is(true)); - assertThat(logsource[0], equalTo("evita")); + assertThat(logsource[0], equalTo(logSource.v2())); } public void testSyslog5424Line() { + final String ts = "2009-06-30T18:30:00+02:00"; + final String host = "paxton.local"; + final String app = "grokdebug"; + final String sd = "[id1 foo=\\\"bar\\\"][id2 baz=\\\"something\\\"]"; + final String msg = "Hello, syslog."; + final String ver = "1"; + + testSyslog5424Line( + false, + tuple(Map.entry("syslog5424_app", STRING), app), + tuple(Map.entry("syslog5424_host", STRING), host), + 
tuple(Map.entry("syslog5424_msg", STRING), msg), + tuple(Map.entry("syslog5424_msgid", STRING), null), + tuple(Map.entry("syslog5424_pri", STRING), "191"), + tuple(Map.entry("syslog5424_proc", STRING), "4123"), + tuple(Map.entry("syslog5424_sd", STRING), sd), + tuple(Map.entry("syslog5424_ts", STRING), ts), + tuple(Map.entry("syslog5424_ver", STRING), ver) + ); + testSyslog5424Line( + true, + tuple(Map.entry("process.name", STRING), app), + tuple(Map.entry("host.hostname", STRING), host), + tuple(Map.entry("message", STRING), msg), + tuple(Map.entry("event.code", STRING), null), + tuple(Map.entry("log.syslog.priority", INTEGER), 191), + tuple(Map.entry("process.pid", INTEGER), 4123), + tuple(Map.entry("system.syslog.structured_data", STRING), sd), + tuple(Map.entry("timestamp", STRING), ts), + tuple(Map.entry("system.syslog.version", STRING), ver) + ); + } + + private void testSyslog5424Line( + boolean ecsCompatibility, + Tuple, Object> app, + Tuple, Object> host, + Tuple, Object> msg, + Tuple, Object> msgid, + Tuple, Object> pri, + Tuple, Object> proc, + Tuple, Object> sd, + Tuple, Object> ts, + Tuple, Object> ver + ) { String line = "<191>1 2009-06-30T18:30:00+02:00 paxton.local grokdebug 4123 - [id1 foo=\\\"bar\\\"][id2 baz=\\\"something\\\"] " + - "Hello, syslog."; - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{SYSLOG5424LINE}", logger::warn); + "Hello, syslog."; + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{SYSLOG5424LINE}", logger::warn); assertCaptureConfig( grok, - Map.ofEntries( - Map.entry("syslog5424_app", STRING), - Map.entry("syslog5424_host", STRING), - Map.entry("syslog5424_msg", STRING), - Map.entry("syslog5424_msgid", STRING), - Map.entry("syslog5424_pri", STRING), - Map.entry("syslog5424_proc", STRING), - Map.entry("syslog5424_sd", STRING), - Map.entry("syslog5424_ts", STRING), - Map.entry("syslog5424_ver", STRING) - ) + Map.ofEntries(app.v1(), host.v1(), msg.v1(), msgid.v1(), pri.v1(), proc.v1(), sd.v1(), ts.v1(), ver.v1()) ); Map matches = grok.captures(line); - assertEquals("191", matches.get("syslog5424_pri")); - assertEquals("1", matches.get("syslog5424_ver")); - assertEquals("2009-06-30T18:30:00+02:00", matches.get("syslog5424_ts")); - assertEquals("paxton.local", matches.get("syslog5424_host")); - assertEquals("grokdebug", matches.get("syslog5424_app")); - assertEquals("4123", matches.get("syslog5424_proc")); - assertEquals(null, matches.get("syslog5424_msgid")); - assertEquals("[id1 foo=\\\"bar\\\"][id2 baz=\\\"something\\\"]", matches.get("syslog5424_sd")); - assertEquals("Hello, syslog.", matches.get("syslog5424_msg")); + assertEquals(pri.v2(), matches.get(pri.v1().getKey())); + assertEquals(ver.v2(), matches.get(ver.v1().getKey())); + assertEquals(ts.v2(), matches.get(ts.v1().getKey())); + assertEquals(host.v2(), matches.get(host.v1().getKey())); + assertEquals(app.v2(), matches.get(app.v1().getKey())); + assertEquals(proc.v2(), matches.get(proc.v1().getKey())); + assertEquals(msgid.v2(), matches.get(msgid.v1().getKey())); + assertEquals(sd.v2(), matches.get(sd.v1().getKey())); + assertEquals(msg.v2(), matches.get(msg.v1().getKey())); } public void testDatePattern() { + testDatePattern(false); + testDatePattern(true); + } + + private void testDatePattern(boolean ecsCompatibility) { String line = "fancy 12-12-12 12:12:12"; - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "(?%{DATE_EU} %{TIME})", logger::warn); + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "(?%{DATE_EU} %{TIME})", logger::warn); assertCaptureConfig(grok, 
Map.of("timestamp", STRING)); Map<String, Object> matches = grok.captures(line); assertEquals("12-12-12 12:12:12", matches.get("timestamp")); } public void testNilCoercedValues() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "test (N/A|%{BASE10NUM:duration:float}ms)", logger::warn); + testNilCoercedValues(false); + testNilCoercedValues(true); + } + + private void testNilCoercedValues(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "test (N/A|%{BASE10NUM:duration:float}ms)", logger::warn); assertCaptureConfig(grok, Map.of("duration", FLOAT)); Map<String, Object> matches = grok.captures("test 28.4ms"); assertEquals(28.4f, matches.get("duration")); @@ -153,7 +259,12 @@ public void testNilCoercedValues() { } public void testNilWithNoCoercion() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "test (N/A|%{BASE10NUM:duration}ms)", logger::warn); + testNilWithNoCoercion(false); + testNilWithNoCoercion(true); + } + + private void testNilWithNoCoercion(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "test (N/A|%{BASE10NUM:duration}ms)", logger::warn); assertCaptureConfig(grok, Map.of("duration", STRING)); Map<String, Object> matches = grok.captures("test 28.4ms"); assertEquals("28.4", matches.get("duration")); @@ -162,9 +273,17 @@ public void testNilWithNoCoercion() { } public void testUnicodeSyslog() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "<%{POSINT:syslog_pri}>%{SPACE}%{SYSLOGTIMESTAMP:syslog_timestamp} " + + testUnicodeSyslog(false); + testUnicodeSyslog(true); + } + + private void testUnicodeSyslog(boolean ecsCompatibility) { + Grok grok = new Grok( + Grok.getBuiltinPatterns(ecsCompatibility), + "<%{POSINT:syslog_pri}>%{SPACE}%{SYSLOGTIMESTAMP:syslog_timestamp} " + "%{SYSLOGHOST:syslog_hostname} %{PROG:syslog_program}(:?)(?:\\[%{GREEDYDATA:syslog_pid}\\])?(:?) 
" + - "%{GREEDYDATA:syslog_message}", logger::warn); + "%{GREEDYDATA:syslog_message}", logger::warn + ); assertCaptureConfig( grok, Map.ofEntries( @@ -185,21 +304,36 @@ public void testUnicodeSyslog() { } public void testNamedFieldsWithWholeTextMatch() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{DATE_EU:stimestamp}", logger::warn); + testNamedFieldsWithWholeTextMatch(false); + testNamedFieldsWithWholeTextMatch(true); + } + + private void testNamedFieldsWithWholeTextMatch(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{DATE_EU:stimestamp}", logger::warn); assertCaptureConfig(grok, Map.of("stimestamp", STRING)); Map<String, Object> matches = grok.captures("11/01/01"); assertThat(matches.get("stimestamp"), equalTo("11/01/01")); } public void testWithOniguramaNamedCaptures() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "(?<foo>\\w+)", logger::warn); + testWithOniguramaNamedCaptures(false); + testWithOniguramaNamedCaptures(true); + } + + private void testWithOniguramaNamedCaptures(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "(?<foo>\\w+)", logger::warn); assertCaptureConfig(grok, Map.of("foo", STRING)); Map<String, Object> matches = grok.captures("hello world"); assertThat(matches.get("foo"), equalTo("hello")); } public void testISO8601() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "^%{TIMESTAMP_ISO8601}$", logger::warn); + testISO8601(false); + testISO8601(true); + } + + private void testISO8601(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "^%{TIMESTAMP_ISO8601}$", logger::warn); assertCaptureConfig(grok, Map.of()); List<String> timeMessages = Arrays.asList( "2001-01-01T00:00:00", @@ -224,7 +358,12 @@ public void testISO8601() { } public void testNotISO8601() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "^%{TIMESTAMP_ISO8601}$", logger::warn); + testNotISO8601(false, List.of("2001-01-01T0:00:00")); // legacy patterns do not permit single-digit hours + testNotISO8601(true, List.of()); + } + + private void testNotISO8601(boolean ecsCompatibility, List<String> additionalCases) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "^%{TIMESTAMP_ISO8601}$", logger::warn); assertCaptureConfig(grok, Map.of()); List<String> timeMessages = Arrays.asList( "2001-13-01T00:00:00", // invalid month @@ -234,7 +373,6 @@ public void testNotISO8601() { "2001-01-aT00:00:00", // invalid day "2001-01-1aT00:00:00", // invalid day "2001-01-01Ta0:00:00", // invalid hour - "2001-01-01T0:00:00", // invalid hour "2001-01-01T25:00:00", // invalid hour "2001-01-01T01:60:00", // invalid minute "2001-01-01T00:aa:00", // invalid minute @@ -250,7 +388,9 @@ public void testNotISO8601() { "2001-01-01T00:00:00-2500", // invalid timezone "2001-01-01T00:00:00-00:61" // invalid timezone ); - for (String msg : timeMessages) { + List<String> timesToTest = new ArrayList<>(timeMessages); + timesToTest.addAll(additionalCases); + for (String msg : timesToTest) { assertThat(grok.match(msg), is(false)); } } @@ -350,8 +490,13 @@ public void testCircularSelfReference() { } public void testBooleanCaptures() { + testBooleanCaptures(false); + testBooleanCaptures(true); + } + + private void testBooleanCaptures(boolean ecsCompatibility) { String pattern = "%{WORD:name}=%{WORD:status:boolean}"; - Grok g = new Grok(Grok.BUILTIN_PATTERNS, pattern, logger::warn); + Grok g = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), pattern, logger::warn); assertCaptureConfig(g, Map.of("name", STRING, "status", BOOLEAN)); String text = "active=true"; @@ 
-451,42 +596,101 @@ public void testGarbageTypeNameBecomesString() { } public void testApacheLog() { + final String agent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.12785 " + + "YaBrowser/13.12.1599.12785 Safari/537.36"; + final String clientIp = "31.184.238.164"; + final String timestamp = "24/Jul/2014:05:35:37 +0530"; + final String verb = "GET"; + final String request = "/logs/access.log"; + final String httpVersion = "1.0"; + final String referrer = "http://8rursodiol.enjin.com"; + + testApacheLog( + false, + tuple(Map.entry("agent", STRING), "\"" + agent + "\""), + tuple(Map.entry("auth", STRING), "-"), + tuple(Map.entry("bytes", STRING), "69849"), + tuple(Map.entry("clientip", STRING), clientIp), + tuple(Map.entry("httpversion", STRING), httpVersion), + tuple(Map.entry("ident", STRING), "-"), + tuple(Map.entry("rawrequest", STRING), null), + tuple(Map.entry("referrer", STRING), "\"" + referrer + "\""), + tuple(Map.entry("request", STRING), request), + tuple(Map.entry("timestamp", STRING), timestamp), + tuple(Map.entry("verb", STRING), verb), + List.of(tuple(Map.entry("response", STRING), "200")) + ); + testApacheLog( + true, + tuple(Map.entry("user_agent.original", STRING), agent), + tuple(Map.entry("user.name", STRING), null), + tuple(Map.entry("http.response.body.bytes", LONG), 69849L), + tuple(Map.entry("source.address", STRING), clientIp), + tuple(Map.entry("http.version", STRING), httpVersion), + tuple(Map.entry("apache.access.user.identity", STRING), null), + tuple(Map.entry("http.response.status_code", INTEGER), 200), + tuple(Map.entry("http.request.referrer", STRING), referrer), + tuple(Map.entry("url.original", STRING), request), + tuple(Map.entry("timestamp", STRING), timestamp), + tuple(Map.entry("http.request.method", STRING), verb), + List.of() + ); + } + + public void testApacheLog( + boolean ecsCompatibility, + Tuple, Object> agent, + Tuple, Object> auth, + Tuple, Object> bytes, + Tuple, Object> clientIp, + Tuple, Object> httpVersion, + Tuple, Object> ident, + Tuple, Object> rawRequest, + Tuple, Object> referrer, + Tuple, Object> request, + Tuple, Object> timestamp, + Tuple, Object> verb, + List, Object>> additionalFields + ) { String logLine = "31.184.238.164 - - [24/Jul/2014:05:35:37 +0530] \"GET /logs/access.log HTTP/1.0\" 200 69849 " + "\"http://8rursodiol.enjin.com\" \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/30.0.1599.12785 YaBrowser/13.12.1599.12785 Safari/537.36\" \"www.dlwindianrailways.com\""; - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{COMBINEDAPACHELOG}", logger::warn); - assertCaptureConfig( - grok, - Map.ofEntries( - Map.entry("agent", STRING), - Map.entry("auth", STRING), - Map.entry("bytes", STRING), - Map.entry("clientip", STRING), - Map.entry("httpversion", STRING), - Map.entry("ident", STRING), - Map.entry("rawrequest", STRING), - Map.entry("referrer", STRING), - Map.entry("request", STRING), - Map.entry("response", STRING), - Map.entry("timestamp", STRING), - Map.entry("verb", STRING) - ) - ); + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{COMBINEDAPACHELOG}", logger::warn); + + Map captureTypes = new HashMap<>(); + captureTypes.put(agent.v1().getKey(), agent.v1().getValue()); + captureTypes.put(auth.v1().getKey(), auth.v1().getValue()); + captureTypes.put(bytes.v1().getKey(), bytes.v1().getValue()); + captureTypes.put(clientIp.v1().getKey(), clientIp.v1().getValue()); + captureTypes.put(httpVersion.v1().getKey(), 
httpVersion.v1().getValue()); + captureTypes.put(ident.v1().getKey(), ident.v1().getValue()); + captureTypes.put(rawRequest.v1().getKey(), rawRequest.v1().getValue()); + captureTypes.put(referrer.v1().getKey(), referrer.v1().getValue()); + captureTypes.put(request.v1().getKey(), request.v1().getValue()); + captureTypes.put(timestamp.v1().getKey(), timestamp.v1().getValue()); + captureTypes.put(verb.v1().getKey(), verb.v1().getValue()); + for (var additionalField : additionalFields) { + captureTypes.put(additionalField.v1().getKey(), additionalField.v1().getValue()); + } + + assertCaptureConfig(grok, captureTypes); Map matches = grok.captures(logLine); - assertEquals("31.184.238.164", matches.get("clientip")); - assertEquals("-", matches.get("ident")); - assertEquals("-", matches.get("auth")); - assertEquals("24/Jul/2014:05:35:37 +0530", matches.get("timestamp")); - assertEquals("GET", matches.get("verb")); - assertEquals("/logs/access.log", matches.get("request")); - assertEquals("1.0", matches.get("httpversion")); - assertEquals("200", matches.get("response")); - assertEquals("69849", matches.get("bytes")); - assertEquals("\"http://8rursodiol.enjin.com\"", matches.get("referrer")); + assertEquals(clientIp.v2(), matches.get(clientIp.v1().getKey())); + assertEquals(ident.v2(), matches.get(ident.v1().getKey())); + assertEquals(auth.v2(), matches.get(auth.v1().getKey())); + assertEquals(timestamp.v2(), matches.get(timestamp.v1().getKey())); + assertEquals(verb.v2(), matches.get(verb.v1().getKey())); + assertEquals(request.v2(), matches.get(request.v1().getKey())); + assertEquals(httpVersion.v2(), matches.get(httpVersion.v1().getKey())); + assertEquals(bytes.v2(), matches.get(bytes.v1().getKey())); + assertEquals(referrer.v2(), matches.get(referrer.v1().getKey())); assertEquals(null, matches.get("port")); - assertEquals("\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.12785 " + - "YaBrowser/13.12.1599.12785 Safari/537.36\"", matches.get("agent")); + assertEquals(agent.v2(), matches.get(agent.v1().getKey())); + assertEquals(rawRequest.v2(), matches.get(rawRequest.v1().getKey())); + for (var additionalField : additionalFields) { + assertEquals(additionalField.v2(), matches.get(additionalField.v1().getKey())); + } } public void testComplete() { @@ -587,6 +791,11 @@ public void testMultipleNamedCapturesWithSameName() { } public void testExponentialExpressions() { + testExponentialExpressions(false); + testExponentialExpressions(true); + } + + private void testExponentialExpressions(boolean ecsCompatibility) { AtomicBoolean run = new AtomicBoolean(true); // to avoid a lingering thread when test has completed String grokPattern = "Bonsuche mit folgender Anfrage: Belegart->\\[%{WORD:param2},(?(\\s*%{NOTSPACE})*)\\] " + @@ -606,8 +815,12 @@ public void testExponentialExpressions() { }); t.start(); }; - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, grokPattern, MatcherWatchdog.newInstance(10, 200, System::currentTimeMillis, scheduler), - logger::warn); + Grok grok = new Grok( + Grok.getBuiltinPatterns(ecsCompatibility), + grokPattern, + MatcherWatchdog.newInstance(10, 200, System::currentTimeMillis, scheduler), + logger::warn + ); Exception e = expectThrows(RuntimeException.class, () -> grok.captures(logLine)); run.set(false); assertThat(e.getMessage(), equalTo("grok pattern matching was interrupted after [200] ms")); @@ -647,24 +860,44 @@ public void testAlphanumericFieldName() { } public void testUnsupportedBracketsInFieldName() { - Grok grok = new 
Grok(Grok.BUILTIN_PATTERNS, "%{WORD:unsuppo(r)ted}", logger::warn); + testUnsupportedBracketsInFieldName(false); + testUnsupportedBracketsInFieldName(true); + } + + private void testUnsupportedBracketsInFieldName(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{WORD:unsuppo(r)ted}", logger::warn); Map<String, Object> matches = grok.captures("line"); assertNull(matches); } public void testJavaClassPatternWithUnderscore() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{JAVACLASS}", logger::warn); + testJavaClassPatternWithUnderscore(false); + testJavaClassPatternWithUnderscore(true); + } + + private void testJavaClassPatternWithUnderscore(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{JAVACLASS}", logger::warn); assertThat(grok.match("Test_Class.class"), is(true)); } public void testJavaFilePatternWithSpaces() { - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{JAVAFILE}", logger::warn); + testJavaFilePatternWithSpaces(false); + testJavaFilePatternWithSpaces(true); + } + + private void testJavaFilePatternWithSpaces(boolean ecsCompatibility) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{JAVAFILE}", logger::warn); assertThat(grok.match("Test Class.java"), is(true)); } - public void testLogCallBack(){ + public void testLogCallBack() { + testLogCallBack(false); + testLogCallBack(true); + } + + private void testLogCallBack(boolean ecsCompatibility) { AtomicReference<String> message = new AtomicReference<>(); - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, ".*\\[.*%{SPACE}*\\].*", message::set); + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), ".*\\[.*%{SPACE}*\\].*", message::set); grok.match("[foo]"); //this message comes from Joni, so updates to Joni may change the expectation assertThat(message.get(), containsString("regular expression has redundant nested repeat operator")); @@ -672,16 +905,25 @@ public void testLogCallBack(){ private void assertGrokedField(String fieldName) { String line = "foo"; - Grok grok = new Grok(Grok.BUILTIN_PATTERNS, "%{WORD:" + fieldName + "}", logger::warn); - Map<String, Object> matches = grok.captures(line); - assertEquals(line, matches.get(fieldName)); + // test both with and without ECS compatibility + for (boolean ecsCompatibility : new boolean[]{false, true}) { + Grok grok = new Grok(Grok.getBuiltinPatterns(ecsCompatibility), "%{WORD:" + fieldName + "}", logger::warn); + Map<String, Object> matches = grok.captures(line); + assertEquals(line, matches.get(fieldName)); + } } private void assertCaptureConfig(Grok grok, Map<String, GrokCaptureType> nameToType) { + assertCaptureConfig(grok, nameToType, List.of()); + } + + private void assertCaptureConfig(Grok grok, Map<String, GrokCaptureType> nameToType, List<String> acceptedDuplicates) { Map<String, GrokCaptureType> fromGrok = new TreeMap<>(); for (GrokCaptureConfig config : grok.captureConfig()) { Object old = fromGrok.put(config.name(), config.type()); - assertThat("duplicates not allowed", old, nullValue()); + if (acceptedDuplicates.contains(config.name()) == false) { + assertThat("duplicates not allowed", old, nullValue()); + } } assertThat(fromGrok, equalTo(new TreeMap<>(nameToType))); } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/Page.java b/libs/nio/src/main/java/org/elasticsearch/nio/Page.java index dafe68cfea744..50a1c0207eca5 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/Page.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/Page.java @@ -65,7 +65,6 @@ private static class RefCountedCloseable extends AbstractRefCounted { private final Releasable closeable; private 
RefCountedCloseable(Releasable closeable) { - super("byte array page"); this.closeable = closeable; } diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle index 080c9af25194d..fcab80d9891ef 100644 --- a/modules/aggs-matrix-stats/build.gradle +++ b/modules/aggs-matrix-stats/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java index c1b24c1fff86c..57ff7d227ceed 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java @@ -21,7 +21,9 @@ public class MatrixAggregationPlugin extends Plugin implements SearchPlugin { @Override public List getAggregations() { - return singletonList(new AggregationSpec(MatrixStatsAggregationBuilder.NAME, MatrixStatsAggregationBuilder::new, - new MatrixStatsParser()).addResultReader(InternalMatrixStats::new)); + return singletonList( + new AggregationSpec(MatrixStatsAggregationBuilder.NAME, MatrixStatsAggregationBuilder::new, new MatrixStatsParser()) + .addResultReader(InternalMatrixStats::new) + ); } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java index ce7da6c2576f0..659de22577a57 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java @@ -8,9 +8,9 @@ package org.elasticsearch.search.aggregations.matrix.spi; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java index 5fa90395fd76d..15264b9b51c93 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java @@ -30,8 +30,13 @@ public class InternalMatrixStats extends InternalAggregation implements MatrixSt private final MatrixStatsResults results; /** per shard ctor */ - InternalMatrixStats(String name, long count, RunningStats multiFieldStatsResults, MatrixStatsResults 
results, - Map metadata) { + InternalMatrixStats( + String name, + long count, + RunningStats multiFieldStatsResults, + MatrixStatsResults results, + Map metadata + ) { super(name, metadata); assert count >= 0; this.stats = multiFieldStatsResults; @@ -224,7 +229,7 @@ public Object getProperty(List path) { public InternalAggregation reduce(List aggregations, ReduceContext reduceContext) { // merge stats across all shards List aggs = new ArrayList<>(aggregations); - aggs.removeIf(p -> ((InternalMatrixStats)p).stats == null); + aggs.removeIf(p -> ((InternalMatrixStats) p).stats == null); // return empty result iff all stats are null if (aggs.isEmpty()) { @@ -260,7 +265,6 @@ public boolean equals(Object obj) { if (super.equals(obj) == false) return false; InternalMatrixStats other = (InternalMatrixStats) obj; - return Objects.equals(this.stats, other.stats) && - Objects.equals(this.results, other.results); + return Objects.equals(this.stats, other.stats) && Objects.equals(this.results, other.results); } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java index 686ed3a36bd7d..b423fa2e5caf4 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java @@ -15,18 +15,25 @@ public interface MatrixStats extends Aggregation { /** return the total document count */ long getDocCount(); + /** return total field count (differs from docCount if there are missing values) */ long getFieldCount(String field); + /** return the field mean */ double getMean(String field); + /** return the field variance */ double getVariance(String field); + /** return the skewness of the distribution */ double getSkewness(String field); + /** return the kurtosis of the distribution */ double getKurtosis(String field); + /** return the covariance between field x and field y */ double getCovariance(String fieldX, String fieldY); + /** return the correlation coefficient of field x and field y */ double getCorrelation(String fieldX, String fieldY); } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java index b779ae8e2578e..63ac6cfacef38 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java @@ -22,8 +22,7 @@ import java.io.IOException; import java.util.Map; -public class MatrixStatsAggregationBuilder - extends ArrayValuesSourceAggregationBuilder.LeafOnly { +public class MatrixStatsAggregationBuilder extends ArrayValuesSourceAggregationBuilder.LeafOnly { public static final String NAME = "matrix_stats"; private MultiValueMode multiValueMode = MultiValueMode.AVG; @@ -32,8 +31,11 @@ public MatrixStatsAggregationBuilder(String name) { super(name); } - protected MatrixStatsAggregationBuilder(MatrixStatsAggregationBuilder clone, - AggregatorFactories.Builder factoriesBuilder, Map metadata) { + protected MatrixStatsAggregationBuilder( + MatrixStatsAggregationBuilder 
clone, + AggregatorFactories.Builder factoriesBuilder, + Map metadata + ) { super(clone, factoriesBuilder, metadata); this.multiValueMode = clone.multiValueMode; } @@ -65,10 +67,12 @@ public MultiValueMode multiValueMode() { } @Override - protected MatrixStatsAggregatorFactory innerBuild(AggregationContext context, - Map configs, - AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) throws IOException { + protected MatrixStatsAggregatorFactory innerBuild( + AggregationContext context, + Map configs, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder + ) throws IOException { return new MatrixStatsAggregatorFactory(name, configs, multiValueMode, context, parent, subFactoriesBuilder, metadata); } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java index 211e0a0d0a94c..f55dc47c84677 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java @@ -9,8 +9,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; -import org.elasticsearch.core.Releasables; import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.Aggregator; @@ -35,8 +35,14 @@ final class MatrixStatsAggregator extends MetricsAggregator { /** array of descriptive stats, per shard, needed to compute the correlation */ ObjectArray stats; - MatrixStatsAggregator(String name, Map valuesSources, AggregationContext context, - Aggregator parent, MultiValueMode multiValueMode, Map metadata) throws IOException { + MatrixStatsAggregator( + String name, + Map valuesSources, + AggregationContext context, + Aggregator parent, + MultiValueMode multiValueMode, + Map metadata + ) throws IOException { super(name, context, parent, metadata); if (valuesSources != null && valuesSources.isEmpty() == false) { this.valuesSources = new NumericArrayValuesSource(valuesSources, multiValueMode); @@ -52,8 +58,7 @@ public ScoreMode scoreMode() { } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, - final LeafBucketCollector sub) throws IOException { + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { if (valuesSources == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index 2cbcc3992fa7d..6fd85401b8a41 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -26,13 +26,15 @@ final class MatrixStatsAggregatorFactory extends ArrayValuesSourceAggregatorFact private final MultiValueMode multiValueMode; - 
MatrixStatsAggregatorFactory(String name, - Map configs, - MultiValueMode multiValueMode, - AggregationContext context, - AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, - Map metadata) throws IOException { + MatrixStatsAggregatorFactory( + String name, + Map configs, + MultiValueMode multiValueMode, + AggregationContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata + ) throws IOException { super(name, configs, context, parent, subFactoriesBuilder, metadata); this.multiValueMode = multiValueMode; } @@ -43,15 +45,18 @@ protected Aggregator createUnmapped(Aggregator parent, Map metad } @Override - protected Aggregator doCreateInternal(Map valuesSources, - Aggregator parent, - CardinalityUpperBound cardinality, - Map metadata) throws IOException { + protected Aggregator doCreateInternal( + Map valuesSources, + Aggregator parent, + CardinalityUpperBound cardinality, + Map metadata + ) throws IOException { Map typedValuesSources = new HashMap<>(valuesSources.size()); for (Map.Entry entry : valuesSources.entrySet()) { if (entry.getValue() instanceof ValuesSource.Numeric == false) { - throw new AggregationExecutionException("ValuesSource type " + entry.getValue().toString() + - "is not supported for aggregation " + this.name()); + throw new AggregationExecutionException( + "ValuesSource type " + entry.getValue().toString() + "is not supported for aggregation " + this.name() + ); } // TODO: There must be a better option than this. typedValuesSources.put(entry.getKey(), (ValuesSource.Numeric) entry.getValue()); diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java index 80649737504a2..1001e39cde00b 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java @@ -26,8 +26,13 @@ public MatrixStatsParser() { } @Override - protected boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser, - Map otherOptions) throws IOException { + protected boolean token( + String aggregationName, + String currentFieldName, + XContentParser.Token token, + XContentParser parser, + Map otherOptions + ) throws IOException { if (MULTIVALUE_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { if (token == XContentParser.Token.VALUE_STRING) { otherOptions.put(MULTIVALUE_MODE_FIELD, parser.text()); @@ -38,10 +43,14 @@ protected boolean token(String aggregationName, String currentFieldName, XConten } @Override - protected MatrixStatsAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, - ValueType targetValueType, Map otherOptions) { + protected MatrixStatsAggregationBuilder createFactory( + String aggregationName, + ValuesSourceType valuesSourceType, + ValueType targetValueType, + Map otherOptions + ) { MatrixStatsAggregationBuilder builder = new MatrixStatsAggregationBuilder(aggregationName); - String mode = (String)otherOptions.get(MULTIVALUE_MODE_FIELD); + String mode = (String) otherOptions.get(MULTIVALUE_MODE_FIELD); if (mode != null) { builder.multiValueMode(MultiValueMode.fromString(mode)); } diff --git 
a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java index f98001179e40c..563001d4cc9f3 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java @@ -189,7 +189,7 @@ private void compute() { // update skewness results.skewness.put(fieldName, Math.sqrt(results.docCount) * results.skewness.get(fieldName) / Math.pow(var, 1.5D)); // update kurtosis - results.kurtosis.put(fieldName, (double)results.docCount * results.kurtosis.get(fieldName) / (var * var)); + results.kurtosis.put(fieldName, (double) results.docCount * results.kurtosis.get(fieldName) / (var * var)); // update variances results.variances.put(fieldName, results.variances.get(fieldName) / nM1); } @@ -224,8 +224,7 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; MatrixStatsResults that = (MatrixStatsResults) o; - return Objects.equals(results, that.results) && - Objects.equals(correlation, that.correlation); + return Objects.equals(results, that.results) && Objects.equals(correlation, that.correlation); } @Override diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java index 9386b15fc37ae..fe0f59cb17db4 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java @@ -8,8 +8,8 @@ package org.elasticsearch.search.aggregations.matrix.stats; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedAggregation; @@ -140,8 +140,11 @@ private static T checkedGet(final Map values, final String fieldN return values.get(fieldName); } - private static final ObjectParser PARSER = - new ObjectParser<>(ParsedMatrixStats.class.getSimpleName(), true, ParsedMatrixStats::new); + private static final ObjectParser PARSER = new ObjectParser<>( + ParsedMatrixStats.class.getSimpleName(), + true, + ParsedMatrixStats::new + ); static { declareAggregationFields(PARSER); PARSER.declareLong(ParsedMatrixStats::setDocCount, CommonFields.DOC_COUNT); @@ -176,21 +179,27 @@ static class ParsedMatrixStatsResult { Map covariances; Map correlations; - private static final ObjectParser RESULT_PARSER = - new ObjectParser<>(ParsedMatrixStatsResult.class.getSimpleName(), true, ParsedMatrixStatsResult::new); + private static final ObjectParser RESULT_PARSER = new ObjectParser<>( + ParsedMatrixStatsResult.class.getSimpleName(), + true, + ParsedMatrixStatsResult::new + ); static { - RESULT_PARSER.declareString((result, name) -> result.name = name, - new ParseField(InternalMatrixStats.Fields.NAME)); - RESULT_PARSER.declareLong((result, count) -> result.count = count, - new 
ParseField(InternalMatrixStats.Fields.COUNT)); - RESULT_PARSER.declareDouble((result, mean) -> result.mean = mean, - new ParseField(InternalMatrixStats.Fields.MEAN)); - RESULT_PARSER.declareDouble((result, variance) -> result.variance = variance, - new ParseField(InternalMatrixStats.Fields.VARIANCE)); - RESULT_PARSER.declareDouble((result, skewness) -> result.skewness = skewness, - new ParseField(InternalMatrixStats.Fields.SKEWNESS)); - RESULT_PARSER.declareDouble((result, kurtosis) -> result.kurtosis = kurtosis, - new ParseField(InternalMatrixStats.Fields.KURTOSIS)); + RESULT_PARSER.declareString((result, name) -> result.name = name, new ParseField(InternalMatrixStats.Fields.NAME)); + RESULT_PARSER.declareLong((result, count) -> result.count = count, new ParseField(InternalMatrixStats.Fields.COUNT)); + RESULT_PARSER.declareDouble((result, mean) -> result.mean = mean, new ParseField(InternalMatrixStats.Fields.MEAN)); + RESULT_PARSER.declareDouble( + (result, variance) -> result.variance = variance, + new ParseField(InternalMatrixStats.Fields.VARIANCE) + ); + RESULT_PARSER.declareDouble( + (result, skewness) -> result.skewness = skewness, + new ParseField(InternalMatrixStats.Fields.SKEWNESS) + ); + RESULT_PARSER.declareDouble( + (result, kurtosis) -> result.kurtosis = kurtosis, + new ParseField(InternalMatrixStats.Fields.KURTOSIS) + ); RESULT_PARSER.declareObject((ParsedMatrixStatsResult result, Map covars) -> { result.covariances = new LinkedHashMap<>(covars.size()); diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java index 4795175ee55cc..6f719bbb2ccb3 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java @@ -69,25 +69,25 @@ private void init() { public RunningStats(StreamInput in) throws IOException { this(); // read doc count - docCount = (Long)in.readGenericValue(); + docCount = (Long) in.readGenericValue(); // read fieldSum - fieldSum = convertIfNeeded((Map)in.readGenericValue()); + fieldSum = convertIfNeeded((Map) in.readGenericValue()); // counts - counts = convertIfNeeded((Map)in.readGenericValue()); + counts = convertIfNeeded((Map) in.readGenericValue()); // means - means = convertIfNeeded((Map)in.readGenericValue()); + means = convertIfNeeded((Map) in.readGenericValue()); // variances - variances = convertIfNeeded((Map)in.readGenericValue()); + variances = convertIfNeeded((Map) in.readGenericValue()); // skewness - skewness = convertIfNeeded((Map)in.readGenericValue()); + skewness = convertIfNeeded((Map) in.readGenericValue()); // kurtosis - kurtosis = convertIfNeeded((Map)in.readGenericValue()); + kurtosis = convertIfNeeded((Map) in.readGenericValue()); // read covariances - covariances = convertIfNeeded((Map>)in.readGenericValue()); + covariances = convertIfNeeded((Map>) in.readGenericValue()); } // Convert Map to HashMap if it isn't - private static HashMap convertIfNeeded(Map map) { + private static HashMap convertIfNeeded(Map map) { if (map instanceof HashMap) { return (HashMap) map; } else { @@ -211,7 +211,7 @@ public void merge(final RunningStats other) { this.counts.put(fieldName, other.counts.get(fieldName).longValue()); this.fieldSum.put(fieldName, other.fieldSum.get(fieldName).doubleValue()); 
this.variances.put(fieldName, other.variances.get(fieldName).doubleValue()); - this.skewness.put(fieldName , other.skewness.get(fieldName).doubleValue()); + this.skewness.put(fieldName, other.skewness.get(fieldName).doubleValue()); this.kurtosis.put(fieldName, other.kurtosis.get(fieldName).doubleValue()); if (other.covariances.containsKey(fieldName)) { this.covariances.put(fieldName, other.covariances.get(fieldName)); @@ -314,14 +314,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RunningStats that = (RunningStats) o; - return docCount == that.docCount && - Objects.equals(fieldSum, that.fieldSum) && - Objects.equals(counts, that.counts) && - Objects.equals(means, that.means) && - Objects.equals(variances, that.variances) && - Objects.equals(skewness, that.skewness) && - Objects.equals(kurtosis, that.kurtosis) && - Objects.equals(covariances, that.covariances); + return docCount == that.docCount + && Objects.equals(fieldSum, that.fieldSum) + && Objects.equals(counts, that.counts) + && Objects.equals(means, that.means) + && Objects.equals(variances, that.variances) + && Objects.equals(skewness, that.skewness) + && Objects.equals(kurtosis, that.kurtosis) + && Objects.equals(covariances, that.covariances); } @Override diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java index b12be3e6887f8..976719380e361 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java @@ -7,9 +7,9 @@ */ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; @@ -25,13 +25,13 @@ import java.util.Map; import java.util.Objects; -public abstract class ArrayValuesSourceAggregationBuilder> - extends AbstractAggregationBuilder { +public abstract class ArrayValuesSourceAggregationBuilder> extends + AbstractAggregationBuilder { public static final ParseField MULTIVALUE_MODE_FIELD = new ParseField("mode"); - public abstract static class LeafOnly> - extends ArrayValuesSourceAggregationBuilder { + public abstract static class LeafOnly> extends ArrayValuesSourceAggregationBuilder< + AB> { protected LeafOnly(String name) { super(name); @@ -40,8 +40,9 @@ protected LeafOnly(String name) { protected LeafOnly(LeafOnly clone, Builder factoriesBuilder, Map metadata) { super(clone, factoriesBuilder, metadata); if (factoriesBuilder.count() > 0) { - throw new AggregationInitializationException("Aggregator [" + name + "] of type [" - + getType() + "] cannot accept sub-aggregations"); + throw new AggregationInitializationException( + "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" + ); } } @@ -54,8 +55,9 @@ protected LeafOnly(StreamInput in) throws IOException { @Override public AB 
subAggregations(Builder subFactories) { - throw new AggregationInitializationException("Aggregator [" + name + "] of type [" + - getType() + "] cannot accept sub-aggregations"); + throw new AggregationInitializationException( + "Aggregator [" + name + "] of type [" + getType() + "] cannot accept sub-aggregations" + ); } @Override @@ -77,8 +79,11 @@ protected ArrayValuesSourceAggregationBuilder(String name) { super(name); } - protected ArrayValuesSourceAggregationBuilder(ArrayValuesSourceAggregationBuilder clone, - Builder factoriesBuilder, Map metadata) { + protected ArrayValuesSourceAggregationBuilder( + ArrayValuesSourceAggregationBuilder clone, + Builder factoriesBuilder, + Map metadata + ) { super(clone, factoriesBuilder, metadata); this.fields = new ArrayList<>(clone.fields); this.userValueTypeHint = clone.userValueTypeHint; @@ -87,8 +92,7 @@ protected ArrayValuesSourceAggregationBuilder(ArrayValuesSourceAggregationBuilde this.missing = clone.missing; } - protected ArrayValuesSourceAggregationBuilder(StreamInput in) - throws IOException { + protected ArrayValuesSourceAggregationBuilder(StreamInput in) throws IOException { super(in); read(in); } @@ -98,7 +102,7 @@ protected ArrayValuesSourceAggregationBuilder(StreamInput in) */ @SuppressWarnings("unchecked") private void read(StreamInput in) throws IOException { - fields = (ArrayList)in.readGenericValue(); + fields = (ArrayList) in.readGenericValue(); userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream); format = in.readOptionalString(); missingMap = in.readMap(); @@ -178,8 +182,11 @@ public Map missingMap() { } @Override - protected final ArrayValuesSourceAggregatorFactory doBuild(AggregationContext context, AggregatorFactory parent, - Builder subFactoriesBuilder) throws IOException { + protected final ArrayValuesSourceAggregatorFactory doBuild( + AggregationContext context, + AggregatorFactory parent, + Builder subFactoriesBuilder + ) throws IOException { Map configs = resolveConfig(context); ArrayValuesSourceAggregatorFactory factory = innerBuild(context, configs, parent, subFactoriesBuilder); return factory; @@ -188,17 +195,27 @@ protected final ArrayValuesSourceAggregatorFactory doBuild(AggregationContext co protected Map resolveConfig(AggregationContext context) { HashMap configs = new HashMap<>(); for (String field : fields) { - ValuesSourceConfig config = ValuesSourceConfig.resolveUnregistered(context, userValueTypeHint, field, null, - missingMap.get(field), null, format, CoreValuesSourceType.KEYWORD); + ValuesSourceConfig config = ValuesSourceConfig.resolveUnregistered( + context, + userValueTypeHint, + field, + null, + missingMap.get(field), + null, + format, + CoreValuesSourceType.KEYWORD + ); configs.put(field, config); } return configs; } - protected abstract ArrayValuesSourceAggregatorFactory innerBuild(AggregationContext context, - Map configs, - AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) throws IOException; + protected abstract ArrayValuesSourceAggregatorFactory innerBuild( + AggregationContext context, + Map configs, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder + ) throws IOException; @Override public final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java 
b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java index 521647d7ba372..da7cf0ee6a940 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java @@ -17,15 +17,18 @@ import java.util.HashMap; import java.util.Map; -public abstract class ArrayValuesSourceAggregatorFactory - extends AggregatorFactory { +public abstract class ArrayValuesSourceAggregatorFactory extends AggregatorFactory { protected Map configs; - public ArrayValuesSourceAggregatorFactory(String name, Map configs, - AggregationContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, - Map metadata) throws IOException { + public ArrayValuesSourceAggregatorFactory( + String name, + Map configs, + AggregationContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata + ) throws IOException { super(name, context, parent, subFactoriesBuilder, metadata); this.configs = configs; } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceParser.java index fcd0b4e124081..cfd481a5fb4e1 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceParser.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceParser.java @@ -8,8 +8,8 @@ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder.CommonFields; @@ -56,8 +56,7 @@ private ArrayValuesSourceParser(boolean formattable, ValuesSourceType valuesSour } @Override - public final ArrayValuesSourceAggregationBuilder parse(String aggregationName, XContentParser parser) - throws IOException { + public final ArrayValuesSourceAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { List fields = null; String format = null; @@ -74,12 +73,22 @@ public final ArrayValuesSourceAggregationBuilder parse(String aggregationName } else if (formattable && CommonFields.FORMAT.match(currentFieldName, parser.getDeprecationHandler())) { format = parser.text(); } else if (CommonFields.VALUE_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + - "Multi-field aggregations do not support scripts."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + + token + + " [" + + currentFieldName + + "] in [" + + aggregationName + + "]. " + + "Multi-field aggregations do not support scripts." 
+ ); } else if (token(aggregationName, currentFieldName, token, parser, otherOptions) == false) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]." + ); } } else if (token == XContentParser.Token.START_OBJECT) { if (CommonFields.MISSING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -88,41 +97,69 @@ public final ArrayValuesSourceAggregationBuilder parse(String aggregationName parseMissingAndAdd(aggregationName, currentFieldName, parser, missingMap); } } else if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + - "Multi-field aggregations do not support scripts."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + + token + + " [" + + currentFieldName + + "] in [" + + aggregationName + + "]. " + + "Multi-field aggregations do not support scripts." + ); } else if (token(aggregationName, currentFieldName, token, parser, otherOptions) == false) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]." + ); } } else if (token == XContentParser.Token.START_ARRAY) { if (Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]. " + - "Multi-field aggregations do not support scripts."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + + token + + " [" + + currentFieldName + + "] in [" + + aggregationName + + "]. " + + "Multi-field aggregations do not support scripts." + ); } else if (CommonFields.FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { fields = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { fields.add(parser.text()); } else { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]." + ); } } } else if (token(aggregationName, currentFieldName, token, parser, otherOptions) == false) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]." 
+ ); } } else if (token(aggregationName, currentFieldName, token, parser, otherOptions) == false) { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]."); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]." + ); } } - ArrayValuesSourceAggregationBuilder factory = createFactory(aggregationName, this.valuesSourceType, this.targetValueType, - otherOptions); + ArrayValuesSourceAggregationBuilder factory = createFactory( + aggregationName, + this.valuesSourceType, + this.targetValueType, + otherOptions + ); if (fields != null) { factory.fields(fields); } @@ -135,8 +172,12 @@ public final ArrayValuesSourceAggregationBuilder parse(String aggregationName return factory; } - private void parseMissingAndAdd(final String aggregationName, final String currentFieldName, - XContentParser parser, final Map missing) throws IOException { + private void parseMissingAndAdd( + final String aggregationName, + final String currentFieldName, + XContentParser parser, + final Map missing + ) throws IOException { XContentParser.Token token = parser.currentToken(); if (token == null) { token = parser.nextToken(); @@ -145,15 +186,18 @@ private void parseMissingAndAdd(final String aggregationName, final String curre if (token == XContentParser.Token.FIELD_NAME) { final String fieldName = parser.currentName(); if (missing.containsKey(fieldName)) { - throw new ParsingException(parser.getTokenLocation(), - "Missing field [" + fieldName + "] already defined as [" + missing.get(fieldName) - + "] in [" + aggregationName + "]."); + throw new ParsingException( + parser.getTokenLocation(), + "Missing field [" + fieldName + "] already defined as [" + missing.get(fieldName) + "] in [" + aggregationName + "]." 
+ ); } parser.nextToken(); missing.put(fieldName, parser.objectText()); } else { - throw new ParsingException(parser.getTokenLocation(), - "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]"); + throw new ParsingException( + parser.getTokenLocation(), + "Unexpected token " + token + " [" + currentFieldName + "] in [" + aggregationName + "]" + ); } } @@ -175,10 +219,12 @@ private void parseMissingAndAdd(final String aggregationName, final String curre * method * @return the created factory */ - protected abstract ArrayValuesSourceAggregationBuilder createFactory(String aggregationName, - ValuesSourceType valuesSourceType, - ValueType targetValueType, - Map otherOptions); + protected abstract ArrayValuesSourceAggregationBuilder createFactory( + String aggregationName, + ValuesSourceType valuesSourceType, + ValueType targetValueType, + Map otherOptions + ); /** * Allows subclasses of {@link ArrayValuesSourceParser} to parse extra @@ -203,6 +249,11 @@ protected abstract ArrayValuesSourceAggregationBuilder createFactory(String a * @throws IOException * if an error occurs whilst parsing */ - protected abstract boolean token(String aggregationName, String currentFieldName, XContentParser.Token token, XContentParser parser, - Map otherOptions) throws IOException; + protected abstract boolean token( + String aggregationName, + String currentFieldName, + XContentParser.Token token, + XContentParser parser, + Map otherOptions + ) throws IOException; } diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java index 31fda40c212e6..c8055253b9aef 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java @@ -7,13 +7,13 @@ */ package org.elasticsearch.search.aggregations.matrix.stats; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.script.ScriptService; @@ -58,8 +58,10 @@ public void setUp() throws Exception { @Override protected List getNamedXContents() { ContextParser parser = (p, c) -> ParsedMatrixStats.fromXContent(p, (String) c); - return CollectionUtils.appendToCopy(getDefaultNamedXContents(), - new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(MatrixStatsAggregationBuilder.NAME), parser)); + return CollectionUtils.appendToCopy( + getDefaultNamedXContents(), + new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(MatrixStatsAggregationBuilder.NAME), parser) + ); } @Override @@ -83,35 +85,35 @@ protected InternalMatrixStats mutateInstance(InternalMatrixStats instance) { MatrixStatsResults matrixStatsResults = instance.getResults(); Map metadata = instance.getMetadata(); switch (between(0, 3)) { - case 0: - name += 
randomAlphaOfLength(5); - break; - case 1: - String[] fields = Arrays.copyOf(this.fields, this.fields.length + 1); - fields[fields.length - 1] = "field_" + (fields.length - 1); - double[] values = new double[fields.length]; - for (int i = 0; i < fields.length; i++) { - values[i] = randomDouble() * 200; - } - runningStats = new RunningStats(); - runningStats.add(fields, values); - break; - case 2: - if (matrixStatsResults == null) { - matrixStatsResults = new MatrixStatsResults(runningStats); - } else { - matrixStatsResults = null; - } - break; - case 3: - default: - if (metadata == null) { - metadata = new HashMap<>(1); - } else { - metadata = new HashMap<>(instance.getMetadata()); - } - metadata.put(randomAlphaOfLength(15), randomInt()); - break; + case 0: + name += randomAlphaOfLength(5); + break; + case 1: + String[] fields = Arrays.copyOf(this.fields, this.fields.length + 1); + fields[fields.length - 1] = "field_" + (fields.length - 1); + double[] values = new double[fields.length]; + for (int i = 0; i < fields.length; i++) { + values[i] = randomDouble() * 200; + } + runningStats = new RunningStats(); + runningStats.add(fields, values); + break; + case 2: + if (matrixStatsResults == null) { + matrixStatsResults = new MatrixStatsResults(runningStats); + } else { + matrixStatsResults = null; + } + break; + case 3: + default: + if (metadata == null) { + metadata = new HashMap<>(1); + } else { + metadata = new HashMap<>(instance.getMetadata()); + } + metadata.put(randomAlphaOfLength(15), randomInt()); + break; } return new InternalMatrixStats(name, docCount, runningStats, matrixStatsResults, metadata); } @@ -135,7 +137,7 @@ public void testReduceRandom() { double valueB = randomDouble(); bValues.add(valueB); - runningStats.add(new String[]{"a", "b"}, new double[]{valueA, valueB}); + runningStats.add(new String[] { "a", "b" }, new double[] { valueA, valueB }); if (++valuePerShardCounter == valuesPerShard) { shardResults.add(new InternalMatrixStats("_name", 1L, runningStats, null, Collections.emptyMap())); runningStats = new RunningStats(); @@ -152,7 +154,11 @@ public void testReduceRandom() { ScriptService mockScriptService = mockScriptService(); MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction( - bigArrays, mockScriptService, b -> {}, PipelineTree.EMPTY); + bigArrays, + mockScriptService, + b -> {}, + PipelineTree.EMPTY + ); InternalMatrixStats reduced = (InternalMatrixStats) shardResults.get(0).reduce(shardResults, context); multiPassStats.assertNearlyEqual(reduced.getResults()); } diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java index 1e1e785b92b31..2edaf28beeffc 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java @@ -30,18 +30,17 @@ public class MatrixStatsAggregatorTests extends AggregatorTestCase { public void testNoData() throws Exception { - MappedFieldType ft = - new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.DOUBLE); + MappedFieldType ft = new 
NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.DOUBLE); - try (Directory directory = newDirectory(); - RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try (Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { if (randomBoolean()) { indexWriter.addDocument(Collections.singleton(new StringField("another_field", "value", Field.Store.NO))); } try (IndexReader reader = indexWriter.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") - .fields(Collections.singletonList("field")); + MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg").fields( + Collections.singletonList("field") + ); InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ft); assertNull(stats.getStats()); assertEquals(0L, stats.getDocCount()); @@ -52,15 +51,15 @@ public void testNoData() throws Exception { public void testUnmapped() throws Exception { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.DOUBLE); - try (Directory directory = newDirectory(); - RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try (Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { if (randomBoolean()) { indexWriter.addDocument(Collections.singleton(new StringField("another_field", "value", Field.Store.NO))); } try (IndexReader reader = indexWriter.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") - .fields(Collections.singletonList("bogus")); + MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg").fields( + Collections.singletonList("bogus") + ); InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ft); assertNull(stats.getStats()); assertEquals(0L, stats.getDocCount()); @@ -74,8 +73,7 @@ public void testTwoFields() throws Exception { String fieldB = "b"; MappedFieldType ftB = new NumberFieldMapper.NumberFieldType(fieldB, NumberFieldMapper.NumberType.DOUBLE); - try (Directory directory = newDirectory(); - RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + try (Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { int numDocs = scaledRandomIntBetween(8192, 16384); Double[] fieldAValues = new Double[numDocs]; @@ -94,8 +92,9 @@ public void testTwoFields() throws Exception { multiPassStats.computeStats(Arrays.asList(fieldAValues), Arrays.asList(fieldBValues)); try (IndexReader reader = indexWriter.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") - .fields(Arrays.asList(fieldA, fieldB)); + MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg").fields( + Arrays.asList(fieldA, fieldB) + ); InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); multiPassStats.assertNearlyEqual(stats); assertTrue(MatrixAggregationInspectionHelper.hasValue(stats)); diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java 
b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java index c6cba046635e6..81ec04389a06f 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java @@ -118,8 +118,8 @@ void assertNearlyEqual(MatrixStatsResults stats) { assertTrue(nearlyEqual(kurtosis.get(fieldAKey), stats.getKurtosis(fieldAKey), 1e-4)); assertTrue(nearlyEqual(kurtosis.get(fieldBKey), stats.getKurtosis(fieldBKey), 1e-4)); // covariances - assertTrue(nearlyEqual(covariances.get(fieldAKey).get(fieldBKey),stats.getCovariance(fieldAKey, fieldBKey), 1e-7)); - assertTrue(nearlyEqual(covariances.get(fieldBKey).get(fieldAKey),stats.getCovariance(fieldBKey, fieldAKey), 1e-7)); + assertTrue(nearlyEqual(covariances.get(fieldAKey).get(fieldBKey), stats.getCovariance(fieldAKey, fieldBKey), 1e-7)); + assertTrue(nearlyEqual(covariances.get(fieldBKey).get(fieldAKey), stats.getCovariance(fieldBKey, fieldAKey), 1e-7)); // correlation assertTrue(nearlyEqual(correlations.get(fieldAKey).get(fieldBKey), stats.getCorrelation(fieldAKey, fieldBKey), 1e-7)); assertTrue(nearlyEqual(correlations.get(fieldBKey).get(fieldAKey), stats.getCorrelation(fieldBKey, fieldAKey), 1e-7)); @@ -142,8 +142,8 @@ void assertNearlyEqual(InternalMatrixStats stats) { assertTrue(nearlyEqual(kurtosis.get(fieldAKey), stats.getKurtosis(fieldAKey), 1e-4)); assertTrue(nearlyEqual(kurtosis.get(fieldBKey), stats.getKurtosis(fieldBKey), 1e-4)); // covariances - assertTrue(nearlyEqual(covariances.get(fieldAKey).get(fieldBKey),stats.getCovariance(fieldAKey, fieldBKey), 1e-7)); - assertTrue(nearlyEqual(covariances.get(fieldBKey).get(fieldAKey),stats.getCovariance(fieldBKey, fieldAKey), 1e-7)); + assertTrue(nearlyEqual(covariances.get(fieldAKey).get(fieldBKey), stats.getCovariance(fieldAKey, fieldBKey), 1e-7)); + assertTrue(nearlyEqual(covariances.get(fieldBKey).get(fieldAKey), stats.getCovariance(fieldBKey, fieldAKey), 1e-7)); // correlation assertTrue(nearlyEqual(correlations.get(fieldAKey).get(fieldBKey), stats.getCorrelation(fieldAKey, fieldBKey), 1e-7)); assertTrue(nearlyEqual(correlations.get(fieldBKey).get(fieldAKey), stats.getCorrelation(fieldBKey, fieldAKey), 1e-7)); diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java index 5ba22156b9df8..3768f00962dd2 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java @@ -25,9 +25,9 @@ public void testMergedStats() throws Exception { int start = 0; RunningStats stats = null; List fieldAShard, fieldBShard; - for (int s = 0; s < numShards-1; start = ++s * (int)obsPerShard) { - fieldAShard = fieldA.subList(start, start + (int)obsPerShard); - fieldBShard = fieldB.subList(start, start + (int)obsPerShard); + for (int s = 0; s < numShards - 1; start = ++s * (int) obsPerShard) { + fieldAShard = fieldA.subList(start, start + (int) obsPerShard); + fieldBShard = fieldB.subList(start, start + (int) obsPerShard); if (stats == null) { stats = createRunningStats(fieldAShard, fieldBShard); } else { diff --git 
a/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java b/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java index 11de471e6fdf9..6f29a3fb765f3 100644 --- a/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java +++ b/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java @@ -9,11 +9,12 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; public class MatrixStatsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - public MatrixStatsClientYamlTestSuiteIT(@Name("yaml")ClientYamlTestCandidate testCandidate) { + public MatrixStatsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 02e715c03bca9..702f32ca8d199 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -25,7 +25,7 @@ dependencies { compileOnly project(':modules:lang-painless') } -tasks.named("yamlRestCompatTest").configure { +tasks.named("yamlRestTestV7CompatTest").configure { systemProperty 'tests.rest.blacklist', [ //marked as not needing compatible api 'indices.analyze/10_analyze/htmlStrip_deprecated', // Cleanup versioned deprecations in analysis #41560 diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle deleted file mode 100644 index 1c8537df6fc25..0000000000000 --- a/modules/geo/build.gradle +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -apply plugin: 'elasticsearch.yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' - -import org.elasticsearch.gradle.internal.info.BuildParams - -esplugin { - description 'Placeholder plugin for geospatial features in ES. 
only registers geo_shape field mapper for now' - classname 'org.elasticsearch.geo.GeoPlugin' -} - -restResources { - restApi { - include '_common', 'indices', 'index', 'search' - } -} -artifacts { - restTests(project.file('src/yamlRestTest/resources/rest-api-spec/test')) -} -tasks.named("test").configure { enabled = false } - -if (BuildParams.inFipsJvm){ - // The geo module is replaced by spatial in the default distribution and in FIPS 140 mode, we set the testclusters to - // use the default distribution, so there is no need to run these tests - tasks.named("yamlRestTest").configure{enabled = false } - tasks.named("yamlRestCompatTest").configure{enabled = false } -} diff --git a/modules/geo/src/main/java/org/elasticsearch/geo/GeoPlugin.java b/modules/geo/src/main/java/org/elasticsearch/geo/GeoPlugin.java deleted file mode 100644 index a719dde1d9f62..0000000000000 --- a/modules/geo/src/main/java/org/elasticsearch/geo/GeoPlugin.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.geo; - -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.Plugin; - -import java.util.Collections; -import java.util.Map; - -public class GeoPlugin extends Plugin implements MapperPlugin { - - @Override - public Map getMappers() { - return Collections.singletonMap(GeoShapeFieldMapper.CONTENT_TYPE, GeoShapeFieldMapper.PARSER); - } -} diff --git a/modules/geo/src/yamlRestTest/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java b/modules/geo/src/yamlRestTest/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java deleted file mode 100644 index ac081af447633..0000000000000 --- a/modules/geo/src/yamlRestTest/java/org/elasticsearch/geo/GeoClientYamlTestSuiteIT.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.geo; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -/** Runs yaml rest tests */ -public class GeoClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - - public GeoClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } -} diff --git a/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/10_basic.yml b/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/10_basic.yml deleted file mode 100644 index aaa692ba933b7..0000000000000 --- a/modules/geo/src/yamlRestTest/resources/rest-api-spec/test/geo_shape/10_basic.yml +++ /dev/null @@ -1,82 +0,0 @@ -setup: - - do: - indices.create: - index: test - body: - settings: - number_of_replicas: 0 - mappings: - properties: - location: - type: geo_shape - - - do: - index: - index: test - id: 1 - body: - location: "POINT (1.0 1.0)" - - - do: - indices.refresh: {} - ---- -"Test Geo Shape Query": - - - do: - search: - rest_total_hits_as_int: true - body: - query: - bool: - filter: - geo_shape: - location: - shape: - type: Envelope - coordinates: - - [-80.0, 34.0] - - [43, -13.0] - relation: within - - - match: - hits.total: 1 - - - match: - hits.hits.0._id: "1" - ---- -"Test Exists Query on geo_shape field": - - do: - search: - rest_total_hits_as_int: true - index: test - body: - query: - exists: - field: location - - - match: {hits.total: 1} - ---- -"Test retrieve geo_shape field": - - do: - search: - index: test - body: - fields: [location] - _source: false - - - match: { hits.hits.0.fields.location.0.type: "Point" } - - match: { hits.hits.0.fields.location.0.coordinates: [1.0, 1.0] } - - - do: - search: - index: test - body: - fields: - - field: location - format: wkt - _source: false - - - match: { hits.hits.0.fields.location.0: "POINT (1.0 1.0)" } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index af28ddec0cd79..fe668993da4e7 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -44,12 +44,6 @@ tasks.named("thirdPartyAudit").configure { ) } -tasks.named("transformV7RestTests").configure({ task -> +tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.addAllowedWarningRegex("\\[types removal\\].*") -}) - -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', ([ - 'ingest/120_grok/Test Grok Patterns Retrieval' // un-mute this test after backporting - ]).join(',') } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java index 7e71519223214..73f26b895ff57 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessor.java @@ -26,6 +26,8 @@ public final class GrokProcessor extends AbstractProcessor { public static final String TYPE = "grok"; + public static final String DEFAULT_ECS_COMPATIBILITY_MODE = Grok.ECS_COMPATIBILITY_MODES[0]; + private static final String PATTERN_MATCH_KEY = "_ingest._grok_match_index"; private static final Logger logger = LogManager.getLogger(GrokProcessor.class); @@ -127,11 +129,9 @@ static String combinePatterns(List patterns, boolean traceMatch) { public static final class Factory implements Processor.Factory { - private final Map builtinPatterns; private final MatcherWatchdog matcherWatchdog; - public Factory(Map builtinPatterns, MatcherWatchdog matcherWatchdog) { - this.builtinPatterns = builtinPatterns; + public Factory(MatcherWatchdog matcherWatchdog) { this.matcherWatchdog = matcherWatchdog; } @@ -142,12 +142,19 @@ public GrokProcessor create(Map registry, String proc List matchPatterns = ConfigurationUtils.readList(TYPE, processorTag, config, "patterns"); boolean traceMatch = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "trace_match", false); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + String ecsCompatibility = + ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "ecs_compatibility", DEFAULT_ECS_COMPATIBILITY_MODE); + if (Grok.isValidEcsCompatibilityMode(ecsCompatibility) == false) { + throw newConfigurationException(TYPE, processorTag, "ecs_compatibility", "unsupported mode '" + ecsCompatibility + "'"); + } if (matchPatterns.isEmpty()) { throw newConfigurationException(TYPE, processorTag, "patterns", "List of patterns must not be empty"); } Map customPatternBank = ConfigurationUtils.readOptionalMap(TYPE, processorTag, config, "pattern_definitions"); - Map patternBank = new HashMap<>(builtinPatterns); + Map patternBank = new HashMap<>( + Grok.getBuiltinPatterns(ecsCompatibility) + ); if (customPatternBank != null) { patternBank.putAll(customPatternBank); } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 0ec7b237dea7e..b8e0aea827a29 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -7,6 +7,7 
@@ */ package org.elasticsearch.ingest.common; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -46,14 +47,19 @@ private GrokProcessorGetAction() { public static class Request extends ActionRequest { private final boolean sorted; + private final String ecsCompatibility; - public Request(boolean sorted) { + public Request(boolean sorted, String ecsCompatibility) { this.sorted = sorted; + this.ecsCompatibility = ecsCompatibility; } Request(StreamInput in) throws IOException { super(in); this.sorted = in.readBoolean(); + this.ecsCompatibility = in.getVersion().onOrAfter(Version.V_8_0_0) + ? in.readString() + : GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE; } @Override @@ -65,11 +71,18 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(sorted); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeString(ecsCompatibility); + } } public boolean sorted() { return sorted; } + + public String getEcsCompatibility() { + return ecsCompatibility; + } } public static class Response extends ActionResponse implements ToXContentObject { @@ -105,25 +118,38 @@ public void writeTo(StreamOutput out) throws IOException { public static class TransportAction extends HandledTransportAction { - private final Map grokPatterns; - private final Map sortedGrokPatterns; + private final Map legacyGrokPatterns; + private final Map sortedLegacyGrokPatterns; + private final Map ecsV1GrokPatterns; + private final Map sortedEcsV1GrokPatterns; @Inject public TransportAction(TransportService transportService, ActionFilters actionFilters) { - this(transportService, actionFilters, Grok.BUILTIN_PATTERNS); + this(transportService, actionFilters, Grok.getBuiltinPatterns(false), Grok.getBuiltinPatterns(true)); } // visible for testing - TransportAction(TransportService transportService, ActionFilters actionFilters, Map grokPatterns) { + TransportAction( + TransportService transportService, + ActionFilters actionFilters, + Map legacyGrokPatterns, + Map ecsV1GrokPatterns) { super(NAME, transportService, actionFilters, Request::new); - this.grokPatterns = grokPatterns; - this.sortedGrokPatterns = new TreeMap<>(this.grokPatterns); + this.legacyGrokPatterns = legacyGrokPatterns; + this.sortedLegacyGrokPatterns = new TreeMap<>(this.legacyGrokPatterns); + this.ecsV1GrokPatterns = ecsV1GrokPatterns; + this.sortedEcsV1GrokPatterns = new TreeMap<>(this.ecsV1GrokPatterns); } @Override protected void doExecute(Task task, Request request, ActionListener listener) { try { - listener.onResponse(new Response(request.sorted() ? sortedGrokPatterns : grokPatterns)); + listener.onResponse(new Response( + request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) + ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns + : request.sorted() ? 
sortedEcsV1GrokPatterns : ecsV1GrokPatterns + ) + ); } catch (Exception e) { listener.onFailure(e); } @@ -145,7 +171,11 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { boolean sorted = request.paramAsBoolean("s", false); - Request grokPatternsRequest = new Request(sorted); + String ecsCompatibility = request.param("ecs_compatibility", GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE); + if (Grok.isValidEcsCompatibilityMode(ecsCompatibility) == false) { + throw new IllegalArgumentException("unsupported ECS compatibility mode [" + ecsCompatibility + "]"); + } + Request grokPatternsRequest = new Request(sorted, ecsCompatibility); return channel -> client.executeLocally(INSTANCE, grokPatternsRequest, new RestToXContentListener<>(channel)); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index 9bb5a0c3fa430..945ce13957124 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.grok.Grok; import org.elasticsearch.grok.MatcherWatchdog; import org.elasticsearch.ingest.DropProcessor; import org.elasticsearch.ingest.PipelineProcessor; @@ -66,7 +65,7 @@ public Map getProcessors(Processor.Parameters paramet entry(ForEachProcessor.TYPE, new ForEachProcessor.Factory(parameters.scriptService)), entry(DateIndexNameProcessor.TYPE, new DateIndexNameProcessor.Factory(parameters.scriptService)), entry(SortProcessor.TYPE, new SortProcessor.Factory()), - entry(GrokProcessor.TYPE, new GrokProcessor.Factory(Grok.BUILTIN_PATTERNS, createGrokThreadWatchdog(parameters))), + entry(GrokProcessor.TYPE, new GrokProcessor.Factory(createGrokThreadWatchdog(parameters))), entry(ScriptProcessor.TYPE, new ScriptProcessor.Factory(parameters.scriptService)), entry(DotExpanderProcessor.TYPE, new DotExpanderProcessor.Factory()), entry(JsonProcessor.TYPE, new JsonProcessor.Factory()), diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java index 1bd85c2aca913..c9d0c0f49e6ee 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java @@ -23,7 +23,7 @@ public class GrokProcessorFactoryTests extends ESTestCase { public void testBuild() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); @@ -37,7 +37,7 @@ public void testBuild() throws Exception { } public void testBuildWithIgnoreMissing() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", 
"_field"); @@ -52,7 +52,7 @@ public void testBuildWithIgnoreMissing() throws Exception { } public void testBuildMissingField() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("patterns", Collections.singletonList("(?\\w+)")); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); @@ -60,7 +60,7 @@ public void testBuildMissingField() throws Exception { } public void testBuildMissingPatterns() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "foo"); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); @@ -68,7 +68,7 @@ public void testBuildMissingPatterns() throws Exception { } public void testBuildEmptyPatternsList() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "foo"); config.put("patterns", Collections.emptyList()); @@ -77,7 +77,7 @@ public void testBuildEmptyPatternsList() throws Exception { } public void testCreateWithCustomPatterns() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); @@ -90,7 +90,7 @@ public void testCreateWithCustomPatterns() throws Exception { } public void testCreateWithInvalidPattern() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); config.put("patterns", Collections.singletonList("[")); @@ -99,7 +99,7 @@ public void testCreateWithInvalidPattern() throws Exception { } public void testCreateWithInvalidPatternDefinition() throws Exception { - GrokProcessor.Factory factory = new GrokProcessor.Factory(Collections.emptyMap(), MatcherWatchdog.noop()); + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); config.put("patterns", Collections.singletonList("%{MY_PATTERN:name}!")); @@ -108,4 +108,15 @@ public void testCreateWithInvalidPatternDefinition() throws Exception { assertThat(e.getMessage(), equalTo("[patterns] Invalid regex pattern found in: [%{MY_PATTERN:name}!]. 
premature end of char-class")); } + + public void testCreateWithInvalidEcsCompatibilityMode() throws Exception { + GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); + Map config = new HashMap<>(); + config.put("field", "_field"); + config.put("patterns", Collections.singletonList("(?\\w+)")); + String invalidEcsMode = randomAlphaOfLength(3); + config.put("ecs_compatibility", invalidEcsMode); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); + assertThat(e.getMessage(), equalTo("[ecs_compatibility] unsupported mode '" + invalidEcsMode + "'")); + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java index cec1f08270df4..efae348adea33 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.grok.Grok; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportService; @@ -33,10 +34,11 @@ import static org.mockito.Mockito.mock; public class GrokProcessorGetActionTests extends ESTestCase { - private static final Map TEST_PATTERNS = Map.of("PATTERN2", "foo2", "PATTERN1", "foo1"); + private static final Map LEGACY_TEST_PATTERNS = Map.of("PATTERN2", "foo2", "PATTERN1", "foo1"); + private static final Map ECS_TEST_PATTERNS = Map.of("ECS_PATTERN2", "foo2", "ECS_PATTERN1", "foo1"); public void testRequest() throws Exception { - GrokProcessorGetAction.Request request = new GrokProcessorGetAction.Request(false); + GrokProcessorGetAction.Request request = new GrokProcessorGetAction.Request(false, GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE); BytesStreamOutput out = new BytesStreamOutput(); request.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); @@ -45,55 +47,96 @@ public void testRequest() throws Exception { } public void testResponseSerialization() throws Exception { - GrokProcessorGetAction.Response response = new GrokProcessorGetAction.Response(TEST_PATTERNS); + GrokProcessorGetAction.Response response = new GrokProcessorGetAction.Response(LEGACY_TEST_PATTERNS); BytesStreamOutput out = new BytesStreamOutput(); response.writeTo(out); StreamInput streamInput = out.bytes().streamInput(); GrokProcessorGetAction.Response otherResponse = new GrokProcessorGetAction.Response(streamInput); - assertThat(response.getGrokPatterns(), equalTo(TEST_PATTERNS)); + assertThat(response.getGrokPatterns(), equalTo(LEGACY_TEST_PATTERNS)); assertThat(response.getGrokPatterns(), equalTo(otherResponse.getGrokPatterns())); } public void testResponseSorting() { - List sortedKeys = new ArrayList<>(TEST_PATTERNS.keySet()); + List sortedKeys = new ArrayList<>(LEGACY_TEST_PATTERNS.keySet()); Collections.sort(sortedKeys); - GrokProcessorGetAction.TransportAction transportAction = - new GrokProcessorGetAction.TransportAction(mock(TransportService.class), mock(ActionFilters.class), TEST_PATTERNS); + GrokProcessorGetAction.TransportAction transportAction = new GrokProcessorGetAction.TransportAction( + mock(TransportService.class), + 
mock(ActionFilters.class), + LEGACY_TEST_PATTERNS, + ECS_TEST_PATTERNS + ); GrokProcessorGetAction.Response[] receivedResponse = new GrokProcessorGetAction.Response[1]; - transportAction.doExecute(null, new GrokProcessorGetAction.Request(true), new ActionListener<>() { - @Override - public void onResponse(GrokProcessorGetAction.Response response) { + transportAction.doExecute( + null, + new GrokProcessorGetAction.Request(true, GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE), + new ActionListener<>() { + @Override + public void onResponse(GrokProcessorGetAction.Response response) { receivedResponse[0] = response; } - @Override - public void onFailure(Exception e) { + @Override + public void onFailure(Exception e) { fail(); } - }); + } + ); assertThat(receivedResponse[0], notNullValue()); assertThat(receivedResponse[0].getGrokPatterns().keySet().toArray(), equalTo(sortedKeys.toArray())); GrokProcessorGetAction.Response firstResponse = receivedResponse[0]; - transportAction.doExecute(null, new GrokProcessorGetAction.Request(true), new ActionListener<>() { - @Override - public void onResponse(GrokProcessorGetAction.Response response) { + transportAction.doExecute( + null, + new GrokProcessorGetAction.Request(true, GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE), + new ActionListener<>() { + @Override + public void onResponse(GrokProcessorGetAction.Response response) { receivedResponse[0] = response; } - @Override - public void onFailure(Exception e) { + @Override + public void onFailure(Exception e) { fail(); } - }); + } + ); assertThat(receivedResponse[0], notNullValue()); assertThat(receivedResponse[0], not(sameInstance(firstResponse))); assertThat(receivedResponse[0].getGrokPatterns(), sameInstance(firstResponse.getGrokPatterns())); } + public void testEcsCompatibilityMode() { + List sortedKeys = new ArrayList<>(ECS_TEST_PATTERNS.keySet()); + Collections.sort(sortedKeys); + GrokProcessorGetAction.TransportAction transportAction = new GrokProcessorGetAction.TransportAction( + mock(TransportService.class), + mock(ActionFilters.class), + LEGACY_TEST_PATTERNS, + ECS_TEST_PATTERNS + ); + GrokProcessorGetAction.Response[] receivedResponse = new GrokProcessorGetAction.Response[1]; + transportAction.doExecute( + null, + new GrokProcessorGetAction.Request(true, Grok.ECS_COMPATIBILITY_MODES[1]), + new ActionListener<>() { + @Override + public void onResponse(GrokProcessorGetAction.Response response) { + receivedResponse[0] = response; + } + + @Override + public void onFailure(Exception e) { + fail(); + } + } + ); + assertThat(receivedResponse[0], notNullValue()); + assertThat(receivedResponse[0].getGrokPatterns().keySet().toArray(), equalTo(sortedKeys.toArray())); + } + @SuppressWarnings("unchecked") public void testResponseToXContent() throws Exception { - GrokProcessorGetAction.Response response = new GrokProcessorGetAction.Response(TEST_PATTERNS); + GrokProcessorGetAction.Response response = new GrokProcessorGetAction.Response(LEGACY_TEST_PATTERNS); try (XContentBuilder builder = JsonXContent.contentBuilder()) { response.toXContent(builder, ToXContent.EMPTY_PARAMS); Map converted = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index cf59994ab8d2d..b2d8689e5c2e6 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -8,7 +8,7 @@ import org.apache.tools.ant.taskdefs.condition.Os -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 
'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index b8ab92c3c65ee..21513c3433a04 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; -import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.settings.Setting; @@ -32,7 +31,6 @@ import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats; import org.elasticsearch.persistent.AllocatedPersistentTask; @@ -84,7 +82,7 @@ public class GeoIpDownloader extends AllocatedPersistentTask { long id, String type, String action, String description, TaskId parentTask, Map headers) { super(id, type, action, description, parentTask, headers); this.httpClient = httpClient; - this.client = new OriginSettingClient(client, IngestService.INGEST_ORIGIN); + this.client = client; this.clusterService = clusterService; this.threadPool = threadPool; endpoint = ENDPOINT_SETTING.get(settings); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index 02e04bccb3e65..5ef7077406b1e 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -11,13 +11,17 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -29,6 +33,7 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; /** @@ -54,15 +59,14 @@ public final class GeoIpDownloaderTaskExecutor extends 
PersistentTasksExecutor { }); } else { - persistentTasksService.sendRemoveRequest(GEOIP_DOWNLOADER, ActionListener.wrap(r -> { - }, e -> logger.error("failed to remove geoip task", e))); + stopTask(() -> { + }); } } @@ -86,23 +90,33 @@ protected void nodeOperation(AllocatedPersistentTask task, GeoIpTaskParams param currentTask.set(downloader); GeoIpTaskState geoIpTaskState = state == null ? GeoIpTaskState.EMPTY : (GeoIpTaskState) state; downloader.setState(geoIpTaskState); - downloader.runDownloader(); + if (ENABLED_SETTING.get(clusterService.state().metadata().settings(), settings)) { + downloader.runDownloader(); + } } @Override protected GeoIpDownloader createTask(long id, String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, - Map headers) { + PersistentTasksCustomMetadata.PersistentTask taskInProgress, + Map headers) { return new GeoIpDownloader(client, httpClient, clusterService, threadPool, settings, id, type, action, getDescription(taskInProgress), parentTaskId, headers); } @Override public void clusterChanged(ClusterChangedEvent event) { + if(event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)){ + //wait for state recovered + return; + } //bootstrap downloader after first cluster start clusterService.removeListener(this); - if (event.localNodeMaster() && ENABLED_SETTING.get(event.state().getMetadata().settings())) { - startTask(() -> clusterService.addListener(this)); + if (event.localNodeMaster()) { + if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { + startTask(() -> clusterService.addListener(this)); + } else { + stopTask(() -> clusterService.addListener(this)); + } } } @@ -116,7 +130,24 @@ private void startTask(Runnable onFailure) { })); } - public GeoIpDownloader getCurrentTask(){ + private void stopTask(Runnable onFailure) { + ActionListener> listener = ActionListener.wrap(r -> { + }, e -> { + if (e instanceof ResourceNotFoundException == false) { + logger.error("failed to remove geoip downloader task", e); + onFailure.run(); + } + }); + persistentTasksService.sendRemoveRequest(GEOIP_DOWNLOADER, ActionListener.runAfter(listener, () -> + client.admin().indices().prepareDelete(DATABASES_INDEX).execute(ActionListener.wrap(rr -> { + }, e -> { + if (e instanceof ResourceNotFoundException == false) { + logger.warn("failed to remove " + DATABASES_INDEX, e); + } + })))); + } + + public GeoIpDownloader getCurrentTask() { return currentTask.get(); } } diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index 1518b1d64a08c..e84ad0e5d4418 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { @@ -23,6 +23,6 @@ testClusters.all { extraConfigFile 'ingest-user-agent/test-regexes.yml', file('src/test/test-regexes.yml') } -tasks.named("transformV7RestTests").configure({ task -> +tasks.named("yamlRestTestV7CompatTransform").configure {task -> task.addAllowedWarningRegex("setting \\[ecs\\] is deprecated as ECS format is the default and only option") -}) +} diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index a73c929be0d48..b177abfc6e638 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index a4e328ac46080..817b21035e722 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.java-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -26,6 +26,6 @@ restResources { } } -tasks.named("transformV7RestTests").configure({ task -> +tasks.named("yamlRestTestV7CompatTransform").configure {task -> task.addAllowedWarningRegex("\\[types removal\\].*") -}) +} diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 278f95a020e9d..395daf771dfc7 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -8,7 +8,7 @@ import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask; apply plugin: 'elasticsearch.validate-rest-spec' -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { @@ -58,11 +58,6 @@ tasks.named("test").configure { jvmArgs '-XX:-OmitStackTraceInFastThrow', '-XX:-HeapDumpOnOutOfMemoryError' } -tasks.named("yamlRestCompatTest").configure { - systemProperty 'tests.rest.blacklist', [ - ].join(',') -} - /* Build Javadoc for the Java classes in Painless's public API that are in the * Painless plugin */ tasks.register("apiJavadoc", Javadoc) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index dfab5dc8b324c..bb166eabd662b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -9,6 +9,10 @@ package org.elasticsearch.painless.lookup; import java.lang.invoke.MethodHandle; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -25,6 +29,7 @@ public final class PainlessLookup { private final Map> 
javaClassNamesToClasses; private final Map> canonicalClassNamesToClasses; private final Map, PainlessClass> classesToPainlessClasses; + private final Map, Set>> classesToDirectSubClasses; private final Map painlessMethodKeysToImportedPainlessMethods; private final Map painlessMethodKeysToPainlessClassBindings; @@ -34,6 +39,7 @@ public final class PainlessLookup { Map> javaClassNamesToClasses, Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses, + Map, Set>> classesToDirectSubClasses, Map painlessMethodKeysToImportedPainlessMethods, Map painlessMethodKeysToPainlessClassBindings, Map painlessMethodKeysToPainlessInstanceBindings) { @@ -41,6 +47,7 @@ public final class PainlessLookup { Objects.requireNonNull(javaClassNamesToClasses); Objects.requireNonNull(canonicalClassNamesToClasses); Objects.requireNonNull(classesToPainlessClasses); + Objects.requireNonNull(classesToDirectSubClasses); Objects.requireNonNull(painlessMethodKeysToImportedPainlessMethods); Objects.requireNonNull(painlessMethodKeysToPainlessClassBindings); @@ -49,6 +56,7 @@ public final class PainlessLookup { this.javaClassNamesToClasses = javaClassNamesToClasses; this.canonicalClassNamesToClasses = Map.copyOf(canonicalClassNamesToClasses); this.classesToPainlessClasses = Map.copyOf(classesToPainlessClasses); + this.classesToDirectSubClasses = Map.copyOf(classesToDirectSubClasses); this.painlessMethodKeysToImportedPainlessMethods = Map.copyOf(painlessMethodKeysToImportedPainlessMethods); this.painlessMethodKeysToPainlessClassBindings = Map.copyOf(painlessMethodKeysToPainlessClassBindings); @@ -75,6 +83,10 @@ public Set> getClasses() { return classesToPainlessClasses.keySet(); } + public Set> getDirectSubClasses(Class superClass) { + return classesToDirectSubClasses.get(superClass); + } + public Set getImportedPainlessMethodsKeys() { return painlessMethodKeysToImportedPainlessMethods.keySet(); } @@ -138,20 +150,79 @@ public PainlessMethod lookupPainlessMethod(Class targetClass, boolean isStati Objects.requireNonNull(targetClass); Objects.requireNonNull(methodName); + if (classesToPainlessClasses.containsKey(targetClass) == false) { + return null; + } + if (targetClass.isPrimitive()) { targetClass = typeToBoxedType(targetClass); + + if (classesToPainlessClasses.containsKey(targetClass) == false) { + return null; + } } - PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass); String painlessMethodKey = buildPainlessMethodKey(methodName, methodArity); + Function objectLookup = isStatic ? + targetPainlessClass -> targetPainlessClass.staticMethods.get(painlessMethodKey) : + targetPainlessClass -> targetPainlessClass.methods.get(painlessMethodKey); - if (targetPainlessClass == null) { + return lookupPainlessObject(targetClass, objectLookup); + } + + public List lookupPainlessSubClassesMethod(String targetCanonicalClassName, String methodName, int methodArity) { + Objects.requireNonNull(targetCanonicalClassName); + + Class targetClass = canonicalTypeNameToType(targetCanonicalClassName); + + if (targetClass == null) { return null; } - return isStatic ? 
- targetPainlessClass.staticMethods.get(painlessMethodKey) : - targetPainlessClass.methods.get(painlessMethodKey); + return lookupPainlessSubClassesMethod(targetClass, methodName, methodArity); + } + + public List lookupPainlessSubClassesMethod(Class targetClass, String methodName, int methodArity) { + Objects.requireNonNull(targetClass); + Objects.requireNonNull(methodName); + + if (classesToPainlessClasses.containsKey(targetClass) == false) { + return null; + } + + if (targetClass.isPrimitive()) { + targetClass = typeToBoxedType(targetClass); + + if (classesToPainlessClasses.containsKey(targetClass) == false) { + return null; + } + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, methodArity); + List> subClasses = new ArrayList<>(classesToDirectSubClasses.get(targetClass)); + Set> resolvedSubClasses = new HashSet<>(); + List subMethods = null; + + while (subClasses.isEmpty() == false) { + Class subClass = subClasses.remove(0); + + if (resolvedSubClasses.add(subClass)) { + subClasses.addAll(classesToDirectSubClasses.get(subClass)); + + PainlessClass painlessClass = classesToPainlessClasses.get(subClass); + PainlessMethod painlessMethod = painlessClass.methods.get(painlessMethodKey); + + if (painlessMethod != null) { + if (subMethods == null) { + subMethods = new ArrayList<>(); + } + + subMethods.add(painlessMethod); + } + } + } + + return subMethods; } public PainlessField lookupPainlessField(String targetCanonicalClassName, boolean isStatic, String fieldName) { @@ -170,22 +241,16 @@ public PainlessField lookupPainlessField(Class targetClass, boolean isStatic, Objects.requireNonNull(targetClass); Objects.requireNonNull(fieldName); - PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass); - String painlessFieldKey = buildPainlessFieldKey(fieldName); - - if (targetPainlessClass == null) { + if (classesToPainlessClasses.containsKey(targetClass) == false) { return null; } - PainlessField painlessField = isStatic ? - targetPainlessClass.staticFields.get(painlessFieldKey) : - targetPainlessClass.fields.get(painlessFieldKey); - - if (painlessField == null) { - return null; - } + String painlessFieldKey = buildPainlessFieldKey(fieldName); + Function objectLookup = isStatic ? 
+ targetPainlessClass -> targetPainlessClass.staticFields.get(painlessFieldKey) : + targetPainlessClass -> targetPainlessClass.fields.get(painlessFieldKey); - return painlessField; + return lookupPainlessObject(targetClass, objectLookup); } public PainlessMethod lookupImportedPainlessMethod(String methodName, int arity) { @@ -230,7 +295,7 @@ public PainlessMethod lookupRuntimePainlessMethod(Class originalTargetClass, Function objectLookup = targetPainlessClass -> targetPainlessClass.runtimeMethods.get(painlessMethodKey); - return lookupRuntimePainlessObject(originalTargetClass, objectLookup); + return lookupPainlessObject(originalTargetClass, objectLookup); } public MethodHandle lookupRuntimeGetterMethodHandle(Class originalTargetClass, String getterName) { @@ -239,7 +304,7 @@ public MethodHandle lookupRuntimeGetterMethodHandle(Class originalTargetClass Function objectLookup = targetPainlessClass -> targetPainlessClass.getterMethodHandles.get(getterName); - return lookupRuntimePainlessObject(originalTargetClass, objectLookup); + return lookupPainlessObject(originalTargetClass, objectLookup); } public MethodHandle lookupRuntimeSetterMethodHandle(Class originalTargetClass, String setterName) { @@ -248,10 +313,13 @@ public MethodHandle lookupRuntimeSetterMethodHandle(Class originalTargetClass Function objectLookup = targetPainlessClass -> targetPainlessClass.setterMethodHandles.get(setterName); - return lookupRuntimePainlessObject(originalTargetClass, objectLookup); + return lookupPainlessObject(originalTargetClass, objectLookup); } - private T lookupRuntimePainlessObject(Class originalTargetClass, Function objectLookup) { + private T lookupPainlessObject(Class originalTargetClass, Function objectLookup) { + Objects.requireNonNull(originalTargetClass); + Objects.requireNonNull(objectLookup); + Class currentTargetClass = originalTargetClass; while (currentTargetClass != null) { @@ -268,17 +336,38 @@ private T lookupRuntimePainlessObject(Class originalTargetClass, Function currentTargetClass = currentTargetClass.getSuperclass(); } + if (originalTargetClass.isInterface()) { + PainlessClass targetPainlessClass = classesToPainlessClasses.get(Object.class); + + if (targetPainlessClass != null) { + T painlessObject = objectLookup.apply(targetPainlessClass); + + if (painlessObject != null) { + return painlessObject; + } + } + } + currentTargetClass = originalTargetClass; + Set> resolvedInterfaces = new HashSet<>(); while (currentTargetClass != null) { - for (Class targetInterface : currentTargetClass.getInterfaces()) { - PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetInterface); + List> targetInterfaces = new ArrayList<>(Arrays.asList(currentTargetClass.getInterfaces())); + + while (targetInterfaces.isEmpty() == false) { + Class targetInterface = targetInterfaces.remove(0); + + if (resolvedInterfaces.add(targetInterface)) { + PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetInterface); + + if (targetPainlessClass != null) { + T painlessObject = objectLookup.apply(targetPainlessClass); - if (targetPainlessClass != null) { - T painlessObject = objectLookup.apply(targetPainlessClass); + if (painlessObject != null) { + return painlessObject; + } - if (painlessObject != null) { - return painlessObject; + targetInterfaces.addAll(Arrays.asList(targetInterface.getInterfaces())); } } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index a7390b5415870..b819b1e134048 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -42,11 +42,14 @@ import java.security.SecureClassLoader; import java.security.cert.Certificate; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Supplier; import java.util.regex.Pattern; @@ -189,6 +192,7 @@ public static PainlessLookup buildFromWhitelists(List whitelists) { // of the values of javaClassNamesToClasses. private final Map> canonicalClassNamesToClasses; private final Map, PainlessClassBuilder> classesToPainlessClassBuilders; + private final Map, Set>> classesToDirectSubClasses; private final Map painlessMethodKeysToImportedPainlessMethods; private final Map painlessMethodKeysToPainlessClassBindings; @@ -198,6 +202,7 @@ public PainlessLookupBuilder() { javaClassNamesToClasses = new HashMap<>(); canonicalClassNamesToClasses = new HashMap<>(); classesToPainlessClassBuilders = new HashMap<>(); + classesToDirectSubClasses = new HashMap<>(); painlessMethodKeysToImportedPainlessMethods = new HashMap<>(); painlessMethodKeysToPainlessClassBindings = new HashMap<>(); @@ -1255,7 +1260,7 @@ public void addPainlessInstanceBinding( } public PainlessLookup build() { - copyPainlessClassMembers(); + buildPainlessClassHierarchy(); setFunctionalInterfaceMethods(); generateRuntimeMethods(); cacheRuntimeHandles(); @@ -1286,71 +1291,66 @@ public PainlessLookup build() { javaClassNamesToClasses, canonicalClassNamesToClasses, classesToPainlessClasses, + classesToDirectSubClasses, painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessClassBindings, painlessMethodKeysToPainlessInstanceBindings); } - private void copyPainlessClassMembers() { - for (Class parentClass : classesToPainlessClassBuilders.keySet()) { - copyPainlessInterfaceMembers(parentClass, parentClass); - - Class childClass = parentClass.getSuperclass(); - - while (childClass != null) { - if (classesToPainlessClassBuilders.containsKey(childClass)) { - copyPainlessClassMembers(childClass, parentClass); - } - - copyPainlessInterfaceMembers(childClass, parentClass); - childClass = childClass.getSuperclass(); - } - } - - for (Class javaClass : classesToPainlessClassBuilders.keySet()) { - if (javaClass.isInterface()) { - copyPainlessClassMembers(Object.class, javaClass); - } - } - } - - private void copyPainlessInterfaceMembers(Class parentClass, Class targetClass) { - for (Class childClass : parentClass.getInterfaces()) { - if (classesToPainlessClassBuilders.containsKey(childClass)) { - copyPainlessClassMembers(childClass, targetClass); - } - - copyPainlessInterfaceMembers(childClass, targetClass); + private void buildPainlessClassHierarchy() { + for (Class targetClass : classesToPainlessClassBuilders.keySet()) { + classesToDirectSubClasses.put(targetClass, new HashSet<>()); } - } - private void copyPainlessClassMembers(Class originalClass, Class targetClass) { - PainlessClassBuilder originalPainlessClassBuilder = classesToPainlessClassBuilders.get(originalClass); - PainlessClassBuilder targetPainlessClassBuilder = classesToPainlessClassBuilders.get(targetClass); + for (Class 
subClass : classesToPainlessClassBuilders.keySet()) { + List> superInterfaces = new ArrayList<>(Arrays.asList(subClass.getInterfaces())); - Objects.requireNonNull(originalPainlessClassBuilder); - Objects.requireNonNull(targetPainlessClassBuilder); + // we check for Object.class as part of the allow listed classes because + // it is possible for the compiler to work without Object + if (subClass.isInterface() && superInterfaces.isEmpty() && classesToPainlessClassBuilders.containsKey(Object.class)) { + classesToDirectSubClasses.get(Object.class).add(subClass); + } else { + Class superClass = subClass.getSuperclass(); + + // this finds the nearest super class for a given sub class + // because the allow list may have gaps between classes + // example: + // class A {} // allowed + // class B extends A // not allowed + // class C extends B // allowed + // in this case C is considered a direct sub class of A + while (superClass != null) { + if (classesToPainlessClassBuilders.containsKey(superClass)) { + break; + } else { + // this ensures all interfaces from a sub class that + // is not allow listed are checked if they are + // considered a direct super class of the sub class + // because these interfaces may still be allow listed + // even if their sub class is not + superInterfaces.addAll(Arrays.asList(superClass.getInterfaces())); + } - for (Map.Entry painlessMethodEntry : originalPainlessClassBuilder.methods.entrySet()) { - String painlessMethodKey = painlessMethodEntry.getKey(); - PainlessMethod newPainlessMethod = painlessMethodEntry.getValue(); - PainlessMethod existingPainlessMethod = targetPainlessClassBuilder.methods.get(painlessMethodKey); + superClass = superClass.getSuperclass(); + } - if (existingPainlessMethod == null || existingPainlessMethod.targetClass != newPainlessMethod.targetClass && - existingPainlessMethod.targetClass.isAssignableFrom(newPainlessMethod.targetClass)) { - targetPainlessClassBuilder.methods.put(painlessMethodKey.intern(), newPainlessMethod); + if (superClass != null) { + classesToDirectSubClasses.get(superClass).add(subClass); + } } - } - for (Map.Entry painlessFieldEntry : originalPainlessClassBuilder.fields.entrySet()) { - String painlessFieldKey = painlessFieldEntry.getKey(); - PainlessField newPainlessField = painlessFieldEntry.getValue(); - PainlessField existingPainlessField = targetPainlessClassBuilder.fields.get(painlessFieldKey); + Set> resolvedInterfaces = new HashSet<>(); + + while (superInterfaces.isEmpty() == false) { + Class superInterface = superInterfaces.remove(0); - if (existingPainlessField == null || - existingPainlessField.javaField.getDeclaringClass() != newPainlessField.javaField.getDeclaringClass() && - existingPainlessField.javaField.getDeclaringClass().isAssignableFrom(newPainlessField.javaField.getDeclaringClass())) { - targetPainlessClassBuilder.fields.put(painlessFieldKey.intern(), newPainlessField); + if (resolvedInterfaces.add(superInterface)) { + if (classesToPainlessClassBuilders.containsKey(superInterface)) { + classesToDirectSubClasses.get(superInterface).add(subClass); + } else { + superInterfaces.addAll(Arrays.asList(superInterface.getInterfaces())); + } + } } } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java new file mode 100644 index 0000000000000..09dd970adfb6d --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/LookupTests.java @@ -0,0 +1,276 @@ +/* + * 
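The `buildPainlessClassHierarchy` pass above replaces the old member-copying step with an explicit direct-sub-class map, taking care of "gaps" where an intermediate class or interface is not allow-listed. As a rough illustration of that idea only (the class `HierarchySketch`, the method `buildDirectSubClasses`, and the plain `Set` standing in for the real class-to-builder map are all hypothetical, simplified names, not the Painless implementation):

```java
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical helper showing how a direct sub class map can be derived from an
// allow list that may skip classes in the middle of a hierarchy.
final class HierarchySketch {

    static Map<Class<?>, Set<Class<?>>> buildDirectSubClasses(Set<Class<?>> allowListed) {
        Map<Class<?>, Set<Class<?>>> directSubClasses = new HashMap<>();
        for (Class<?> clazz : allowListed) {
            directSubClasses.put(clazz, new HashSet<>());
        }

        for (Class<?> subClass : allowListed) {
            Deque<Class<?>> superInterfaces = new ArrayDeque<>(Arrays.asList(subClass.getInterfaces()));

            if (subClass.isInterface() && superInterfaces.isEmpty() && allowListed.contains(Object.class)) {
                // root interfaces hang off Object, provided Object itself is allow listed
                directSubClasses.get(Object.class).add(subClass);
            } else {
                // find the nearest allow-listed super class; interfaces declared on the
                // skipped (non allow-listed) classes are still considered below
                Class<?> superClass = subClass.getSuperclass();
                while (superClass != null && allowListed.contains(superClass) == false) {
                    superInterfaces.addAll(Arrays.asList(superClass.getInterfaces()));
                    superClass = superClass.getSuperclass();
                }
                if (superClass != null) {
                    directSubClasses.get(superClass).add(subClass);
                }
            }

            // attach the sub class to every allow-listed super interface, walking
            // through non allow-listed interfaces to reach the allow-listed ones
            Set<Class<?>> resolved = new HashSet<>();
            while (superInterfaces.isEmpty() == false) {
                Class<?> superInterface = superInterfaces.removeFirst();
                if (resolved.add(superInterface)) {
                    if (allowListed.contains(superInterface)) {
                        directSubClasses.get(superInterface).add(subClass);
                    } else {
                        superInterfaces.addAll(Arrays.asList(superInterface.getInterfaces()));
                    }
                }
            }
        }
        return directSubClasses;
    }
}
```

The new `LookupTests` added next exercise exactly these gap cases: `B` and `X` are deliberately left out of the allow list, so `C` and `D` end up as direct sub classes of `A`, and `U` and `S` attach to `Z` through the non-allow-listed `X` and `Y`.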
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.painless.lookup.PainlessLookup; +import org.elasticsearch.painless.lookup.PainlessLookupBuilder; +import org.elasticsearch.painless.lookup.PainlessMethod; +import org.elasticsearch.painless.spi.WhitelistLoader; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.List; +import java.util.Set; + +public class LookupTests extends ESTestCase { + + protected PainlessLookup painlessLookup; + + @Before + public void setup() { + painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Collections.singletonList( + WhitelistLoader.loadFromResourceFiles(PainlessPlugin.class, "org.elasticsearch.painless.lookup") + )); + } + + public static class A { } // in whitelist + public static class B extends A { } // not in whitelist + public static class C extends B { // in whitelist + public String getString0() { return "C/0"; } // in whitelist + } + public static class D extends B { // in whitelist + public String getString0() { return "D/0"; } // in whitelist + public String getString1(int param0) { return "D/1 (" + param0 + ")"; } // in whitelist + } + + public interface Z { } // in whitelist + public interface Y { } // not in whitelist + public interface X extends Y, Z { } // not in whitelist + public interface V extends Y, Z { } // in whitelist + public interface U extends X { // in whitelist + String getString2(int x, int y); // in whitelist + String getString1(int param0); // in whitelist + String getString0(); // not in whitelist + } + public interface T extends V { // in whitelist + String getString1(int param0); // in whitelist + int getInt0(); // in whitelist + } + public interface S extends U, X { } // in whitelist + + public static class AA implements X { } // in whitelist + public static class AB extends AA implements S { // not in whitelist + public String getString2(int x, int y) { return "" + x + y; } // not in whitelist + public String getString1(int param0) { return "" + param0; } // not in whitelist + public String getString0() { return ""; } // not in whitelist + } + public static class AC extends AB implements V { // in whitelist + public String getString2(int x, int y) { return "" + x + y; } // in whitelist + } + public static class AD extends AA implements X, S, T { // in whitelist + public String getString2(int x, int y) { return "" + x + y; } // in whitelist + public String getString1(int param0) { return "" + param0; } // in whitelist + public String getString0() { return ""; } // not in whitelist + public int getInt0() { return 0; } // in whitelist + } + + public void testDirectSubClasses() { + Set> directSubClasses = painlessLookup.getDirectSubClasses(Object.class); + assertEquals(4, directSubClasses.size()); + assertTrue(directSubClasses.contains(String.class)); + assertTrue(directSubClasses.contains(A.class)); + assertTrue(directSubClasses.contains(Z.class)); + assertTrue(directSubClasses.contains(AA.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(A.class); + assertEquals(2, directSubClasses.size()); + assertTrue(directSubClasses.contains(D.class)); + 
assertTrue(directSubClasses.contains(C.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(B.class); + assertNull(directSubClasses); + + directSubClasses = painlessLookup.getDirectSubClasses(C.class); + assertTrue(directSubClasses.isEmpty()); + + directSubClasses = painlessLookup.getDirectSubClasses(D.class); + assertTrue(directSubClasses.isEmpty()); + + directSubClasses = painlessLookup.getDirectSubClasses(Z.class); + assertEquals(5, directSubClasses.size()); + assertTrue(directSubClasses.contains(V.class)); + assertTrue(directSubClasses.contains(U.class)); + assertTrue(directSubClasses.contains(S.class)); + assertTrue(directSubClasses.contains(AA.class)); + assertTrue(directSubClasses.contains(AD.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(Y.class); + assertNull(directSubClasses); + + directSubClasses = painlessLookup.getDirectSubClasses(X.class); + assertNull(directSubClasses); + + directSubClasses = painlessLookup.getDirectSubClasses(V.class); + assertEquals(2, directSubClasses.size()); + assertTrue(directSubClasses.contains(T.class)); + assertTrue(directSubClasses.contains(AC.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(U.class); + assertEquals(1, directSubClasses.size()); + assertTrue(directSubClasses.contains(S.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(T.class); + assertEquals(1, directSubClasses.size()); + assertTrue(directSubClasses.contains(AD.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(S.class); + assertEquals(2, directSubClasses.size()); + assertTrue(directSubClasses.contains(AC.class)); + assertTrue(directSubClasses.contains(AD.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(AA.class); + assertEquals(2, directSubClasses.size()); + assertTrue(directSubClasses.contains(AC.class)); + assertTrue(directSubClasses.contains(AD.class)); + + directSubClasses = painlessLookup.getDirectSubClasses(AB.class); + assertNull(directSubClasses); + + directSubClasses = painlessLookup.getDirectSubClasses(AC.class); + assertTrue(directSubClasses.isEmpty()); + + directSubClasses = painlessLookup.getDirectSubClasses(AD.class); + assertTrue(directSubClasses.isEmpty()); + } + + public void testDirectSubClassMethods() { + PainlessMethod CgetString0 = painlessLookup.lookupPainlessMethod(C.class, false, "getString0", 0); + PainlessMethod DgetString0 = painlessLookup.lookupPainlessMethod(D.class, false, "getString0", 0); + List subMethods = painlessLookup.lookupPainlessSubClassesMethod(A.class, "getString0", 0); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(CgetString0)); + assertTrue(subMethods.contains(DgetString0)); + + PainlessMethod DgetString1 = painlessLookup.lookupPainlessMethod(D.class, false, "getString1", 1); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(A.class, "getString1", 1); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(DgetString1)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(A.class, "getString2", 0); + assertNull(subMethods); + + PainlessMethod ACgetString2 = painlessLookup.lookupPainlessMethod(AC.class, false, "getString2", 2); + PainlessMethod ADgetString2 = painlessLookup.lookupPainlessMethod(AD.class, false, "getString2", 2); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(AA.class, "getString2", 2); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + 
assertTrue(subMethods.contains(ACgetString2)); + assertTrue(subMethods.contains(ADgetString2)); + + PainlessMethod ADgetString1 = painlessLookup.lookupPainlessMethod(AD.class, false, "getString1", 1); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(AA.class, "getString1", 1); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(ADgetString1)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(AA.class, "getString0", 0); + assertNull(subMethods); + + PainlessMethod ADgetInt0 = painlessLookup.lookupPainlessMethod(AD.class, false, "getInt0", 0); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(AA.class, "getInt0", 0); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(ADgetInt0)); + + PainlessMethod UgetString2 = painlessLookup.lookupPainlessMethod(U.class, false, "getString2", 2); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(Z.class, "getString2", 2); + assertNotNull(subMethods); + assertEquals(3, subMethods.size()); + assertTrue(subMethods.contains(UgetString2)); + assertTrue(subMethods.contains(ACgetString2)); + assertTrue(subMethods.contains(ADgetString2)); + + PainlessMethod UgetString1 = painlessLookup.lookupPainlessMethod(U.class, false, "getString1", 1); + PainlessMethod TgetString1 = painlessLookup.lookupPainlessMethod(T.class, false, "getString1", 1); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(Z.class, "getString1", 1); + assertNotNull(subMethods); + assertEquals(3, subMethods.size()); + assertTrue(subMethods.contains(UgetString1)); + assertTrue(subMethods.contains(TgetString1)); + assertTrue(subMethods.contains(ADgetString1)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(Z.class, "getString0", 0); + assertNull(subMethods); + + PainlessMethod TgetInt0 = painlessLookup.lookupPainlessMethod(T.class, false, "getInt0", 0); + subMethods = painlessLookup.lookupPainlessSubClassesMethod(Z.class, "getInt0", 0); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(TgetInt0)); + assertTrue(subMethods.contains(ADgetInt0)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(V.class, "getString2", 2); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(ACgetString2)); + assertTrue(subMethods.contains(ADgetString2)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(V.class, "getString1", 1); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(TgetString1)); + assertTrue(subMethods.contains(ADgetString1)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(V.class, "getString0", 0); + assertNull(subMethods); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(V.class, "getInt0", 0); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(TgetInt0)); + assertTrue(subMethods.contains(ADgetInt0)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(U.class, "getString2", 2); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(ACgetString2)); + assertTrue(subMethods.contains(ADgetString2)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(U.class, "getString1", 1); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(ADgetString1)); + + subMethods = 
painlessLookup.lookupPainlessSubClassesMethod(U.class, "getString0", 0); + assertNull(subMethods); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(U.class, "getInt0", 0); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(ADgetInt0)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(V.class, "getInt0", 0); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(TgetInt0)); + assertTrue(subMethods.contains(ADgetInt0)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(S.class, "getString2", 2); + assertNotNull(subMethods); + assertEquals(2, subMethods.size()); + assertTrue(subMethods.contains(ACgetString2)); + assertTrue(subMethods.contains(ADgetString2)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(S.class, "getString1", 1); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(ADgetString1)); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(S.class, "getString0", 0); + assertNull(subMethods); + + subMethods = painlessLookup.lookupPainlessSubClassesMethod(S.class, "getInt0", 0); + assertNotNull(subMethods); + assertEquals(1, subMethods.size()); + assertTrue(subMethods.contains(ADgetInt0)); + } +} diff --git a/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.lookup b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.lookup new file mode 100644 index 0000000000000..39ff66776103f --- /dev/null +++ b/modules/lang-painless/src/test/resources/org/elasticsearch/painless/org.elasticsearch.painless.lookup @@ -0,0 +1,52 @@ +class int @no_import { +} + +class java.lang.Object { +} + +class java.lang.String { +} + +class org.elasticsearch.painless.LookupTests$A { +} + +class org.elasticsearch.painless.LookupTests$C { + String getString0() +} + +class org.elasticsearch.painless.LookupTests$D { + String getString0() + String getString1(int) +} + +class org.elasticsearch.painless.LookupTests$Z { +} + +class org.elasticsearch.painless.LookupTests$V { +} + +class org.elasticsearch.painless.LookupTests$U { + String getString2(int, int); + String getString1(int); +} + +class org.elasticsearch.painless.LookupTests$T { + String getString1(int); + int getInt0(); +} + +class org.elasticsearch.painless.LookupTests$S { +} + +class org.elasticsearch.painless.LookupTests$AA { +} + +class org.elasticsearch.painless.LookupTests$AC { + String getString2(int, int); +} + +class org.elasticsearch.painless.LookupTests$AD { + String getString2(int, int) + String getString1(int) + int getInt0() +} \ No newline at end of file diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml index df571349ecb97..eb02667536104 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/40_fields_api.yml @@ -63,6 +63,40 @@ setup: - match: { hits.hits.1._id: d1 } - match: { hits.hits.2._id: d2 } +--- +"script fields api for dates": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + properties: + dt: + type: date_nanos + - do: + index: + index: test + id: d1 + body: {"dt": "2021-08-24T18:45:52.123456789Z" } + - do: + 
indices.refresh: {} + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: { "match_all": {} } + sort: [ { dt: asc } ] + script_fields: + date_field: + script: + source: "field('dt').getLong(100L)" + - match: { hits.total: 1 } + - match: { hits.hits.0._id: d1 } + - match: { hits.hits.0.fields.date_field.0: 1629830752123456789 } + --- "script score fields api": - do: diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index f518204d2f056..d63df5a685931 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java index fff7ac2a86ce5..31086cc826176 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -41,7 +41,6 @@ import java.io.IOException; import java.io.UncheckedIOException; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -72,10 +71,6 @@ public static class Defaults { } - private static Builder builder(FieldMapper in) { - return ((MatchOnlyTextFieldMapper) in).builder; - } - public static class Builder extends FieldMapper.Builder { private final Version indexCreatedVersion; @@ -91,37 +86,32 @@ public Builder(String name, IndexAnalyzers indexAnalyzers) { public Builder(String name, Version indexCreatedVersion, IndexAnalyzers indexAnalyzers) { super(name); this.indexCreatedVersion = indexCreatedVersion; - this.analyzers = new TextParams.Analyzers(indexAnalyzers, m -> builder(m).analyzers); - } - - public Builder addMultiField(FieldMapper.Builder builder) { - this.multiFieldsBuilder.add(builder); - return this; + this.analyzers = new TextParams.Analyzers(indexAnalyzers, + m -> ((MatchOnlyTextFieldMapper) m).indexAnalyzer, + m -> ((MatchOnlyTextFieldMapper) m).positionIncrementGap); } @Override protected List> getParameters() { - return Arrays.asList(meta); + return List.of(meta); } - private MatchOnlyTextFieldType buildFieldType(FieldType fieldType, ContentPath contentPath) { + private MatchOnlyTextFieldType buildFieldType(ContentPath contentPath) { NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer(); NamedAnalyzer searchQuoteAnalyzer = analyzers.getSearchQuoteAnalyzer(); NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer(); - TextSearchInfo tsi = new TextSearchInfo(fieldType, null, searchAnalyzer, searchQuoteAnalyzer); - MatchOnlyTextFieldType ft = new MatchOnlyTextFieldType(buildFullName(contentPath), tsi, indexAnalyzer, meta.getValue()); - return ft; + TextSearchInfo tsi = new TextSearchInfo(Defaults.FIELD_TYPE, null, searchAnalyzer, searchQuoteAnalyzer); + return new MatchOnlyTextFieldType(buildFullName(contentPath), tsi, indexAnalyzer, meta.getValue()); } @Override public MatchOnlyTextFieldMapper build(ContentPath contentPath) { - MatchOnlyTextFieldType tft = buildFieldType(Defaults.FIELD_TYPE, contentPath); + MatchOnlyTextFieldType tft = 
buildFieldType(contentPath); MultiFields multiFields = multiFieldsBuilder.build(this, contentPath); return new MatchOnlyTextFieldMapper( name, Defaults.FIELD_TYPE, tft, - analyzers.getIndexAnalyzer(), multiFields, copyTo.build(), this @@ -142,19 +132,6 @@ public MatchOnlyTextFieldType(String name, TextSearchInfo tsi, Analyzer indexAna this.textFieldType = new TextFieldType(name); } - public MatchOnlyTextFieldType(String name, boolean stored, Map meta) { - super( - name, - true, - stored, - false, - new TextSearchInfo(Defaults.FIELD_TYPE, null, Lucene.STANDARD_ANALYZER, Lucene.STANDARD_ANALYZER), - meta - ); - this.indexAnalyzer = Lucene.STANDARD_ANALYZER; - this.textFieldType = new TextFieldType(name); - } - public MatchOnlyTextFieldType(String name) { this( name, @@ -292,28 +269,33 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } - private final Builder builder; + private final Version indexCreatedVersion; + private final IndexAnalyzers indexAnalyzers; + private final NamedAnalyzer indexAnalyzer; + private final int positionIncrementGap; private final FieldType fieldType; private MatchOnlyTextFieldMapper( String simpleName, FieldType fieldType, MatchOnlyTextFieldType mappedFieldType, - NamedAnalyzer indexAnalyzer, MultiFields multiFields, CopyTo copyTo, Builder builder ) { - super(simpleName, mappedFieldType, indexAnalyzer, multiFields, copyTo); + super(simpleName, mappedFieldType, builder.analyzers.getIndexAnalyzer(), multiFields, copyTo); assert mappedFieldType.getTextSearchInfo().isTokenized(); assert mappedFieldType.hasDocValues() == false; this.fieldType = fieldType; - this.builder = builder; + this.indexCreatedVersion = builder.indexCreatedVersion; + this.indexAnalyzers = builder.analyzers.indexAnalyzers; + this.indexAnalyzer = builder.analyzers.getIndexAnalyzer(); + this.positionIncrementGap = builder.analyzers.positionIncrementGap.getValue(); } @Override public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName(), builder.indexCreatedVersion, builder.analyzers.indexAnalyzers).init(this); + return new Builder(simpleName(), indexCreatedVersion, indexAnalyzers).init(this); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index 4b3a65d91fd60..213f5e86b4a4d 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -70,7 +70,7 @@ public static class Builder extends FieldMapper.Builder { private final Parameter scalingFactor = new Parameter<>("scaling_factor", false, () -> null, (n, c, o) -> XContentMapValues.nodeDoubleValue(o), m -> toType(m).scalingFactor) - .setValidator(v -> { + .addValidator(v -> { if (v == null) { throw new IllegalArgumentException("Field [scaling_factor] is required"); } @@ -233,15 +233,11 @@ public Object valueForDisplay(Object value) { @Override public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - if (timeZone != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() - + "] does not support custom time zones"); - } + checkNoTimeZone(timeZone); if (format == null) { return DocValueFormat.RAW; - } else { - return new DocValueFormat.Decimal(format); } + return new DocValueFormat.Decimal(format); } /** diff --git 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java index 450b98cc23173..4391ad8e1c930 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -97,7 +97,7 @@ public static class Builder extends FieldMapper.Builder { // `doc_values=false`, even though it cannot be set; and so we need to continue // serializing it forever because of mapper assertions in mixed clusters. private final Parameter docValues = Parameter.docValuesParam(m -> false, false) - .setValidator(v -> { + .addValidator(v -> { if (v) { throw new MapperParsingException("Cannot set [doc_values] on field of type [search_as_you_type]"); } @@ -106,7 +106,7 @@ public static class Builder extends FieldMapper.Builder { private final Parameter maxShingleSize = Parameter.intParam("max_shingle_size", false, m -> builder(m).maxShingleSize.get(), Defaults.MAX_SHINGLE_SIZE) - .setValidator(v -> { + .addValidator(v -> { if (v < MAX_SHINGLE_SIZE_LOWER_BOUND || v > MAX_SHINGLE_SIZE_UPPER_BOUND) { throw new MapperParsingException("[max_shingle_size] must be at least [" + MAX_SHINGLE_SIZE_LOWER_BOUND + "] and at most " + "[" + MAX_SHINGLE_SIZE_UPPER_BOUND + "], got [" + v + "]"); @@ -125,7 +125,11 @@ public static class Builder extends FieldMapper.Builder { public Builder(String name, IndexAnalyzers indexAnalyzers) { super(name); - this.analyzers = new TextParams.Analyzers(indexAnalyzers, m -> builder(m).analyzers); + this.analyzers = new TextParams.Analyzers( + indexAnalyzers, + m -> builder(m).analyzers.getIndexAnalyzer(), + m -> builder(m).analyzers.positionIncrementGap.getValue() + ); } @Override diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 0b3099dc80a6e..9704d35d80917 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -18,4 +18,4 @@ restResources { restApi { include '_common', 'bulk', 'cluster', 'nodes', 'indices', 'index', 'search' } -} +} \ No newline at end of file diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/11_parent_child.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/11_parent_child.yml index a3c2d78690a7a..3fa7c1f0e22bc 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/11_parent_child.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/11_parent_child.yml @@ -20,7 +20,7 @@ setup: index: index: test id: 2 - routing: 1 + routing: "1" body: {"bar": "baz", "join_field": { "name" : "child", "parent": "1"} } - do: diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml index 2ef7f7fd93918..c0190ac116c3b 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml @@ -25,7 +25,7 @@ setup: index: index: test id: 2 - routing: 1 + routing: "1" body: { "join_field": { "name": "answer", "parent": 1} , "entity_type": "answer" } - do: @@ -37,7 +37,7 @@ setup: - do: index: index: test - routing: 3 + routing: "3" id: 4 body: { "join_field": { "name": "address", "parent": 3 }, "entity_type": "address" } diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 81936f44a7ec6..79155f5b0d5c4 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -16,7 +16,6 @@ esplugin { dependencies { testImplementation project(':modules:parent-join') - testImplementation project(':modules:geo') } restResources { @@ -25,6 +24,6 @@ restResources { } } -tasks.named("transformV7RestTests").configure({ task -> +tasks.named("yamlRestTestV7CompatTransform").configure({ task -> task.addAllowedWarningRegex("\\[types removal\\].*") }) diff --git a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 2e5025bf0302d..57f1b397b313d 100644 --- a/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.geo.GeoPlugin; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; @@ -30,6 +29,8 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; + import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -68,7 +69,7 @@ protected boolean addMockGeoShapeFieldMapper() { @Override protected Collection> nodePlugins() { - return Arrays.asList(PercolatorPlugin.class, GeoPlugin.class); + return Arrays.asList(PercolatorPlugin.class, TestGeoShapeFieldMapperPlugin.class); } public void testPercolatorQuery() throws Exception { diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 308d859931dac..a0a51095c9c85 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 277fee4aa6580..4fbeaa789c9bf 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -14,7 +14,7 @@ import org.elasticsearch.gradle.internal.test.AntFixture apply plugin: 'elasticsearch.test-with-dependencies' apply plugin: 'elasticsearch.jdk-download' -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.java-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -155,11 +155,11 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { } } } -tasks.named("transformV7RestTests").configure({ task -> +tasks.named("yamlRestTestV7CompatTransform").configure({ task -> task.addAllowedWarningRegex("\\[types removal\\].*") }) -tasks.named("yamlRestCompatTest").configure { +tasks.named("yamlRestTestV7CompatTest").configure { systemProperty 'tests.rest.blacklist', [ 'reindex/20_validation/reindex without source gives useful error message', // exception with a type. Not much benefit adding _doc there. 'update_by_query/20_validation/update_by_query without source gives useful error message' // exception with a type. Not much benefit adding _doc there. diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/95_parent_join.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/95_parent_join.yml index b36593b4b962c..e4861d35e781a 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/95_parent_join.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/95_parent_join.yml @@ -25,14 +25,14 @@ setup: index: index: source id: 2 - routing: 1 + routing: "1" body: { "join_field": { "name": "child", "parent": "1" } } - do: index: index: source id: 3 - routing: 1 + routing: "1" body: { "join_field": { "name": "grand_child", "parent": "2" } } - do: diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 0245a3ea69b92..a19cae51e8981 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -8,7 +8,7 @@ import org.elasticsearch.gradle.PropertyNormalization -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.test.fixtures' diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index 83c4e7410a4c1..54076aaecdf81 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -7,7 +7,7 @@ */ apply plugin: 'elasticsearch.validate-rest-spec' -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java index 90a166d34f64f..26c1dc3178d5c 100644 --- 
a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java +++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java @@ -99,13 +99,13 @@ public Grok run() { try { // Try to collect warnings up front and refuse to compile the expression if there are any List warnings = new ArrayList<>(); - new Grok(Grok.BUILTIN_PATTERNS, pattern, watchdog, warnings::add).match("__nomatch__"); + new Grok(Grok.getBuiltinPatterns(false), pattern, watchdog, warnings::add).match("__nomatch__"); if (false == warnings.isEmpty()) { throw new IllegalArgumentException("emitted warnings: " + warnings); } return new Grok( - Grok.BUILTIN_PATTERNS, + Grok.getBuiltinPatterns(false), pattern, watchdog, w -> { throw new IllegalArgumentException("grok [" + pattern + "] emitted a warning: " + w); } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 9cd1db75e86d0..4eb68a6e4cf11 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -12,7 +12,7 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.internal.test.rest.JavaRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.java-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/SharedGroupFactory.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/SharedGroupFactory.java index d9ccd0eb7b158..686d46e02a7e9 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/SharedGroupFactory.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/SharedGroupFactory.java @@ -85,11 +85,9 @@ private SharedGroup getGenericGroup() { private static class RefCountedGroup extends AbstractRefCounted { - public static final String NAME = "ref-counted-event-loop-group"; private final EventLoopGroup eventLoopGroup; private RefCountedGroup(EventLoopGroup eventLoopGroup) { - super(NAME); this.eventLoopGroup = eventLoopGroup; } diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index e2926ae0cf7a4..148a08a002c33 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -7,7 +7,7 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' @@ -36,7 +36,7 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } -tasks.named("yamlRestCompatTest").configure { +tasks.named("yamlRestTestV7CompatTest").configure { systemProperty 'tests.rest.blacklist', [ //marked as not needing compatible api 'analysis_icu/10_basic/Normalization with deprecated unicodeSetFilter' // Cleanup versioned deprecations in analysis #41560 diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index b66e56d7e0d56..e4c278f6ddb9e 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -213,7 +213,7 @@ public static class Builder extends FieldMapper.Builder { final Parameter ignoreAbove = Parameter.intParam("ignore_above", true, m -> toType(m).ignoreAbove, Integer.MAX_VALUE) - .setValidator(v -> { + .addValidator(v -> { if (v < 0) { throw new IllegalArgumentException("[ignore_above] must be positive, got [" + v + "]"); } diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle index 801d6e3238966..0b3beb8ab5dbe 100644 --- a/plugins/analysis-kuromoji/build.gradle +++ b/plugins/analysis-kuromoji/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/analysis-nori/build.gradle b/plugins/analysis-nori/build.gradle index 7c0604b56813f..099e620795148 100644 --- a/plugins/analysis-nori/build.gradle +++ b/plugins/analysis-nori/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle index bd8af3ff93a12..89ff6f900c802 100644 --- a/plugins/analysis-phonetic/build.gradle +++ b/plugins/analysis-phonetic/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle index e997c81b3fc82..3a3bd3012fd86 100644 --- a/plugins/analysis-smartcn/build.gradle +++ b/plugins/analysis-smartcn/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/analysis-stempel/build.gradle b/plugins/analysis-stempel/build.gradle index bc5e973b9d980..3b1eb7623f947 100644 --- a/plugins/analysis-stempel/build.gradle +++ b/plugins/analysis-stempel/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index 3c869e9195b19..2a059bd746c93 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 3101b0b343c58..a97c9d8e5a11c 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -8,7 +8,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 6eac2f760f655..0a8f299955850 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -7,7 +7,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 5d87ad59fef1f..e8afba63970c9 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -10,11 +10,11 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' dependencies { yamlRestTestImplementation project(':plugins:discovery-ec2') @@ -62,7 +62,7 @@ tasks.named("yamlRestTest").configure { enabled = false } def yamlRestTestTask = tasks.register("yamlRestTest${action}", RestIntegTestTask) { dependsOn fixture SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) testClassesDirs = yamlRestTestSourceSet.getOutput().getClassesDirs() classpath = yamlRestTestSourceSet.getRuntimeClasspath() } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java index f6c65e326b79a..6000fd7b6731e 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AmazonEc2Reference.java @@ -22,7 +22,6 @@ public class AmazonEc2Reference extends AbstractRefCounted implements Releasable private final AmazonEC2 client; AmazonEc2Reference(AmazonEC2 client) { - super("AWS_EC2_CLIENT"); this.client = client; } diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 738929e6f2b26..b936669d4ee26 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -1,4 +1,4 @@ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 86fbf30963461..c2cd26c2e928e 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -13,7 +13,7 @@ import org.elasticsearch.gradle.internal.test.AntFixture import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' final int gceNumberOfNodes = 3 diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index e3bbc6cb22b34..31bcccd9c5ece 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -1,14 +1,24 @@ import org.elasticsearch.gradle.internal.info.BuildParams // Subprojects aren't published so do not assemble 
-subprojects { - project.tasks.matching { it.name.equals('assemble') }.configureEach { +subprojects { p -> + p.tasks.matching { it.name.equals('assemble') }.configureEach { enabled = false } if (BuildParams.inFipsJvm) { - project.tasks.configureEach { + p.tasks.configureEach { enabled = false } } + + // configure project dependencies for yaml rest test plugin. + // plugin defaults to external available artifacts + p.getPluginManager().withPlugin("elasticsearch.yaml-rest-test", new Action() { + @Override + void execute(AppliedPlugin appliedPlugin) { + p.dependencies.add("yamlRestTestImplementation", project(":test:framework")) + p.dependencies.add("restTestSpecs", p.dependencies.project(path:':rest-api-spec', configuration:'basicRestSpecs')) + } + }) } configure(project('painless-whitelist')) { diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 0f5f7a4031789..94cbc91b49336 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -7,7 +7,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { @@ -96,6 +96,6 @@ if (BuildParams.inFipsJvm) { tasks.named("jarHell").configure { enabled = false } tasks.named("test").configure { enabled = false } tasks.named("yamlRestTest").configure { enabled = false }; - tasks.named("yamlRestCompatTest").configure { enabled = false }; + tasks.named("yamlRestTestV7CompatTest").configure { enabled = false }; tasks.named("testingConventions").configure { enabled = false }; } diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index 3bf05291ef2ce..de3c0b1c6fb37 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index da05111e20fcc..468e28beef583 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -86,7 +86,9 @@ public static class Builder extends FieldMapper.Builder { public Builder(String name, IndexAnalyzers indexAnalyzers) { super(name); - this.analyzers = new TextParams.Analyzers(indexAnalyzers, m -> builder(m).analyzers); + this.analyzers = new TextParams.Analyzers(indexAnalyzers, + m -> builder(m).analyzers.getIndexAnalyzer(), + m -> builder(m).analyzers.positionIncrementGap.getValue()); } @Override diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 8bfdff8fd9130..81df70e3855bf 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' esplugin { diff --git a/plugins/mapper-size/build.gradle b/plugins/mapper-size/build.gradle index 12176fff28c5a..ca4738335d0cc 100644 --- a/plugins/mapper-size/build.gradle +++ b/plugins/mapper-size/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 82459bd6f78ca..b5f7b4d498312 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -13,7 +13,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact-base' diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index b64d911d0663a..6bd792096b022 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,7 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin import java.nio.file.Files @@ -16,7 +16,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact-base' @@ -286,7 +286,7 @@ def largeBlobYamlRestTest = tasks.register("largeBlobYamlRestTest", RestIntegTes dependsOn "createServiceAccountFile" } SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) @@ -339,7 +339,7 @@ if (useFixture) { tasks.register("yamlRestTestApplicationDefaultCredentials", RestIntegTestTask.class) { dependsOn('bundlePlugin') SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 0a83463d21dff..0c98d919f682b 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,7 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.YamlRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE @@ -13,7 +13,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact-base' @@ -218,7 +218,7 @@ if (useFixture) { description = "Runs REST tests using the Minio repository." dependsOn tasks.named("bundlePlugin") SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) @@ -246,7 +246,7 @@ if (useFixture) { description = "Runs tests using the ECS repository." dependsOn('bundlePlugin') SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) systemProperty 'tests.rest.blacklist', [ diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java index 4aa085268d660..cf48dcf1d04cf 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AmazonS3Reference.java @@ -24,7 +24,6 @@ public class AmazonS3Reference extends AbstractRefCounted implements Releasable private final AmazonS3 client; AmazonS3Reference(AmazonS3 client) { - super("AWS_S3_CLIENT"); this.client = client; } diff --git a/plugins/store-smb/build.gradle b/plugins/store-smb/build.gradle index 728e9642d9c29..91c7dcadf0226 100644 --- a/plugins/store-smb/build.gradle +++ b/plugins/store-smb/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java index e51e1a832adec..500e840224b49 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioGroupFactory.java @@ -86,11 +86,9 @@ private void onException(Exception exception) { private static class RefCountedNioGroup extends AbstractRefCounted implements NioGroup { - public static final String NAME = "ref-counted-nio-group"; private final NioSelectorGroup nioGroup; private RefCountedNioGroup(NioSelectorGroup nioGroup) { - super(NAME); this.nioGroup = nioGroup; } diff --git a/qa/die-with-dignity/src/main/java/org/elasticsearch/RestDieWithDignityAction.java b/qa/die-with-dignity/src/main/java/org/elasticsearch/RestDieWithDignityAction.java deleted file mode 100644 index 82288f0a73364..0000000000000 --- a/qa/die-with-dignity/src/main/java/org/elasticsearch/RestDieWithDignityAction.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestRequest; - -import java.util.List; - -import static org.elasticsearch.rest.RestRequest.Method.GET; - -public class RestDieWithDignityAction extends BaseRestHandler { - - RestDieWithDignityAction() {} - - @Override - public List<Route> routes() { - return List.of(new Route(GET, "/_die_with_dignity")); - } - - @Override - public String getName() { - return "die_with_dignity_action"; - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { - throw new OutOfMemoryError("die with dignity"); - } - -} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index d5fd91a427bac..45689f0fed691 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -849,7 +849,6 @@ public void test131InitProcessHasCorrectPID() { /** * Check that Elasticsearch reports per-node cgroup information.
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/76812") public void test140CgroupOsStatsAreAvailable() throws Exception { waitForElasticsearch(installation, USERNAME, PASSWORD); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index 20cbaeac464f9..db07be0d1eb86 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -247,12 +247,18 @@ private static void verifyDefaultInstallation(Installation es, Distribution dist /** * Starts Elasticsearch, without checking that startup is successful. */ - public static Shell.Result runElasticsearchStartCommand(Shell sh) throws IOException { + public static Shell.Result runElasticsearchStartCommand(Shell sh) { if (isSystemd()) { + Packages.JournaldWrapper journald = new Packages.JournaldWrapper(sh); sh.run("systemctl daemon-reload"); sh.run("systemctl enable elasticsearch.service"); sh.run("systemctl is-enabled elasticsearch.service"); - return sh.runIgnoreExitCode("systemctl start elasticsearch.service"); + Result exitCode = sh.runIgnoreExitCode("systemctl start elasticsearch.service"); + if (exitCode.isSuccess() == false) { + logger.warn(sh.runIgnoreExitCode("systemctl status elasticsearch.service").stdout); + logger.warn(journald.getLogs().stdout); + } + return exitCode; } return sh.runIgnoreExitCode("service elasticsearch start"); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 70da8e782b887..3cdc32029babd 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -86,6 +86,21 @@ private void doTestSortOrder(String repoName, Collection allSnapshotName GetSnapshotsRequest.SortBy.START_TIME, order ); + assertSnapshotListSorted( + allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.SHARDS, order), + GetSnapshotsRequest.SortBy.SHARDS, + order + ); + assertSnapshotListSorted( + allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order), + GetSnapshotsRequest.SortBy.FAILED_SHARDS, + order + ); + assertSnapshotListSorted( + allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.REPOSITORY, order), + GetSnapshotsRequest.SortBy.REPOSITORY, + order + ); } public void testResponseSizeLimit() throws Exception { diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml index 27f7f804ead1c..d748a2388bdd4 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/20_combine_processors.yml @@ -67,6 +67,63 @@ - match: { _source.geoip.continent_name: "North America" } - match: { _source.geoip.country_iso_code: "US" } +--- +"Test with date processor and ECS-v1": + - do: + ingest.put_pipeline: + id: "_id" + body: > + { + "processors": [ + { + "grok" : { + "field" : "log", + "ecs_compatibility": "v1", + "patterns": 
["%{COMBINEDAPACHELOG}"] + } + }, + { + "date" : { + "field" : "timestamp", + "target_field" : "timestamp", + "formats" : ["dd/MMM/yyyy:HH:mm:ss xx"] + } + }, + { + "geoip" : { + "field" : "source.address" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "_id" + body: { + log: "70.193.17.92 - - [08/Sep/2014:02:54:42 +0000] \"GET /presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png HTTP/1.1\" 200 175208 \"http://mobile.rivals.com/board_posts.asp?SID=880&mid=198829575&fid=2208&tid=198829575&Team=&TeamId=&SiteId=\" \"Mozilla/5.0 (Linux; Android 4.2.2; VS980 4G Build/JDQ39B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.135 Mobile Safari/537.36\"" + } + + - do: + get: + index: test + id: 1 + - length: { _source: 7 } + - match: { _source.url.original: "/presentations/logstash-scale11x/images/ahhh___rage_face_by_samusmmx-d5g5zap.png" } + - match: { _source.user_agent.original: "Mozilla/5.0 (Linux; Android 4.2.2; VS980 4G Build/JDQ39B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.135 Mobile Safari/537.36" } + - match: { _source.http.request.method: "GET" } + - match: { _source.http.request.referrer: "http://mobile.rivals.com/board_posts.asp?SID=880&mid=198829575&fid=2208&tid=198829575&Team=&TeamId=&SiteId=" } + - match: { _source.http.response.status_code: 200 } + - match: { _source.http.response.body.bytes: 175208 } + - match: { _source.source.address: "70.193.17.92" } + - match: { _source.http.version: "1.1" } + - match: { _source.timestamp: "2014-09-08T02:54:42.000Z" } + - match: { _source.geoip.continent_name: "North America" } + - match: { _source.geoip.country_iso_code: "US" } + --- "Test mutate": - do: diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index a126408b5dbeb..7d6059f035ba3 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -4,7 +4,7 @@ apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.rest-resources' apply plugin: 'elasticsearch.validate-rest-spec' -apply plugin: 'elasticsearch.yaml-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' apply plugin: 'elasticsearch.yaml-rest-compat-test' restResources { @@ -17,7 +17,18 @@ restResources { ext.projectLicenses.set(['The Apache Software License, Version 2.0': 'http://www.apache.org/licenses/LICENSE-2.0']) ext.licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') +configurations { + // configuration to make use by external yaml rest test plugin in our examples + // easy and efficient + basicRestSpecs { + attributes { + attribute(org.gradle.api.internal.artifacts.ArtifactAttributes.ARTIFACT_FORMAT, ArtifactTypeDefinition.DIRECTORY_TYPE) + } + } +} + artifacts { + basicRestSpecs(new File(projectDir, "src/main/resources")) restSpecs(new File(projectDir, "src/main/resources/rest-api-spec/api")) restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } @@ -28,11 +39,7 @@ testClusters.all { tasks.named("test").configure { enabled = false } tasks.named("jarHell").configure { enabled = false } -tasks.named("yamlRestCompatTest").configure { - onlyIf { - // Skip these tests on Windows since the blacklist exceeds Windows CLI limits - OS.current() != OS.WINDOWS - } +tasks.named("yamlRestTestV7CompatTest").configure { systemProperty 'tests.rest.blacklist', [ // Cat API are meant to be consumed by humans, so will not be supported by Compatible REST API 'cat*/*/*', @@ -77,6 +84,7 @@ 
tasks.named("yamlRestCompatTest").configure { // upgrade api will only get a dummy endpoint returning an exception suggesting to use _reindex 'indices.upgrade/*/*', + 'search.aggregation/20_terms/*profiler*', // The profiler results aren't backwards compatible. 'search.aggregation/370_doc_count_field/Test filters agg with doc_count', // Uses profiler for assertions which is not backwards compatible @@ -92,7 +100,7 @@ tasks.named("yamlRestCompatTest").configure { ].join(',') } -tasks.named("transformV7RestTests").configure({ task -> +tasks.named("yamlRestTestV7CompatTransform").configure({ task -> task.replaceValueInMatch("_type", "_doc") task.addAllowedWarningRegex("\\[types removal\\].*") task.replaceValueInMatch("nodes.\$node_id.roles.8", "ml", "node_info role test") diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model.json index 28cb5821cea18..af3ef880bf6bb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model.json @@ -26,6 +26,14 @@ } ] }, + "params":{ + "defer_definition_decompression": { + "required": false, + "type": "boolean", + "description": "If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations.", + "default": false + } + }, "body":{ "description":"The trained model configuration", "required":true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json index 132b2639a2e04..6d5760cc635c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_mvt.json @@ -19,8 +19,8 @@ { "path": "/{index}/_mvt/{field}/{zoom}/{x}/{y}", "methods": [ - "GET", - "POST" + "POST", + "GET" ], "parts": { "index": { diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/create/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/create/40_routing.yml index af8d865031bc4..6fb845f4fa869 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/create/40_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/create/40_routing.yml @@ -19,14 +19,14 @@ create: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: get: index: test_1 id: 1 - routing: 5 + routing: "5" stored_fields: [_routing] - match: { _id: "1"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/delete/30_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/delete/30_routing.yml index c2d6adfcd4e72..2bbafe5e04416 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/delete/30_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/delete/30_routing.yml @@ -13,7 +13,7 @@ index: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: @@ -21,11 +21,11 @@ delete: index: test_1 id: 1 - routing: 4 + routing: "4" - do: delete: index: test_1 id: 1 - routing: 5 + routing: "5" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/exists/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/exists/40_routing.yml index 086286276fcd3..039eeb87d1f0e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/exists/40_routing.yml 
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/exists/40_routing.yml @@ -20,14 +20,14 @@ index: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: exists: index: test_1 id: 1 - routing: 5 + routing: "5" - is_true: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/40_routing.yml index 9ce60a7e8c12a..78102c4411ca8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/40_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/40_routing.yml @@ -21,14 +21,14 @@ index: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: get: index: test_1 id: 1 - routing: 5 + routing: "5" stored_fields: [_routing] - match: { _id: "1"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get_source/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get_source/40_routing.yml index 1396f2ab8ad22..74752f1d95f56 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get_source/40_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get_source/40_routing.yml @@ -22,14 +22,14 @@ index: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: get_source: index: test_1 id: 1 - routing: 5 + routing: "5" - match: { '': {foo: bar}} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/40_routing.yml index f3cefc56a8e98..dcf2224e5807d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/40_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/index/40_routing.yml @@ -20,14 +20,14 @@ index: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: get: index: test_1 id: 1 - routing: 5 + routing: "5" stored_fields: [_routing] - match: { _id: "1"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml index 490c093904a89..ac3f379fb86cd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.recovery/10_basic.yml @@ -1,9 +1,8 @@ --- "Indices recovery test": - skip: - # todo: change after backport - version: " - 7.99.99" - reason: recovery from snapshot bytes not available until 8.0 + version: " - 7.14.99" + reason: recovery from snapshot bytes not available until 7.15 - do: indices.create: @@ -47,7 +46,7 @@ --- "Indices recovery test without recovery from snapshot": - +# to be removed once 7.15 is out. 
- do: indices.create: index: test_1 diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml index ecedcef0c1a48..58ceba60d9bcb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.update_aliases/20_routing.yml @@ -125,7 +125,7 @@ setup: - add: index: test_index alias: test_alias - routing: 5 + routing: "5" - do: indices.get_alias: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/40_routing.yml index 89c2b21c1673d..0643982156b93 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/40_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mget/40_routing.yml @@ -20,7 +20,7 @@ index: index: test_1 id: 1 - routing: 5 + routing: "5" body: { foo: bar } - do: @@ -30,8 +30,8 @@ body: docs: - { _id: 1 } - - { _id: 1, routing: 4 } - - { _id: 1, routing: 5 } + - { _id: 1, routing: "4" } + - { _id: 1, routing: "5" } - is_false: docs.0.found - is_false: docs.1.found diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 9ee78a00e09a7..c9d832b0d6adb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -657,7 +657,7 @@ setup: refresh: true index: test_1 id: 1 - routing: 1 + routing: "1" body: { "str": "abc" } - do: @@ -665,7 +665,7 @@ setup: refresh: true index: test_1 id: 2 - routing: 1 + routing: "1" body: { "str": "abc" } - do: @@ -673,7 +673,7 @@ setup: refresh: true index: test_1 id: 3 - routing: 1 + routing: "1" body: { "str": "bcd" } - do: @@ -703,7 +703,7 @@ setup: refresh: true index: test_1 id: 1 - routing: 1 + routing: "1" body: { "str": "abc" } - do: @@ -711,7 +711,7 @@ setup: refresh: true index: test_1 id: 2 - routing: 1 + routing: "1" body: { "str": "abc" } - do: @@ -719,7 +719,7 @@ setup: refresh: true index: test_1 id: 3 - routing: 1 + routing: "1" body: { "str": "bcd" } - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml index 70f40c413adf0..ab1717b0dc1d7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/330_fetch_fields.yml @@ -855,6 +855,7 @@ Test nested field with sibling field resolving to DocValueFetcher: - match: { hits.hits.0.fields.number.2 : 3 } - match: { hits.hits.0.fields.number.3 : 5 } - match: { hits.hits.0.fields.number.4 : 6 } + --- Test token_count inside nested field doesn't fail: - skip: @@ -892,3 +893,117 @@ Test token_count inside nested field doesn't fail: body: _source: false fields: [ "*" ] + +--- +error includes field name: + - skip: + version: ' - 7.15.99' + reason: 'error changed in 7.16.0' + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 1 + mappings: + properties: + keyword: + type: keyword + date: + type: date + + - do: 
+ index: + index: test + id: 1 + refresh: true + body: + keyword: "value" + date: "1990-12-29T22:30:00.000Z" + + - do: + catch: '/error fetching \[keyword\]: Field \[keyword\] of type \[keyword\] doesn''t support formats./' + search: + index: test + body: + fields: + - field: keyword + format: "yyyy/MM/dd" + +--- +error includes glob pattern: + - skip: + version: ' - 7.15.99' + reason: 'error changed in 7.16.0' + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 1 + mappings: + properties: + dkeyword: + type: keyword + date: + type: date + + - do: + index: + index: test + id: 1 + refresh: true + body: + dkeyword: "value" + date: "1990-12-29T22:30:00.000Z" + + - do: + catch: '/error fetching \[dkeyword\] which matched \[d\*\]: Field \[dkeyword\] of type \[keyword\] doesn''t support formats./' + search: + index: test + body: + fields: + - field: d* + format: "yyyy/MM/dd" + + +--- +error for flattened includes whole path: + - skip: + version: ' - 7.15.99' + reason: 'error changed in 7.16.0' + + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 1 + mappings: + properties: + flattened: + type: flattened + + date: + type: date + + - do: + index: + index: test + id: 1 + refresh: true + body: + flattened: + foo: bar + date: "1990-12-29T22:30:00.000Z" + + - do: + catch: '/error fetching \[flattened.bar\]: Field \[flattened.bar\] of type \[flattened\] doesn''t support formats./' + search: + index: test + body: + fields: + - field: flattened.bar + format: "yyyy/MM/dd" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/40_routing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/40_routing.yml index 1a91beebbb15d..8b67ca512f326 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/40_routing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/update/40_routing.yml @@ -21,7 +21,7 @@ update: index: test_1 id: 1 - routing: 5 + routing: "5" body: doc: { foo: baz } upsert: { foo: bar } @@ -30,7 +30,7 @@ get: index: test_1 id: 1 - routing: 5 + routing: "5" stored_fields: _routing - match: { _routing: "5"} @@ -47,7 +47,7 @@ update: index: test_1 id: 1 - routing: 5 + routing: "5" _source: foo body: doc: { foo: baz } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java new file mode 100644 index 0000000000000..967e20f9be2ba --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.create; + +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.indices.TestSystemIndexPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; + +import static org.elasticsearch.indices.TestSystemIndexDescriptor.INDEX_NAME; +import static org.elasticsearch.indices.TestSystemIndexDescriptor.PRIMARY_INDEX_NAME; +import static org.hamcrest.Matchers.is; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AutoCreateSystemIndexIT extends ESIntegTestCase { + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), TestSystemIndexPlugin.class); + } + + public void testAutoCreatePrimaryIndex() throws Exception { + CreateIndexRequest request = new CreateIndexRequest(PRIMARY_INDEX_NAME); + client().execute(AutoCreateAction.INSTANCE, request).get(); + + GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices(PRIMARY_INDEX_NAME).get(); + assertThat(response.indices().length, is(1)); + } + + public void testAutoCreatePrimaryIndexFromAlias() throws Exception { + CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME); + client().execute(AutoCreateAction.INSTANCE, request).get(); + + GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices(PRIMARY_INDEX_NAME).get(); + assertThat(response.indices().length, is(1)); + } + + public void testAutoCreateNonPrimaryIndex() throws Exception { + CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME + "-2"); + client().execute(AutoCreateAction.INSTANCE, request).get(); + + GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices(INDEX_NAME + "-2").get(); + assertThat(response.indices().length, is(1)); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java index 8cc2e41594330..b8f0935a4f625 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateSystemIndicesIT.java @@ -52,7 +52,7 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { * settings when it is first used, when it is referenced via its alias. */ public void testSystemIndexIsAutoCreatedViaAlias() { - doCreateTest(() -> indexDoc(INDEX_NAME, "1", "foo", "bar")); + doCreateTest(() -> indexDoc(INDEX_NAME, "1", "foo", "bar"), PRIMARY_INDEX_NAME); } /** @@ -61,7 +61,17 @@ public void testSystemIndexIsAutoCreatedViaAlias() { * index name. */ public void testSystemIndexIsAutoCreatedViaConcreteName() { - doCreateTest(() -> indexDoc(PRIMARY_INDEX_NAME, "1", "foo", "bar")); + doCreateTest(() -> indexDoc(PRIMARY_INDEX_NAME, "1", "foo", "bar"), PRIMARY_INDEX_NAME); + } + + /** + * Check that a system index is auto-created with the expected mappings and + * settings when it is first used, when it is referenced via its concrete + * index name.
+ */ + public void testNonPrimarySystemIndexIsAutoCreatedViaConcreteName() { + final String nonPrimarySystemIndex = INDEX_NAME + "-2"; + doCreateTest(() -> indexDoc(nonPrimarySystemIndex, "1", "foo", "bar"), nonPrimarySystemIndex); } /** @@ -69,7 +79,7 @@ public void testSystemIndexIsAutoCreatedViaConcreteName() { * settings when it is explicitly created, when it is referenced via its alias. */ public void testCreateSystemIndexViaAlias() { - doCreateTest(() -> assertAcked(prepareCreate(INDEX_NAME))); + doCreateTest(() -> assertAcked(prepareCreate(INDEX_NAME)), PRIMARY_INDEX_NAME); } /** @@ -78,21 +88,21 @@ public void testCreateSystemIndexViaAlias() { * concrete index name. */ public void testCreateSystemIndexViaConcreteName() { - doCreateTest(() -> assertAcked(prepareCreate(PRIMARY_INDEX_NAME))); + doCreateTest(() -> assertAcked(prepareCreate(PRIMARY_INDEX_NAME)), PRIMARY_INDEX_NAME); } - private void doCreateTest(Runnable runnable) { + private void doCreateTest(Runnable runnable, String concreteIndex) { internalCluster().startNodes(1); // Trigger the creation of the system index runnable.run(); ensureGreen(INDEX_NAME); - assertMappingsAndSettings(TestSystemIndexDescriptor.getOldMappings()); + assertMappingsAndSettings(TestSystemIndexDescriptor.getOldMappings(), concreteIndex); // Remove the index and alias... - assertAcked(client().admin().indices().prepareAliases().removeAlias(PRIMARY_INDEX_NAME, INDEX_NAME).get()); - assertAcked(client().admin().indices().prepareDelete(PRIMARY_INDEX_NAME)); + assertAcked(client().admin().indices().prepareAliases().removeAlias(concreteIndex, INDEX_NAME).get()); + assertAcked(client().admin().indices().prepareDelete(concreteIndex)); // ...so that we can check that the they will still be auto-created again, // but this time with updated settings @@ -101,14 +111,14 @@ private void doCreateTest(Runnable runnable) { runnable.run(); ensureGreen(INDEX_NAME); - assertMappingsAndSettings(TestSystemIndexDescriptor.getNewMappings()); + assertMappingsAndSettings(TestSystemIndexDescriptor.getNewMappings(), concreteIndex); } /** * Fetch the mappings and settings for {@link TestSystemIndexDescriptor#INDEX_NAME} and verify that they match the expected values. * Note that in the case of the mappings, this is just a dumb string comparison, so order of keys matters. 
*/ - private void assertMappingsAndSettings(String expectedMappings) { + private void assertMappingsAndSettings(String expectedMappings, String concreteIndex) { final GetMappingsResponse getMappingsResponse = client().admin() .indices() .getMappings(new GetMappingsRequest().indices(INDEX_NAME)) @@ -116,11 +126,11 @@ private void assertMappingsAndSettings(String expectedMappings) { final ImmutableOpenMap mappings = getMappingsResponse.getMappings(); assertThat( - "Expected mappings to contain a key for [" + PRIMARY_INDEX_NAME + "], but found: " + mappings.toString(), - mappings.containsKey(PRIMARY_INDEX_NAME), + "Expected mappings to contain a key for [" + concreteIndex + "], but found: " + mappings.toString(), + mappings.containsKey(concreteIndex), equalTo(true) ); - final Map sourceAsMap = mappings.get(PRIMARY_INDEX_NAME).getSourceAsMap(); + final Map sourceAsMap = mappings.get(concreteIndex).getSourceAsMap(); try { assertThat(convertToXContent(sourceAsMap, XContentType.JSON).utf8ToString(), equalTo(expectedMappings)); @@ -133,7 +143,7 @@ private void assertMappingsAndSettings(String expectedMappings) { .getSettings(new GetSettingsRequest().indices(INDEX_NAME)) .actionGet(); - final Settings actual = getSettingsResponse.getIndexToSettings().get(PRIMARY_INDEX_NAME); + final Settings actual = getSettingsResponse.getIndexToSettings().get(concreteIndex); for (String settingName : TestSystemIndexDescriptor.SETTINGS.keySet()) { assertThat(actual.get(settingName), equalTo(TestSystemIndexDescriptor.SETTINGS.get(settingName))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java index 8a4f8762801ee..05330a64356c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -63,6 +63,17 @@ public void testSimple() throws Exception { .setSource(doc) .get(); } + final boolean forceNorms = randomBoolean(); + if (forceNorms) { + final XContentBuilder doc = XContentFactory.jsonBuilder() + .startObject() + .field("english_text", "A long sentence to make sure that norms is non-zero") + .endObject(); + client().prepareIndex(index) + .setId("id") + .setSource(doc) + .get(); + } PlainActionFuture future = PlainActionFuture.newFuture(); client().execute(AnalyzeIndexDiskUsageAction.INSTANCE, new AnalyzeIndexDiskUsageRequest(new String[] {index}, AnalyzeIndexDiskUsageRequest.DEFAULT_INDICES_OPTIONS, true), @@ -77,8 +88,9 @@ public void testSimple() throws Exception { final IndexDiskUsageStats.PerFieldDiskUsage englishField = stats.getFields().get("english_text"); assertThat(englishField.getInvertedIndexBytes(), greaterThan(0L)); assertThat(englishField.getStoredFieldBytes(), equalTo(0L)); - assertThat(englishField.getNormsBytes(), greaterThan(0L)); - + if (forceNorms) { + assertThat(englishField.getNormsBytes(), greaterThan(0L)); + } final IndexDiskUsageStats.PerFieldDiskUsage valueField = stats.getFields().get("value"); assertThat(valueField.getInvertedIndexBytes(), equalTo(0L)); assertThat(valueField.getStoredFieldBytes(), equalTo(0L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index 19dc61ef36049..4ddd82659ad4e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.indices; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -52,12 +51,12 @@ EngineConfig engineConfigWithLargerIndexingMemory(EngineConfig config) { IndexSettings indexSettings = new IndexSettings(config.getIndexSettings().getIndexMetadata(), settings); return new EngineConfig(config.getShardId(), config.getThreadPool(), indexSettings, config.getWarmer(), config.getStore(), config.getMergePolicy(), config.getAnalyzer(), - config.getSimilarity(), new CodecService(null, LogManager.getLogger(IndexingMemoryControllerIT.class)), + config.getSimilarity(), new CodecService(null), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), config.getInternalRefreshListener(), config.getIndexSort(), config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.retentionLeasesSupplier(), - config.getPrimaryTermSupplier(), config.getSnapshotCommitSupplier()); + config.getPrimaryTermSupplier(), config.getSnapshotCommitSupplier(), config.getLeafSorter()); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index 3600aaecb0b78..dc2b7ab0303b7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -432,12 +432,12 @@ public void testShardActiveElseWhere() throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterApplierService.onNewClusterState("test", () -> newState, new ClusterApplyListener() { @Override - public void onSuccess(String source) { + public void onSuccess() { latch.countDown(); } @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { latch.countDown(); throw new AssertionError("Expected a proper response", e); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index d69d0b562abcd..a936e04f2d7bb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -115,9 +115,6 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce if (useBwCFormat) { final Version version = randomVersionBetween(random(), Version.V_7_5_0, Version.CURRENT); initWithSnapshotVersion(repoName, repoPath, version); - // Re-create repo to clear repository data cache - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); - createRepository(repoName, "fs", repoPath); } createSnapshot(repoName, 
"empty-snap", Collections.emptyList()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java index 1eab2ca7c6034..d6b9cb0ac267c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/AggregationsIntegrationIT.java @@ -21,7 +21,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; - @ESIntegTestCase.SuiteScopeTestCase public class AggregationsIntegrationIT extends ESIntegTestCase { @@ -41,8 +40,10 @@ public void setupSuiteScopeCluster() throws Exception { public void testScroll() { final int size = randomIntBetween(1, 4); SearchResponse response = client().prepareSearch("index") - .setSize(size).setScroll(TimeValue.timeValueMinutes(1)) - .addAggregation(terms("f").field("f")).get(); + .setSize(size) + .setScroll(TimeValue.timeValueMinutes(1)) + .addAggregation(terms("f").field("f")) + .get(); assertSearchResponse(response); Aggregations aggregations = response.getAggregations(); assertNotNull(aggregations); @@ -51,9 +52,7 @@ public void testScroll() { int total = response.getHits().getHits().length; while (response.getHits().getHits().length > 0) { - response = client().prepareSearchScroll(response.getScrollId()) - .setScroll(TimeValue.timeValueMinutes(1)) - .get(); + response = client().prepareSearchScroll(response.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); assertSearchResponse(response); assertNull(response.getAggregations()); total += response.getHits().getHits().length; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java index 166dda16dd957..4f54de00d454f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/CombiIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.hppc.IntIntHashMap; import com.carrotsearch.hppc.IntIntMap; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -49,30 +50,22 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t String name = "name_" + randomIntBetween(1, 10); if (rarely()) { missingValues++; - builders[i] = client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("name", name) - .endObject()); + builders[i] = client().prepareIndex("idx").setSource(jsonBuilder().startObject().field("name", name).endObject()); } else { int value = randomIntBetween(1, 10); values.put(value, values.getOrDefault(value, 0) + 1); - builders[i] = client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("name", name) - .field("value", value) - .endObject()); + builders[i] = client().prepareIndex("idx") + .setSource(jsonBuilder().startObject().field("name", name).field("value", value).endObject()); } } indexRandom(true, builders); ensureSearchable(); - SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); SearchResponse response = 
client().prepareSearch("idx") - .addAggregation(missing("missing_values").field("value")) - .addAggregation(terms("values").field("value") - .collectMode(aggCollectionMode )) - .get(); + .addAggregation(missing("missing_values").field("value")) + .addAggregation(terms("values").field("value").collectMode(aggCollectionMode)) + .get(); assertSearchResponse(response); @@ -92,7 +85,6 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t assertTrue(values.isEmpty()); } - /** * Some top aggs (eg. date_/histogram) that are executed on unmapped fields, will generate an estimate count of buckets - zero. * when the sub aggregator is then created, it will take this estimation into account. This used to cause @@ -100,22 +92,29 @@ public void testMultipleAggsOnSameField_WithDifferentRequiredValueSourceType() t */ public void testSubAggregationForTopAggregationOnUnmappedField() throws Exception { - prepareCreate("idx").setMapping(jsonBuilder() - .startObject() - .startObject("_doc").startObject("properties") - .startObject("name").field("type", "keyword").endObject() - .startObject("value").field("type", "integer").endObject() - .endObject().endObject() - .endObject()).get(); + prepareCreate("idx").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("name") + .field("type", "keyword") + .endObject() + .startObject("value") + .field("type", "integer") + .endObject() + .endObject() + .endObject() + .endObject() + ).get(); ensureSearchable("idx"); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); SearchResponse searchResponse = client().prepareSearch("idx") - .addAggregation(histogram("values").field("value1").interval(1) - .subAggregation(terms("names").field("name") - .collectMode(aggCollectionMode ))) - .get(); + .addAggregation( + histogram("values").field("value1").interval(1).subAggregation(terms("names").field("name").collectMode(aggCollectionMode)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, Matchers.equalTo(0L)); Histogram values = searchResponse.getAggregations().get("values"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index 6774666b7a1bd..338d4ae8a7d46 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations; import com.carrotsearch.hppc.IntHashSet; + import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -84,16 +85,16 @@ protected Map, Object>> pluginScripts() { @Before private void setupMaxBuckets() { // disables the max bucket limit for this test - client().admin().cluster().prepareUpdateSettings() + client().admin() + .cluster() + .prepareUpdateSettings() .setTransientSettings(Collections.singletonMap("search.max_buckets", Integer.MAX_VALUE)) .get(); } @After private void cleanupMaxBuckets() { - client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Collections.singletonMap("search.max_buckets", null)) - .get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Collections.singletonMap("search.max_buckets", 
null)).get(); } // Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported @@ -109,47 +110,43 @@ public void testRandomRanges() throws Exception { } } - prepareCreate("idx") - .setMapping(jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("values") - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endObject()).get(); + prepareCreate("idx").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("values") + .field("type", "double") + .endObject() + .endObject() + .endObject() + .endObject() + ).get(); for (int i = 0; i < docs.length; ++i) { - XContentBuilder source = jsonBuilder() - .startObject() - .startArray("values"); + XContentBuilder source = jsonBuilder().startObject().startArray("values"); for (int j = 0; j < docs[i].length; ++j) { source = source.value(docs[i][j]); } source = source.endArray().endObject(); client().prepareIndex("idx").setSource(source).get(); } - assertNoFailures(client().admin().indices().prepareRefresh("idx"). - setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .get()); + assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get()); final int numRanges = randomIntBetween(1, 20); final double[][] ranges = new double[numRanges][]; for (int i = 0; i < ranges.length; ++i) { switch (randomInt(2)) { - case 0: - ranges[i] = new double[] { Double.NEGATIVE_INFINITY, randomInt(100) }; - break; - case 1: - ranges[i] = new double[] { randomInt(100), Double.POSITIVE_INFINITY }; - break; - case 2: - ranges[i] = new double[] { randomInt(100), randomInt(100) }; - break; - default: - throw new AssertionError(); + case 0: + ranges[i] = new double[] { Double.NEGATIVE_INFINITY, randomInt(100) }; + break; + case 1: + ranges[i] = new double[] { randomInt(100), Double.POSITIVE_INFINITY }; + break; + case 2: + ranges[i] = new double[] { randomInt(100), randomInt(100) }; + break; + default: + throw new AssertionError(); } } @@ -171,7 +168,7 @@ public void testRandomRanges() throws Exception { if (ranges[i][0] != Double.NEGATIVE_INFINITY) { filter = filter.from(ranges[i][0]); } - if (ranges[i][1] != Double.POSITIVE_INFINITY){ + if (ranges[i][1] != Double.POSITIVE_INFINITY) { filter = filter.to(ranges[i][1]); } reqBuilder = reqBuilder.addAggregation(filter("filter" + i, filter)); @@ -214,32 +211,32 @@ public void testDuelTerms() throws Exception { final IntHashSet valuesSet = new IntHashSet(); cluster().wipeIndices("idx"); - prepareCreate("idx") - .setMapping(jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("num") - .field("type", "double") - .endObject() - .startObject("string_values") - .field("type", "keyword") - .startObject("fields") - .startObject("doc_values") - .field("type", "keyword") - .field("index", false) - .endObject() - .endObject() - .endObject() - .startObject("long_values") - .field("type", "long") - .endObject() - .startObject("double_values") - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endObject()).get(); + prepareCreate("idx").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("num") + .field("type", "double") + .endObject() + .startObject("string_values") + .field("type", "keyword") + .startObject("fields") + .startObject("doc_values") + .field("type", "keyword") + .field("index", false) + .endObject() + 
.endObject() + .endObject() + .startObject("long_values") + .field("type", "long") + .endObject() + .startObject("double_values") + .field("type", "double") + .endObject() + .endObject() + .endObject() + .endObject() + ).get(); List indexingRequests = new ArrayList<>(); for (int i = 0; i < numDocs; ++i) { @@ -248,10 +245,7 @@ public void testDuelTerms() throws Exception { values[j] = randomInt(maxNumTerms - 1) - 1000; valuesSet.add(values[j]); } - XContentBuilder source = jsonBuilder() - .startObject() - .field("num", randomDouble()) - .startArray("long_values"); + XContentBuilder source = jsonBuilder().startObject().field("num", randomDouble()).startArray("long_values"); for (int j = 0; j < values.length; ++j) { source = source.value(values[j]); } @@ -268,45 +262,45 @@ public void testDuelTerms() throws Exception { } indexRandom(true, indexingRequests); - assertNoFailures(client().admin().indices().prepareRefresh("idx") - .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .execute().get()); + assertNoFailures( + client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get() + ); SearchResponse resp = client().prepareSearch("idx") - .addAggregation( - terms("long") - .field("long_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(min("min").field("num"))) - .addAggregation( - terms("double") - .field("double_values") - .size(maxNumTerms) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(max("max").field("num"))) - .addAggregation( - terms("string_map") - .field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) - .size(maxNumTerms) - .subAggregation(stats("stats").field("num"))) - .addAggregation( - terms("string_global_ordinals") - .field("string_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) - .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num"))) - .addAggregation( - terms("string_global_ordinals_doc_values") - .field("string_values.doc_values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) - .size(maxNumTerms) - .subAggregation(extendedStats("stats").field("num"))) - .get(); + .addAggregation( + terms("long").field("long_values") + .size(maxNumTerms) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(min("min").field("num")) + ) + .addAggregation( + terms("double").field("double_values") + .size(maxNumTerms) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(max("max").field("num")) + ) + .addAggregation( + terms("string_map").field("string_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()) + .size(maxNumTerms) + .subAggregation(stats("stats").field("num")) + ) + .addAggregation( + terms("string_global_ordinals").field("string_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + .size(maxNumTerms) + .subAggregation(extendedStats("stats").field("num")) + ) + .addAggregation( + terms("string_global_ordinals_doc_values").field("string_values.doc_values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + 
.executionHint(TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS.toString()) + .size(maxNumTerms) + .subAggregation(extendedStats("stats").field("num")) + ) + .get(); assertAllSuccessful(resp); assertEquals(numDocs, resp.getHits().getTotalHits().value); @@ -339,18 +333,17 @@ public void testDuelTerms() throws Exception { // Duel between histograms and scripted terms public void testDuelTermsHistogram() throws Exception { - prepareCreate("idx") - .setMapping(jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("num") - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endObject()).get(); - + prepareCreate("idx").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("num") + .field("type", "double") + .endObject() + .endObject() + .endObject() + .endObject() + ).get(); final int numDocs = scaledRandomIntBetween(500, 5000); final int maxNumTerms = randomIntBetween(10, 2000); @@ -362,10 +355,7 @@ public void testDuelTermsHistogram() throws Exception { } for (int i = 0; i < numDocs; ++i) { - XContentBuilder source = jsonBuilder() - .startObject() - .field("num", randomDouble()) - .startArray("values"); + XContentBuilder source = jsonBuilder().startObject().field("num", randomDouble()).startArray("values"); final int numValues = randomInt(4); for (int j = 0; j < numValues; ++j) { source = source.value(randomFrom(values)); @@ -373,26 +363,22 @@ public void testDuelTermsHistogram() throws Exception { source = source.endArray().endObject(); client().prepareIndex("idx").setSource(source).get(); } - assertNoFailures(client().admin().indices().prepareRefresh("idx") - .setIndicesOptions(IndicesOptions.lenientExpandOpen()) - .execute().get()); + assertNoFailures( + client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get() + ); Map params = new HashMap<>(); params.put("interval", interval); SearchResponse resp = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .field("values") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params)) - .size(maxNumTerms)) - .addAggregation( - histogram("histo") - .field("values") - .interval(interval) - .minDocCount(1)) - .get(); + .addAggregation( + terms("terms").field("values") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / interval)", params)) + .size(maxNumTerms) + ) + .addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)) + .get(); assertSearchResponse(resp); @@ -410,17 +396,17 @@ public void testDuelTermsHistogram() throws Exception { public void testLargeNumbersOfPercentileBuckets() throws Exception { // test high numbers of percentile buckets to make sure paging and release work correctly - prepareCreate("idx") - .setMapping(jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("double_value") - .field("type", "double") - .endObject() - .endObject() - .endObject() - .endObject()).get(); + prepareCreate("idx").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("double_value") + .field("type", "double") + .endObject() + .endObject() + .endObject() + .endObject() + ).get(); final int numDocs = scaledRandomIntBetween(2500, 5000); 
logger.info("Indexing [{}] docs", numDocs); @@ -431,12 +417,12 @@ public void testLargeNumbersOfPercentileBuckets() throws Exception { indexRandom(true, indexingRequests); SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .field("double_value") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(percentiles("pcts").field("double_value"))) - .get(); + .addAggregation( + terms("terms").field("double_value") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(percentiles("pcts").field("double_value")) + ) + .get(); assertAllSuccessful(response); assertEquals(numDocs, response.getHits().getTotalHits().value); } @@ -447,13 +433,12 @@ public void testReduce() throws Exception { final int value = randomIntBetween(0, 10); indexRandom(true, client().prepareIndex("idx").setSource("f", value)); SearchResponse response = client().prepareSearch("idx") - .addAggregation(filter("filter", QueryBuilders.matchAllQuery()) - .subAggregation(range("range") - .field("f") - .addUnboundedTo(6) - .addUnboundedFrom(6) - .subAggregation(sum("sum").field("f")))) - .get(); + .addAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + range("range").field("f").addUnboundedTo(6).addUnboundedFrom(6).subAggregation(sum("sum").field("f")) + ) + ) + .get(); assertSearchResponse(response); @@ -490,7 +475,7 @@ private void assertEquals(Terms t1, Terms t2) { List t1Buckets = t1.getBuckets(); List t2Buckets = t1.getBuckets(); assertEquals(t1Buckets.size(), t2Buckets.size()); - for (Iterator it1 = t1Buckets.iterator(), it2 = t2Buckets.iterator(); it1.hasNext(); ) { + for (Iterator it1 = t1Buckets.iterator(), it2 = t2Buckets.iterator(); it1.hasNext();) { final Terms.Bucket b1 = it1.next(); final Terms.Bucket b2 = it2.next(); assertEquals(b1.getDocCount(), b2.getDocCount()); @@ -510,15 +495,29 @@ public void testDuelDepthBreadthFirst() throws Exception { } indexRandom(true, reqs); - final SearchResponse r1 = client().prepareSearch("idx").addAggregation( - terms("f1").field("f1").collectMode(SubAggCollectionMode.DEPTH_FIRST) - .subAggregation(terms("f2").field("f2").collectMode(SubAggCollectionMode.DEPTH_FIRST) - .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)))).get(); + final SearchResponse r1 = client().prepareSearch("idx") + .addAggregation( + terms("f1").field("f1") + .collectMode(SubAggCollectionMode.DEPTH_FIRST) + .subAggregation( + terms("f2").field("f2") + .collectMode(SubAggCollectionMode.DEPTH_FIRST) + .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)) + ) + ) + .get(); assertSearchResponse(r1); - final SearchResponse r2 = client().prepareSearch("idx").addAggregation( - terms("f1").field("f1").collectMode(SubAggCollectionMode.BREADTH_FIRST) - .subAggregation(terms("f2").field("f2").collectMode(SubAggCollectionMode.BREADTH_FIRST) - .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)))).get(); + final SearchResponse r2 = client().prepareSearch("idx") + .addAggregation( + terms("f1").field("f1") + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .subAggregation( + terms("f2").field("f2") + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)) + ) + ) + .get(); assertSearchResponse(r2); final Terms t1 = r1.getAggregations().get("f1"); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java index 734ab059feab8..d8cc4537f1f01 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/FiltersAggsRewriteIT.java @@ -47,8 +47,10 @@ public void testWrapperQueryIsRewritten() throws IOException { builder.endObject(); bytesReference = BytesReference.bytes(builder); } - FiltersAggregationBuilder builder = new FiltersAggregationBuilder("titles", new FiltersAggregator.KeyedFilter("titleterms", - new WrapperQueryBuilder(bytesReference))); + FiltersAggregationBuilder builder = new FiltersAggregationBuilder( + "titles", + new FiltersAggregator.KeyedFilter("titleterms", new WrapperQueryBuilder(bytesReference)) + ); Map metadata = new HashMap<>(); metadata.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); builder.setMetadata(metadata); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java index 982196a43756e..1e7fe6be8f7f7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java @@ -26,48 +26,40 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; - public class MetadataIT extends ESIntegTestCase { public void testMetadataSetOnAggregationResult() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("name", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("name", "type=keyword").get()); IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)]; for (int i = 0; i < builders.length; i++) { String name = "name_" + randomIntBetween(1, 10); - builders[i] = client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("name", name) - .field("value", randomInt()) - .endObject()); + builders[i] = client().prepareIndex("idx") + .setSource(jsonBuilder().startObject().field("name", name).field("value", randomInt()).endObject()); } indexRandom(true, builders); ensureSearchable(); - final Map nestedMetadata = new HashMap() {{ - put("nested", "value"); - }}; - - Map metadata = new HashMap() {{ - put("key", "value"); - put("numeric", 1.2); - put("bool", true); - put("complex", nestedMetadata); - }}; + final Map nestedMetadata = new HashMap() { + { + put("nested", "value"); + } + }; + + Map metadata = new HashMap() { + { + put("key", "value"); + put("numeric", 1.2); + put("bool", true); + put("complex", nestedMetadata); + } + }; SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("the_terms") - .setMetadata(metadata) - .field("name") - .subAggregation( - sum("the_sum") - .setMetadata(metadata) - .field("value") - ) - ) - .addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)) - .get(); + .addAggregation( + terms("the_terms").setMetadata(metadata).field("name").subAggregation(sum("the_sum").setMetadata(metadata).field("value")) + ) + 
.addAggregation(maxBucket("the_max_bucket", "the_terms>the_sum").setMetadata(metadata)) + .get(); assertSearchResponse(response); @@ -104,7 +96,7 @@ private void assertMetadata(Map returnedMetadata) { assertNotNull(nestedObject); @SuppressWarnings("unchecked") - Map nestedMap = (Map)nestedObject; + Map nestedMap = (Map) nestedObject; assertEquals("value", nestedMap.get("nested")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java index 97683a2f4f4e0..b044353ebbf9e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MissingValueIT.java @@ -43,17 +43,20 @@ protected int maximumNumberOfShards() { @Override protected void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("idx") - .setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get()); - indexRandom(true, - client().prepareIndex("idx").setId("1").setSource(), - client().prepareIndex("idx").setId("2") - .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2")); + assertAcked(prepareCreate("idx").setMapping("date", "type=date", "location", "type=geo_point", "str", "type=keyword").get()); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource(), + client().prepareIndex("idx") + .setId("2") + .setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2") + ); } public void testUnmappedTerms() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("my_terms").field("non_existing_field").missing("bar")).get(); + .addAggregation(terms("my_terms").field("non_existing_field").missing("bar")) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); assertEquals(1, terms.getBuckets().size()); @@ -62,11 +65,9 @@ public void testUnmappedTerms() { public void testStringTerms() { for (ExecutionMode mode : ExecutionMode.values()) { - SearchResponse response = client().prepareSearch("idx").addAggregation( - terms("my_terms") - .field("str") - .executionHint(mode.toString()) - .missing("bar")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("my_terms").field("str").executionHint(mode.toString()).missing("bar")) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); assertEquals(2, terms.getBuckets().size()); @@ -82,16 +83,14 @@ public void testStringTerms() { } public void testLongTerms() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("my_terms").field("long").missing(4)).get(); + SearchResponse response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(4)).get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); assertEquals(2, terms.getBuckets().size()); assertEquals(1, terms.getBucketByKey("3").getDocCount()); assertEquals(1, terms.getBucketByKey("4").getDocCount()); - response = client().prepareSearch("idx") - .addAggregation(terms("my_terms").field("long").missing(3)).get(); + response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(3)).get(); assertSearchResponse(response); terms = 
response.getAggregations().get("my_terms"); assertEquals(1, terms.getBuckets().size()); @@ -99,8 +98,7 @@ public void testLongTerms() { } public void testDoubleTerms() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("my_terms").field("double").missing(4.5)).get(); + SearchResponse response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(4.5)).get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); assertEquals(2, terms.getBuckets().size()); @@ -116,7 +114,8 @@ public void testDoubleTerms() { public void testUnmappedHistogram() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("my_histogram").field("non-existing_field").interval(5).missing(12)).get(); + .addAggregation(histogram("my_histogram").field("non-existing_field").interval(5).missing(12)) + .get(); assertSearchResponse(response); Histogram histogram = response.getAggregations().get("my_histogram"); assertEquals(1, histogram.getBuckets().size()); @@ -126,7 +125,8 @@ public void testUnmappedHistogram() { public void testHistogram() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("my_histogram").field("long").interval(5).missing(7)).get(); + .addAggregation(histogram("my_histogram").field("long").interval(5).missing(7)) + .get(); assertSearchResponse(response); Histogram histogram = response.getAggregations().get("my_histogram"); assertEquals(2, histogram.getBuckets().size()); @@ -135,8 +135,7 @@ public void testHistogram() { assertEquals(5d, histogram.getBuckets().get(1).getKey()); assertEquals(1, histogram.getBuckets().get(1).getDocCount()); - response = client().prepareSearch("idx") - .addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)).get(); + response = client().prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)).get(); assertSearchResponse(response); histogram = response.getAggregations().get("my_histogram"); assertEquals(1, histogram.getBuckets().size()); @@ -146,9 +145,8 @@ public void testHistogram() { public void testDateHistogram() { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2014-05-07")) - .get(); + .addAggregation(dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2014-05-07")) + .get(); assertSearchResponse(response); Histogram histogram = response.getAggregations().get("my_histogram"); assertEquals(2, histogram.getBuckets().size()); @@ -158,9 +156,8 @@ public void testDateHistogram() { assertEquals(1, histogram.getBuckets().get(1).getDocCount()); response = client().prepareSearch("idx") - .addAggregation( - dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2015-05-07")) - .get(); + .addAggregation(dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2015-05-07")) + .get(); assertSearchResponse(response); histogram = response.getAggregations().get("my_histogram"); assertEquals(1, histogram.getBuckets().size()); @@ -169,8 +166,7 @@ public void testDateHistogram() { } public void testCardinality() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("card").field("long").missing(2)).get(); + SearchResponse response = 
client().prepareSearch("idx").addAggregation(cardinality("card").field("long").missing(2)).get(); assertSearchResponse(response); Cardinality cardinality = response.getAggregations().get("card"); assertEquals(2, cardinality.getValue()); @@ -178,15 +174,15 @@ public void testCardinality() { public void testPercentiles() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(percentiles("percentiles").field("long").missing(1000)).get(); + .addAggregation(percentiles("percentiles").field("long").missing(1000)) + .get(); assertSearchResponse(response); Percentiles percentiles = response.getAggregations().get("percentiles"); assertEquals(1000, percentiles.percentile(100), 0); } public void testStats() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(stats("stats").field("long").missing(5)).get(); + SearchResponse response = client().prepareSearch("idx").addAggregation(stats("stats").field("long").missing(5)).get(); assertSearchResponse(response); Stats stats = response.getAggregations().get("stats"); assertEquals(2, stats.getCount()); @@ -195,7 +191,8 @@ public void testStats() { public void testUnmappedGeoBounds() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")).get(); + .addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")) + .get(); assertSearchResponse(response); GeoBounds bounds = response.getAggregations().get("bounds"); assertThat(bounds.bottomRight().lat(), closeTo(2.0, 1E-5)); @@ -205,8 +202,7 @@ public void testUnmappedGeoBounds() { } public void testGeoBounds() { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoBounds("bounds").field("location").missing("2,1")).get(); + SearchResponse response = client().prepareSearch("idx").addAggregation(geoBounds("bounds").field("location").missing("2,1")).get(); assertSearchResponse(response); GeoBounds bounds = response.getAggregations().get("bounds"); assertThat(bounds.bottomRight().lat(), closeTo(1.0, 1E-5)); @@ -217,7 +213,8 @@ public void testGeoBounds() { public void testGeoCentroid() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoCentroid("centroid").field("location").missing("2,1")).get(); + .addAggregation(geoCentroid("centroid").field("location").missing("2,1")) + .get(); assertSearchResponse(response); GeoCentroid centroid = response.getAggregations().get("centroid"); GeoPoint point = new GeoPoint(1.5, 1.5); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java index e03a93488b7e5..70d90c48d3bd3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/AdjacencyMatrixIT.java @@ -86,8 +86,11 @@ public void setupSuiteScopeCluster() throws Exception { } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i) - .setSource(jsonBuilder().startObject().field("value", i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } indexRandom(true, builders); 
ensureSearchable(); @@ -95,9 +98,8 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(adjacencyMatrix("tags", - newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2")))) - .get(); + .addAggregation(adjacencyMatrix("tags", newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2")))) + .get(); assertSearchResponse(response); @@ -128,9 +130,8 @@ public void testSimple() throws Exception { public void testCustomSeparator() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(adjacencyMatrix("tags", "\t", - newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2")))) - .get(); + .addAggregation(adjacencyMatrix("tags", "\t", newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2")))) + .get(); assertSearchResponse(response); @@ -147,15 +148,13 @@ public void testCustomSeparator() throws Exception { } - // See NullPointer issue when filters are empty: // https://github.com/elastic/elasticsearch/issues/8438 public void testEmptyFilterDeclarations() throws Exception { QueryBuilder emptyFilter = new BoolQueryBuilder(); SearchResponse response = client().prepareSearch("idx") - .addAggregation(adjacencyMatrix("tags", - newMap("all", emptyFilter).add("tag1", termQuery("tag", "tag1")))) - .get(); + .addAggregation(adjacencyMatrix("tags", newMap("all", emptyFilter).add("tag1", termQuery("tag", "tag1")))) + .get(); assertSearchResponse(response); @@ -174,13 +173,11 @@ public void testWithSubAggregation() throws Exception { boolQ.must(termQuery("tag", "tag1")); boolQ.must(termQuery("tag", "tag2")); SearchResponse response = client().prepareSearch("idx") - .addAggregation( - adjacencyMatrix("tags", - newMap("tag1", termQuery("tag", "tag1")) - .add("tag2", termQuery("tag", "tag2")) - .add("both", boolQ)) - .subAggregation(avg("avg_value").field("value"))) - .get(); + .addAggregation( + adjacencyMatrix("tags", newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2")).add("both", boolQ)) + .subAggregation(avg("avg_value").field("value")) + ) + .get(); assertSearchResponse(response); @@ -190,10 +187,10 @@ public void testWithSubAggregation() throws Exception { int expectedBuckets = 0; if (numTag1Docs > 0) { - expectedBuckets ++; + expectedBuckets++; } if (numTag2Docs > 0) { - expectedBuckets ++; + expectedBuckets++; } if (numMultiTagDocs > 0) { // both, both&tag1, both&tag2, tag1&tag2 @@ -201,11 +198,11 @@ public void testWithSubAggregation() throws Exception { } assertThat(matrix.getBuckets().size(), equalTo(expectedBuckets)); - assertThat(((InternalAggregation)matrix).getProperty("_bucket_count"), equalTo(expectedBuckets)); + assertThat(((InternalAggregation) matrix).getProperty("_bucket_count"), equalTo(expectedBuckets)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)matrix).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)matrix).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)matrix).getProperty("avg_value.value"); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) matrix).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) matrix).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) matrix).getProperty("avg_value.value"); assertEquals(expectedBuckets, 
propertiesKeys.length); assertEquals(propertiesKeys.length, propertiesDocCounts.length); @@ -259,8 +256,7 @@ public void testWithSubAggregation() throws Exception { assertThat(bucketIntersectQ, Matchers.nullValue()); Bucket tag1Both = matrix.getBucketByKey("both&tag1"); assertThat(tag1Both, Matchers.nullValue()); - } else - { + } else { assertThat(bucketBothQ, Matchers.notNullValue()); assertThat(bucketBothQ.getDocCount(), equalTo((long) numMultiTagDocs)); Avg avgValueBothQ = bucketBothQ.getAggregations().get("avg_value"); @@ -278,10 +274,9 @@ public void testWithSubAggregation() throws Exception { assertThat(avgValueTag1BothIntersectQ.getValue(), equalTo(avgValueBothQ.getValue())); } - } - public void testTooLargeMatrix() throws Exception{ + public void testTooLargeMatrix() throws Exception { // Create more filters than is permitted by Lucene Bool clause settings. MapBuilder filtersMap = new MapBuilder(); @@ -291,9 +286,7 @@ public void testTooLargeMatrix() throws Exception{ } try { - client().prepareSearch("idx") - .addAggregation(adjacencyMatrix("tags", "\t", filtersMap)) - .get(); + client().prepareSearch("idx").addAggregation(adjacencyMatrix("tags", "\t", filtersMap)).get(); fail("SearchPhaseExecutionException should have been thrown"); } catch (SearchPhaseExecutionException ex) { assertThat(ex.getCause().getMessage(), containsString("Number of filters is too large")); @@ -301,8 +294,11 @@ public void testTooLargeMatrix() throws Exception{ } public void testAsSubAggregation() { - SearchResponse response = client().prepareSearch("idx").addAggregation(histogram("histo").field("value").interval(2L) - .subAggregation(adjacencyMatrix("matrix", newMap("all", matchAllQuery())))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field("value").interval(2L).subAggregation(adjacencyMatrix("matrix", newMap("all", matchAllQuery()))) + ) + .get(); assertSearchResponse(response); @@ -323,13 +319,17 @@ public void testWithContextBasedSubAggregation() throws Exception { try { client().prepareSearch("idx") - .addAggregation(adjacencyMatrix("tags", - newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2"))) - .subAggregation(avg("avg_value"))) - .get(); + .addAggregation( + adjacencyMatrix("tags", newMap("tag1", termQuery("tag", "tag1")).add("tag2", termQuery("tag", "tag2"))).subAggregation( + avg("avg_value") + ) + ) + .get(); - fail("expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" - + "context which the sub-aggregation can inherit"); + fail( + "expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" + + "context which the sub-aggregation can inherit" + ); } catch (ElasticsearchException e) { assertThat(e.getMessage(), is("all shards failed")); @@ -337,10 +337,15 @@ public void testWithContextBasedSubAggregation() throws Exception { } public void testEmptyAggregation() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx").setQuery(matchAllQuery()).addAggregation(histogram("histo").field("value").interval(1L) - .minDocCount(0).subAggregation(adjacencyMatrix("matrix", newMap("all", matchAllQuery())))) - .get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(adjacencyMatrix("matrix", 
newMap("all", matchAllQuery()))) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index c963315cd71e0..ad9574b1d7c53 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -48,35 +48,35 @@ public void setupSuiteScopeCluster() throws Exception { break; case 1: numMultiFalses++; - multiValue = new boolean[] {false}; + multiValue = new boolean[] { false }; break; case 2: numMultiTrues++; - multiValue = new boolean[] {true}; + multiValue = new boolean[] { true }; break; case 3: numMultiFalses++; numMultiTrues++; - multiValue = new boolean[] {false, true}; + multiValue = new boolean[] { false, true }; break; default: throw new AssertionError(); } - builders[i] = client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, singleValue) - .array(MULTI_VALUED_FIELD_NAME, multiValue) - .endObject()); + builders[i] = client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, singleValue) + .array(MULTI_VALUED_FIELD_NAME, multiValue) + .endObject() + ); } indexRandom(true, builders); } public void testSingleValueField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation(terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(response); @@ -107,10 +107,8 @@ public void testSingleValueField() throws Exception { public void testMultiValueField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(response); @@ -141,11 +139,10 @@ public void testMultiValueField() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .size(between(1, 5)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME).size(between(1, 5)).collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 46fc3cd772847..17802313e0af8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -86,22 +86,32 @@ private static String format(ZonedDateTime date, String 
pattern) { } private IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { - return client().prepareIndex(idx).setSource(jsonBuilder() - .startObject() - .timeField("date", date) - .field("value", value) - .startArray("dates").timeValue(date).timeValue(date.plusMonths(1).plusDays(1)).endArray() - .endObject()); + return client().prepareIndex(idx) + .setSource( + jsonBuilder().startObject() + .timeField("date", date) + .field("value", value) + .startArray("dates") + .timeValue(date) + .timeValue(date.plusMonths(1).plusDays(1)) + .endArray() + .endObject() + ); } private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("value", value) - .field("constant", 1) - .timeField("date", date(month, day)) - .startArray("dates").timeValue(date(month, day)).timeValue(date(month + 1, day + 1)).endArray() - .endObject()); + return client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("value", value) + .field("constant", 1) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } @Override @@ -111,21 +121,25 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping("value", "type=integer")); List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder() - .startObject() - .field("value", i * 2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } getMultiSortDocs(builders); - builders.addAll(Arrays.asList( + builders.addAll( + Arrays.asList( indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3 indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16 indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3 indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16 - indexDoc(3, 23, 6))); // date: Mar 23, dates: Mar 23, Apr 24 + indexDoc(3, 23, 6) + ) + ); // date: Mar 23, dates: Mar 23, Apr 24 indexRandom(true, builders); ensureSearchable(); } @@ -148,30 +162,49 @@ private void getMultiSortDocs(List builders) throws IOExcep addExpectedBucket(date(1, 6), 1, 5, 1); addExpectedBucket(date(1, 7), 1, 5, 1); - assertAcked(client().admin().indices().prepareCreate("sort_idx") - .setMapping("date", "type=date").get()); + assertAcked(client().admin().indices().prepareCreate("sort_idx").setMapping("date", "type=date").get()); for (int i = 1; i <= 3; i++) { - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).field("l", 1).field("d", i).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 2)).field("l", 2).field("d", i).endObject()) + ); } - builders.add(client().prepareIndex("sort_idx").setSource( - 
jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 3)).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 3).plusHours(1)).field("l", 3).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 4)).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 4).plusHours(2)).field("l", 3).field("d", 3).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 5)).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 5).plusHours(12)).field("l", 5).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 6)).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 7)).field("l", 5).field("d", 1).endObject()) + ); } @Override @@ -194,8 +227,8 @@ private static String getBucketKeyAsString(ZonedDateTime key, ZoneId tz) { public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) - .get(); + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) + .get(); assertSearchResponse(response); @@ -229,9 +262,14 @@ public void testSingleValuedField() throws Exception { public void testSingleValuedFieldWithTimeZone() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1) - .timeZone(ZoneId.of("+01:00"))).execute() - .actionGet(); + .addAggregation( + 
dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .minDocCount(1) + .timeZone(ZoneId.of("+01:00")) + ) + .execute() + .actionGet(); ZoneId tz = ZoneId.of("+01:00"); assertSearchResponse(response); @@ -292,10 +330,10 @@ public void testSingleValued_timeZone_epoch() throws Exception { } ZoneId tz = ZoneId.of("+01:00"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date") - .calendarInterval(DateHistogramInterval.DAY).minDocCount(1) - .timeZone(tz).format(format)) - .get(); + .addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).minDocCount(1).timeZone(tz).format(format) + ) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -329,11 +367,8 @@ public void testSingleValued_timeZone_epoch() throws Exception { public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.key(true))) - .get(); + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(true))) + .get(); assertSearchResponse(response); @@ -352,11 +387,10 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.key(false))) - .get(); + .addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.key(false)) + ) + .get(); assertSearchResponse(response); @@ -374,11 +408,10 @@ public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { public void testSingleValuedFieldOrderedByCountAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.count(true))) - .get(); + .addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(true)) + ) + .get(); assertSearchResponse(response); @@ -396,11 +429,10 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { public void testSingleValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.count(false))) - .get(); + .addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) + ) + .get(); assertSearchResponse(response); @@ -418,9 +450,10 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH) - .subAggregation(sum("sum").field("value"))) - .get(); + .addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).subAggregation(sum("sum").field("value")) + ) + 
.get(); assertSearchResponse(response); @@ -429,10 +462,10 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - assertThat(((InternalAggregation)histo).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)histo).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); + assertThat(((InternalAggregation) histo).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); @@ -476,12 +509,13 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("sum", true)) - .subAggregation(max("sum").field("value"))) - .get(); + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("sum", true)) + .subAggregation(max("sum").field("value")) + ) + .get(); assertSearchResponse(response); @@ -499,12 +533,13 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("sum", false)) - .subAggregation(max("sum").field("value"))) - .get(); + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("sum", false)) + .subAggregation(max("sum").field("value")) + ) + .get(); assertSearchResponse(response); @@ -522,12 +557,13 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("stats", "sum", false)) - .subAggregation(stats("stats").field("value"))) - .get(); + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("stats", "sum", false)) + .subAggregation(stats("stats").field("value")) + ) + .get(); assertSearchResponse(response); @@ -545,11 +581,12 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - 
.calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.aggregation("max_constant", randomBoolean())) - .subAggregation(max("max_constant").field("constant"))) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant")) + ) .get(); assertSearchResponse(response); @@ -569,16 +606,17 @@ public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { boolean asc = true; try { - client() - .prepareSearch("idx") + client().prepareSearch("idx") .addAggregation( dateHistogram("histo").field("date") .calendarInterval(DateHistogramInterval.MONTH) .order(BucketOrder.aggregation("inner_histo>avg", asc)) - .subAggregation(dateHistogram("inner_histo") - .calendarInterval(DateHistogramInterval.MONTH) - .field("dates") - .subAggregation(avg("avg").field("value")))) + .subAggregation( + dateHistogram("inner_histo").calendarInterval(DateHistogramInterval.MONTH) + .field("dates") + .subAggregation(avg("avg").field("value")) + ) + ) .get(); fail("Expected an exception"); } catch (SearchPhaseExecutionException e) { @@ -601,10 +639,12 @@ public void testSingleValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH)).get(); + .addAggregation( + dateHistogram("histo").field("date") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + .get(); assertSearchResponse(response); @@ -648,8 +688,8 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH)) - .get(); + .addAggregation(dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH)) + .get(); assertSearchResponse(response); @@ -690,11 +730,10 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldOrderedByCountDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("dates") - .calendarInterval(DateHistogramInterval.MONTH) - .order(BucketOrder.count(false))) - .get(); + .addAggregation( + dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).order(BucketOrder.count(false)) + ) + .get(); assertSearchResponse(response); @@ -740,10 +779,12 @@ public void testMultiValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("dates") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH)).get(); + .addAggregation( + dateHistogram("histo").field("dates") + .script(new Script(ScriptType.INLINE, "mockscript", 
DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + .get(); assertSearchResponse(response); @@ -794,10 +835,11 @@ public void testScriptSingleValue() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").script( - new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .calendarInterval(DateHistogramInterval.MONTH)) - .get(); + .addAggregation( + dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + .get(); assertSearchResponse(response); @@ -833,10 +875,11 @@ public void testScriptMultiValued() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").script( - new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .calendarInterval(DateHistogramInterval.MONTH)) - .get(); + .addAggregation( + dateHistogram("histo").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + .get(); assertSearchResponse(response); @@ -875,21 +918,19 @@ public void testScriptMultiValued() throws Exception { assertThat(bucket.getDocCount(), equalTo(3L)); } - - - /* + /* [ Jan 2, Feb 3] [ Feb 2, Mar 3] [ Feb 15, Mar 16] [ Mar 2, Apr 3] [ Mar 15, Apr 16] [ Mar 23, Apr 24] - */ + */ public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) - .get(); + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) + .get(); assertSearchResponse(response); @@ -901,8 +942,8 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) - .get(); + .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) + .get(); assertSearchResponse(response); @@ -936,10 +977,14 @@ public void testPartiallyUnmapped() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(dateHistogram("date_histo").field("value").fixedInterval(DateHistogramInterval.HOUR)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -963,20 +1008,22 @@ public void testSingleValueWithTimeZone() throws Exception { IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; ZonedDateTime date = date("2014-03-11T00:00:00+00:00"); for (int i = 0; i < reqs.length; i++) { - reqs[i] = 
client().prepareIndex("idx2").setId("" + i) - .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); + reqs[i] = client().prepareIndex("idx2") + .setId("" + i) + .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(1); } indexRandom(true, reqs); SearchResponse response = client().prepareSearch("idx2") - .setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo") - .field("date") - .timeZone(ZoneId.of("-02:00")) - .calendarInterval(DateHistogramInterval.DAY) - .format("yyyy-MM-dd:HH-mm-ssZZZZZ")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date") + .timeZone(ZoneId.of("-02:00")) + .calendarInterval(DateHistogramInterval.DAY) + .format("yyyy-MM-dd:HH-mm-ssZZZZZ") + ) + .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -1004,10 +1051,9 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { ZonedDateTime baseKey = Instant.ofEpochMilli(intervalMillis * (base.toInstant().toEpochMilli() / intervalMillis)) .atZone(ZoneOffset.UTC); - prepareCreate("idx2") - .setSettings( - Settings.builder().put(indexSettings()).put("index.number_of_shards", 1) - .put("index.number_of_replicas", 0)).get(); + prepareCreate("idx2").setSettings( + Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + ).get(); int numOfBuckets = randomIntBetween(3, 6); int emptyBucketIndex = randomIntBetween(1, numOfBuckets - 2); // should be in the middle @@ -1066,14 +1112,15 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { SearchResponse response = null; try { response = client().prepareSearch("idx2") - .addAggregation(dateHistogram("histo") - .field("date") - .fixedInterval(DateHistogramInterval.days(interval)) - .minDocCount(0) - // when explicitly specifying a format, the extended bounds should be defined by the same format - .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) - .format(pattern)) - .get(); + .addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.days(interval)) + .minDocCount(0) + // when explicitly specifying a format, the extended bounds should be defined by the same format + .extendedBounds(new LongBounds(format(boundsMin, pattern), format(boundsMax, pattern))) + .format(pattern) + ) + .get(); if (invalidBoundsError) { fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); @@ -1113,9 +1160,9 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { */ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { String index = "test12278"; - prepareCreate(index) - .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) - .get(); + prepareCreate(index).setSettings( + Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + ).get(); DateMathParser parser = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis").toDateMathParser(); @@ -1133,18 +1180,25 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { SearchResponse response = null; // retrieve those docs with the same time zone and extended bounds - response = client() - .prepareSearch(index) - .setQuery(QueryBuilders.rangeQuery("date") - 
.from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId())) - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.hours(1)) - .timeZone(timezone).minDocCount(0).extendedBounds(new LongBounds("now/d", "now/d+23h")) - ).get(); + response = client().prepareSearch(index) + .setQuery( + QueryBuilders.rangeQuery("date").from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId()) + ) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.hours(1)) + .timeZone(timezone) + .minDocCount(0) + .extendedBounds(new LongBounds("now/d", "now/d+23h")) + ) + .get(); assertSearchResponse(response); - assertThat("Expected 24 buckets for one day aggregation with hourly interval", response.getHits().getTotalHits().value, - equalTo(2L)); + assertThat( + "Expected 24 buckets for one day aggregation with hourly interval", + response.getHits().getTotalHits().value, + equalTo(2L) + ); Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); @@ -1172,9 +1226,9 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { */ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { String index = "test23776"; - prepareCreate(index) - .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) - .get(); + prepareCreate(index).setSettings( + Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + ).get(); List builders = new ArrayList<>(); DateFormatter formatter = DateFormatter.forPattern("date_optional_time"); @@ -1187,13 +1241,15 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { SearchResponse response = null; // retrieve those docs with the same time zone and extended bounds - response = client() - .prepareSearch(index) - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.days(1)) - .offset("+6h").minDocCount(0) - .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) - ).get(); + response = client().prepareSearch(index) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.days(1)) + .offset("+6h") + .minDocCount(0) + .extendedBounds(new LongBounds("2016-01-01T06:00:00Z", "2016-01-08T08:00:00Z")) + ) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -1223,24 +1279,29 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { } public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception { - String mappingJson = Strings.toString(jsonBuilder().startObject() + String mappingJson = Strings.toString( + jsonBuilder().startObject() .startObject("properties") - .startObject("date").field("type", "date").field("format", "strict_date_optional_time||dd-MM-yyyy") - .endObject().endObject().endObject()); + .startObject("date") + .field("type", "date") + .field("format", "strict_date_optional_time||dd-MM-yyyy") + .endObject() + .endObject() + .endObject() + ); prepareCreate("idx2").setMapping(mappingJson).get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; for (int i = 0; i < reqs.length; i++) { - reqs[i] = client().prepareIndex("idx2").setId("" + i) - .setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject()); + 
reqs[i] = client().prepareIndex("idx2") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject()); } indexRandom(true, reqs); SearchResponse response = client().prepareSearch("idx2") - .setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo") - .field("date") - .calendarInterval(DateHistogramInterval.DAY)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(dateHistogram("date_histo").field("date").calendarInterval(DateHistogramInterval.DAY)) + .get(); assertSearchHits(response, "0", "1", "2", "3", "4"); @@ -1258,9 +1319,13 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception public void testIssue6965() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("+01:00")) - .calendarInterval(DateHistogramInterval.MONTH).minDocCount(0)) - .get(); + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("+01:00")) + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + ) + .get(); assertSearchResponse(response); @@ -1296,13 +1361,20 @@ public void testIssue6965() { public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionException { assertAcked(client().admin().indices().prepareCreate("test9491").setMapping("d", "type=date").get()); - indexRandom(true, client().prepareIndex("test9491").setSource("d", "2014-10-08T13:00:00Z"), - client().prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z")); + indexRandom( + true, + client().prepareIndex("test9491").setSource("d", "2014-10-08T13:00:00Z"), + client().prepareIndex("test9491").setSource("d", "2014-11-08T13:00:00Z") + ); ensureSearchable("test9491"); SearchResponse response = client().prepareSearch("test9491") - .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.YEAR) - .timeZone(ZoneId.of("Asia/Jerusalem")).format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX")) - .get(); + .addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.YEAR) + .timeZone(ZoneId.of("Asia/Jerusalem")) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + ) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); @@ -1312,16 +1384,22 @@ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionExc public void testIssue8209() throws InterruptedException, ExecutionException { assertAcked(client().admin().indices().prepareCreate("test8209").setMapping("d", "type=date").get()); - indexRandom(true, - client().prepareIndex("test8209").setSource("d", "2014-01-01T00:00:00Z"), - client().prepareIndex("test8209").setSource("d", "2014-04-01T00:00:00Z"), - client().prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z")); + indexRandom( + true, + client().prepareIndex("test8209").setSource("d", "2014-01-01T00:00:00Z"), + client().prepareIndex("test8209").setSource("d", "2014-04-01T00:00:00Z"), + client().prepareIndex("test8209").setSource("d", "2014-04-30T00:00:00Z") + ); ensureSearchable("test8209"); SearchResponse response = client().prepareSearch("test8209") - .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH) + .addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.MONTH) .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") - .timeZone(ZoneId.of("CET")).minDocCount(0)) - .get(); + .timeZone(ZoneId.of("CET")) 
+ .minDocCount(0) + ) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(4)); @@ -1351,10 +1429,14 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce ensureSearchable(indexDateUnmapped); SearchResponse response = client().prepareSearch(indexDateUnmapped) - .addAggregation( - dateHistogram("histo").field("dateField").calendarInterval(DateHistogramInterval.MONTH).format("yyyy-MM") - .minDocCount(0).extendedBounds(new LongBounds("2018-01", "2018-01"))) - .get(); + .addAggregation( + dateHistogram("histo").field("dateField") + .calendarInterval(DateHistogramInterval.MONTH) + .format("yyyy-MM") + .minDocCount(0) + .extendedBounds(new LongBounds("2018-01", "2018-01")) + ) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); @@ -1373,8 +1455,11 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, assertAcked(client().admin().indices().prepareCreate(index).setMapping("d", "type=date,format=epoch_millis").get()); indexRandom(true, client().prepareIndex(index).setSource("d", "1477954800000")); ensureSearchable(index); - SearchResponse response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin"))).get(); + SearchResponse response = client().prepareSearch(index) + .addAggregation( + dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")) + ) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); @@ -1385,9 +1470,14 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, } assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); - response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")).format("yyyy-MM-dd")) - .get(); + response = client().prepareSearch(index) + .addAggregation( + dateHistogram("histo").field("d") + .calendarInterval(DateHistogramInterval.MONTH) + .timeZone(ZoneId.of("Europe/Berlin")) + .format("yyyy-MM-dd") + ) + .get(); assertSearchResponse(response); histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); @@ -1406,38 +1496,64 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, */ public void testDSTEndTransition() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(new MatchNoneQueryBuilder()) - .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) - .calendarInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( - new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) - .get(); + .setQuery(new MatchNoneQueryBuilder()) + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("Europe/Oslo")) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(0) + .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) + ) + .get(); Histogram histo = response.getAggregations().get("histo"); List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - 
assertThat(((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - - ((ZonedDateTime) buckets.get(0).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); - assertThat(((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - - ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); - assertThat(((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat( + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()).toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()).toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); response = client().prepareSearch("idx") .setQuery(new MatchNoneQueryBuilder()) - .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) - .calendarInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( - new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) + .addAggregation( + dateHistogram("histo").field("date") + .timeZone(ZoneId.of("Europe/Oslo")) + .calendarInterval(DateHistogramInterval.HOUR) + .minDocCount(0) + .extendedBounds(new LongBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00")) + ) .get(); histo = response.getAggregations().get("histo"); buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertThat(((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - - ((ZonedDateTime) buckets.get(0).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); - assertThat(((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - - ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); - assertThat(((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat( + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(0).getKey()).toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(1).getKey()).toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); + assertThat( + ((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant() + .toEpochMilli(), + equalTo(3600000L) + ); } /** @@ -1445,53 +1561,142 @@ public void testDSTEndTransition() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=date") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=date") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); + .get() + ); String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("d", date), - client().prepareIndex("cache_test_idx").setId("2").setSource("d", date2)); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("d", date), + client().prepareIndex("cache_test_idx").setId("2").setSource("d", date2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "d"); - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateHistogram("histo").field("d") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) - .calendarInterval(DateHistogramInterval.MONTH)).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + dateHistogram("histo").field("d") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateHistogram("histo").field("d") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) - .calendarInterval(DateHistogramInterval.MONTH)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + dateHistogram("histo").field("d") + .script(new Script(ScriptType.INLINE, "mockscript", 
DateScriptMocksPlugin.LONG_PLUS_ONE_MONTH, params)) + .calendarInterval(DateHistogramInterval.MONTH) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(dateHistogram("histo").field("d").calendarInterval(DateHistogramInterval.MONTH)) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { @@ -1500,43 +1705,51 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyAsc() throws Exception { - int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; + int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndKeyAsc() throws Exception { - int[] expectedDays = new int[] { 5, 6, 7, 3, 4, 2, 1 }; + int[] expectedDays = new int[] { 5, 6, 7, 3, 4, 2, 1 }; assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { - int[] expectedDays = new int[] { 6, 7, 3, 4, 5, 1, 2 }; + int[] expectedDays = new int[] { 6, 7, 3, 4, 5, 1, 2 }; assertMultiSortResponse(expectedDays, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { - int[] expectedDays = new int[] { 6, 7, 3, 5, 4, 1, 2 }; + int[] expectedDays = new int[] { 6, 7, 3, 5, 4, 1, 2 }; assertMultiSortResponse(expectedDays, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws 
Exception { - int[] expectedDays = new int[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedDays, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), - BucketOrder.aggregation("avg_l", false)); + int[] expectedDays = new int[] { 2, 1, 4, 5, 3, 6, 7 }; + assertMultiSortResponse( + expectedDays, + BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { - int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; + int[] expectedDays = new int[] { 1, 2, 3, 4, 5, 6, 7 }; assertMultiSortResponse(expectedDays, BucketOrder.aggregation("avg_l", true)); } private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new); - SearchResponse response = client() - .prepareSearch("sort_idx") + SearchResponse response = client().prepareSearch("sort_idx") .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY).order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).get(); + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ) + .get(); assertSearchResponse(response); @@ -1570,15 +1783,16 @@ private ZonedDateTime key(Histogram.Bucket bucket) { */ public void testDateNanosHistogram() throws Exception { assertAcked(prepareCreate("nanos").setMapping("date", "type=date_nanos").get()); - indexRandom(true, - client().prepareIndex("nanos").setId("1").setSource("date", "2000-01-01")); - indexRandom(true, - client().prepareIndex("nanos").setId("2").setSource("date", "2000-01-02")); + indexRandom(true, client().prepareIndex("nanos").setId("1").setSource("date", "2000-01-01")); + indexRandom(true, client().prepareIndex("nanos").setId("2").setSource("date", "2000-01-02")); - //Search interval 24 hours + // Search interval 24 hours SearchResponse r = client().prepareSearch("nanos") - .addAggregation(dateHistogram("histo").field("date"). 
- fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("Europe/Berlin"))) + .addAggregation( + dateHistogram("histo").field("date") + .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)) + .timeZone(ZoneId.of("Europe/Berlin")) + ) .addDocValueField("date") .get(); assertSearchResponse(r); @@ -1586,14 +1800,15 @@ public void testDateNanosHistogram() throws Exception { Histogram histogram = r.getAggregations().get("histo"); List buckets = histogram.getBuckets(); assertEquals(2, buckets.size()); - assertEquals(946681200000L, ((ZonedDateTime)buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(946681200000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(946767600000L, ((ZonedDateTime)buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(946767600000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); assertEquals(1, buckets.get(1).getDocCount()); r = client().prepareSearch("nanos") - .addAggregation(dateHistogram("histo").field("date") - .fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC"))) + .addAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.seconds(60 * 60 * 24)).timeZone(ZoneId.of("UTC")) + ) .addDocValueField("date") .get(); assertSearchResponse(r); @@ -1601,19 +1816,18 @@ public void testDateNanosHistogram() throws Exception { histogram = r.getAggregations().get("histo"); buckets = histogram.getBuckets(); assertEquals(2, buckets.size()); - assertEquals(946684800000L, ((ZonedDateTime)buckets.get(0).getKey()).toEpochSecond() * 1000); + assertEquals(946684800000L, ((ZonedDateTime) buckets.get(0).getKey()).toEpochSecond() * 1000); assertEquals(1, buckets.get(0).getDocCount()); - assertEquals(946771200000L, ((ZonedDateTime)buckets.get(1).getKey()).toEpochSecond() * 1000); + assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000); assertEquals(1, buckets.get(1).getDocCount()); } public void testDateKeyFormatting() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.MONTH) - .timeZone(ZoneId.of("America/Edmonton"))) - .get(); + .addAggregation( + dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("America/Edmonton")) + ) + .get(); assertSearchResponse(response); @@ -1626,10 +1840,10 @@ public void testDateKeyFormatting() { public void testHardBoundsOnDates() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo") - .field("date") - .calendarInterval(DateHistogramInterval.DAY) - .hardBounds(new LongBounds("2012-02-01T00:00:00.000", "2012-03-03T00:00:00.000")) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .hardBounds(new LongBounds("2012-02-01T00:00:00.000", "2012-03-03T00:00:00.000")) ) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index aad4ef7963c4b..7c2c6128717a4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -36,7 +36,7 @@ * tests using all versions */ @ESIntegTestCase.SuiteScopeTestCase -@ESIntegTestCase.ClusterScope(scope= ESIntegTestCase.Scope.SUITE) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class DateHistogramOffsetIT extends ESIntegTestCase { private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss"; @@ -56,13 +56,14 @@ public void afterEachTest() throws IOException { internalCluster().wipeIndices("idx2"); } - private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, int idxIdStart) - throws IOException, InterruptedException { + private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, int idxIdStart) throws IOException, + InterruptedException { IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours]; for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) { - reqs[i - idxIdStart] = client().prepareIndex("idx2").setId("" + i) - .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); + reqs[i - idxIdStart] = client().prepareIndex("idx2") + .setId("" + i) + .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); date = date.plusHours(stepSizeHours); } indexRandom(true, reqs); @@ -72,13 +73,11 @@ public void testSingleValueWithPositiveOffset() throws Exception { prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, 1, 0); SearchResponse response = client().prepareSearch("idx2") - .setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo") - .field("date") - .offset("2h") - .format(DATE_FORMAT) - .fixedInterval(DateHistogramInterval.DAY)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date").offset("2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY) + ) + .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -94,13 +93,11 @@ public void testSingleValueWithNegativeOffset() throws Exception { prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, -1, 0); SearchResponse response = client().prepareSearch("idx2") - .setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo") - .field("date") - .offset("-2h") - .format(DATE_FORMAT) - .fixedInterval(DateHistogramInterval.DAY)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date").offset("-2h").format(DATE_FORMAT).fixedInterval(DateHistogramInterval.DAY) + ) + .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -120,14 +117,15 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { prepareIndex(date("2014-03-14T00:00:00+00:00"), 12, 1, 13); SearchResponse response = client().prepareSearch("idx2") - .setQuery(matchAllQuery()) - .addAggregation(dateHistogram("date_histo") - .field("date") - .offset("6h") - .minDocCount(0) - .format(DATE_FORMAT) - .fixedInterval(DateHistogramInterval.DAY)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + dateHistogram("date_histo").field("date") + .offset("6h") + .minDocCount(0) + .format(DATE_FORMAT) + .fixedInterval(DateHistogramInterval.DAY) + ) + .get(); assertThat(response.getHits().getTotalHits().value, equalTo(24L)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index a7340c8bee273..bba29ac405a84 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -55,12 +55,17 @@ public class DateRangeIT extends ESIntegTestCase { private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("value", value) - .timeField("date", date(month, day)) - .startArray("dates").timeValue(date(month, day)).timeValue(date(month + 1, day + 1)).endArray() - .endObject()); + return client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("value", value) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } private static ZonedDateTime date(int month, int day) { @@ -72,6 +77,7 @@ private static ZonedDateTime date(int month, int day, ZoneId timezone) { } private static int numDocs; + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); @@ -80,14 +86,16 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(7, 20); List docs = new ArrayList<>(); - docs.addAll(Arrays.asList( + docs.addAll( + Arrays.asList( indexDoc(1, 2, 1), // Jan 2 indexDoc(2, 2, 2), // Feb 2 indexDoc(2, 15, 3), // Feb 15 indexDoc(3, 2, 4), // Mar 2 indexDoc(3, 15, 5), // Mar 15 indexDoc(3, 23, 6) // Mar 23 - )); + ) + ); // dummy docs for (int i = docs.size(); i < numDocs; ++i) { @@ -95,10 +103,11 @@ public void setupSuiteScopeCluster() throws Exception { } assertAcked(prepareCreate("empty_bucket_idx").setMapping("value", "type=integer")); for (int i = 0; i < 2; i++) { - docs.add(client().prepareIndex("empty_bucket_idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field("value", i*2) - .endObject())); + docs.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } indexRandom(true, docs); ensureSearchable(); @@ -118,11 +127,14 @@ public void testDateMath() throws Exception { } else { rangeBuilder.script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)); } - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - rangeBuilder.addUnboundedTo("a long time ago", "now-50y").addRange("recently", "now-50y", "now-1y") - .addUnboundedFrom("last year", "now-1y").timeZone(ZoneId.of("Etc/GMT+5"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + rangeBuilder.addUnboundedTo("a long time ago", "now-50y") + .addRange("recently", "now-50y", "now-1y") + .addUnboundedFrom("last year", "now-1y") + .timeZone(ZoneId.of("Etc/GMT+5")) + ) + .get(); assertSearchResponse(response); @@ -151,16 +163,16 @@ public void testDateMath() throws Exception { public void testSingleValueField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15))) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, 
notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -197,16 +209,16 @@ public void testSingleValueField() throws Exception { public void testSingleValueFieldWithStringDates() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15")) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -243,17 +255,17 @@ public void testSingleValueFieldWithStringDates() throws Exception { public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("date") - .format("yyyy-MM-dd") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15")) - .get(); + .addAggregation( + dateRange("range").field("date") + .format("yyyy-MM-dd") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -291,19 +303,20 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti public void testSingleValueFieldWithDateMath() throws Exception { ZoneId timezone = randomZone(); int timeZoneOffset = timezone.getRules().getOffset(date(2, 15).toInstant()).getTotalSeconds(); - //there is a daylight saving time change on 11th March so suffix will be different - String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2,15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); - String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3,15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); + // there is a daylight saving time change on 11th March so suffix will be different + String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2, 15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); + String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3, 15, timezone).format(DateTimeFormatter.ofPattern("xxx", Locale.ROOT)); long expectedFirstBucketCount = timeZoneOffset < 0 ? 
3L : 2L; SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-02-15||+1M") - .addUnboundedFrom("2012-02-15||+1M") - .timeZone(timezone)) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-02-15||+1M") + .addUnboundedFrom("2012-02-15||+1M") + .timeZone(timezone) + ) + .get(); assertSearchResponse(response); @@ -324,8 +337,7 @@ public void testSingleValueFieldWithDateMath() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + - "-2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + "-2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); @@ -344,16 +356,16 @@ public void testSingleValueFieldWithDateMath() throws Exception { public void testSingleValueFieldWithCustomKey() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo("r1", date(2, 15)) - .addRange("r2", date(2, 15), date(3, 15)) - .addUnboundedFrom("r3", date(3, 15))) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo("r1", date(2, 15)) + .addRange("r2", date(2, 15), date(3, 15)) + .addUnboundedFrom("r3", date(3, 15)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -399,26 +411,26 @@ public void testSingleValueFieldWithCustomKey() throws Exception { public void testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo("r1", date(2, 15)) - .addRange("r2", date(2, 15), date(3, 15)) - .addUnboundedFrom("r3", date(3, 15)) - .subAggregation(sum("sum").field("value"))) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo("r1", date(2, 15)) + .addRange("r2", date(2, 15), date(3, 15)) + .addUnboundedFrom("r3", date(3, 15)) + .subAggregation(sum("sum").field("value")) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); List buckets = range.getBuckets(); assertThat(buckets.size(), equalTo(3)); - assertThat(((InternalAggregation)range).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)range).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)range).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)range).getProperty("sum.value"); + assertThat(((InternalAggregation) range).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) 
range).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); @@ -464,8 +476,6 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 4L)); } - - /* Jan 2, Feb 3, 1 Feb 2, Mar 3, 2 @@ -477,16 +487,16 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { public void testMultiValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("dates") - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15))) - .get(); + .addAggregation( + dateRange("range").field("dates") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -530,15 +540,18 @@ public void testMultiValuedField() throws Exception { Apr 23, May 24 6 */ - public void testMultiValuedFieldWithValueScript() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .field("dates") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) - .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))).get(); + .addAggregation( + dateRange("range").field("dates") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); @@ -576,8 +589,6 @@ public void testMultiValuedFieldWithValueScript() throws Exception { assertThat(bucket.getDocCount(), equalTo(numDocs - 1L)); } - - /* Feb 2, Mar 3, 1 Mar 2, Apr 3, 2 @@ -591,16 +602,16 @@ public void testScriptSingleValue() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "date"); SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateRange("range") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15))) - .get(); + .addAggregation( + dateRange("range").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -635,10 +646,6 @@ public void testScriptSingleValue() throws Exception { assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); } - - - - /* Jan 2, Feb 3, 1 Feb 2, Mar 3, 2 @@ -651,12 +658,14 @@ public void testScriptSingleValue() throws Exception { public void testScriptMultiValued() throws Exception { Map params = new HashMap<>(); params.put("fieldname", "dates"); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - dateRange("range").script(new Script(ScriptType.INLINE, 
"mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) - .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + dateRange("range").script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.EXTRACT_FIELD, params)) + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); @@ -698,16 +707,16 @@ public void testUnmapped() throws Exception { client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15))) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -744,16 +753,16 @@ public void testUnmapped() throws Exception { public void testUnmappedWithStringDates() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo("2012-02-15") - .addRange("2012-02-15", "2012-03-15") - .addUnboundedFrom("2012-03-15")) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo("2012-02-15") + .addRange("2012-02-15", "2012-03-15") + .addUnboundedFrom("2012-03-15") + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -790,16 +799,16 @@ public void testUnmappedWithStringDates() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(dateRange("range") - .field("date") - .addUnboundedTo(date(2, 15)) - .addRange(date(2, 15), date(3, 15)) - .addUnboundedFrom(date(3, 15))) - .get(); + .addAggregation( + dateRange("range").field("date") + .addUnboundedTo(date(2, 15)) + .addRange(date(2, 15), date(3, 15)) + .addUnboundedFrom(date(3, 15)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -836,10 +845,14 @@ public void testPartiallyUnmapped() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(dateRange("date_range").field("value").addRange("0-1", 0, 1))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(dateRange("date_range").field("value").addRange("0-1", 0, 1)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -859,13 +872,11 @@ public void testEmptyAggregation() throws Exception { 
assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true)); } - public void testNoRangesInQuery() { + public void testNoRangesInQuery() { try { - client().prepareSearch("idx") - .addAggregation(dateRange("my_date_range_agg").field("value")) - .get(); + client().prepareSearch("idx").addAggregation(dateRange("my_date_range_agg").field("value")).get(); fail(); - } catch (SearchPhaseExecutionException spee){ + } catch (SearchPhaseExecutionException spee) { Throwable rootCause = spee.getCause().getCause(); assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertEquals(rootCause.getMessage(), "No [ranges] specified for the [my_date_range_agg] aggregation"); @@ -877,60 +888,156 @@ public void testNoRangesInQuery() { * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("date", "type=date") + assertAcked( + prepareCreate("cache_test_idx").setMapping("date", "type=date") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, - client().prepareIndex("cache_test_idx").setId("1") - .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()), - client().prepareIndex("cache_test_idx").setId("2") - .setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject())); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx") + .setId("1") + .setSource(jsonBuilder().startObject().timeField("date", date(1, 1)).endObject()), + client().prepareIndex("cache_test_idx") + .setId("2") + .setSource(jsonBuilder().startObject().timeField("date", date(2, 1)).endObject()) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "date"); - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) - .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), - ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + dateRange("foo").field("date") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.CURRENT_DATE, params)) + .addRange( + ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ) + .get(); assertSearchResponse(r); - 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") - .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) - .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), - ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + dateRange("foo").field("date") + .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) + .addRange( + ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") - .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), - ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + dateRange("foo").field("date") + .addRange( + ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC) + ) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } /** @@ -940,16 +1047,19 @@ public void testScriptCaching() throws 
Exception { public void testRangeWithFormatStringValue() throws Exception { String indexName = "dateformat_test_idx"; assertAcked(prepareCreate(indexName).setMapping("date", "type=date,format=strict_hour_minute_second")); - indexRandom(true, - client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", "00:16:40").endObject()), - client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", "00:33:20").endObject()), - client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", "00:50:00").endObject())); + indexRandom( + true, + client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", "00:16:40").endObject()), + client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", "00:33:20").endObject()), + client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", "00:50:00").endObject()) + ); // using no format should work when to/from is compatible with format in // mapping - SearchResponse searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("00:16:40", "00:50:00").addRange("00:50:00", "01:06:40")) - .get(); + SearchResponse searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange("00:16:40", "00:50:00").addRange("00:50:00", "01:06:40")) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "00:16:40-00:50:00", 1000000L, 3000000L); @@ -957,9 +1067,12 @@ public void testRangeWithFormatStringValue() throws Exception { // using different format should work when to/from is compatible with // format in aggregation - searchResponse = client().prepareSearch(indexName).setSize(0).addAggregation( - dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss")) - .get(); + searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation( + dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss") + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L); @@ -967,20 +1080,26 @@ public void testRangeWithFormatStringValue() throws Exception { // providing numeric input with format should work, but bucket keys are // different now - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation( - dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis")) - .get(); + searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation( + dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis") + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); // 
providing numeric input without format should throw an exception - ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000)).get()); - assertThat(e.getDetailedMessage(), - containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]")); + ElasticsearchException e = expectThrows( + ElasticsearchException.class, + () -> client().prepareSearch(indexName) + .setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000)) + .get() + ); + assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]")); } /** @@ -990,31 +1109,39 @@ public void testRangeWithFormatStringValue() throws Exception { public void testRangeWithFormatNumericValue() throws Exception { String indexName = "dateformat_numeric_test_idx"; assertAcked(prepareCreate(indexName).setMapping("date", "type=date,format=epoch_second")); - indexRandom(true, - client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()), - client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()), - client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", 3008).endObject())); + indexRandom( + true, + client().prepareIndex(indexName).setId("1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()), + client().prepareIndex(indexName).setId("2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()), + client().prepareIndex(indexName).setId("3").setSource(jsonBuilder().startObject().field("date", 3008).endObject()) + ); // using no format should work when to/from is compatible with format in // mapping - SearchResponse searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)).get(); + SearchResponse searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // using no format should also work when and to/from are string values - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")).get(); + searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); // also e-notation should work, fractional parts should be truncated - searchResponse = client().prepareSearch(indexName).setSize(0) - 
.addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)).get(); + searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); @@ -1022,9 +1149,12 @@ public void testRangeWithFormatNumericValue() throws Exception { // using different format should work when to/from is compatible with // format in aggregation - searchResponse = client().prepareSearch(indexName).setSize(0).addAggregation( - dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss")) - .get(); + searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation( + dateRange("date_range").field("date").addRange("00.16.40", "00.50.00").addRange("00.50.00", "01.06.40").format("HH.mm.ss") + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "00.16.40-00.50.00", 1000000L, 3000000L); @@ -1032,10 +1162,12 @@ public void testRangeWithFormatNumericValue() throws Exception { // providing different numeric input with format should work, but bucket // keys are different now - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation( - dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis")) - .get(); + searchResponse = client().prepareSearch(indexName) + .setSize(0) + .addAggregation( + dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000).format("epoch_millis") + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 85fdef887d87b..62c8d704c669b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -49,42 +49,47 @@ public String randomExecutionHint() { return randomBoolean() ? 
null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } - @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("test").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0)).setMapping( - "author", "type=keyword", "name", "type=keyword", "genre", - "type=keyword", "price", "type=float")); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("author", "type=keyword", "name", "type=keyword", "genre", "type=keyword", "price", "type=float") + ); createIndex("idx_unmapped"); // idx_unmapped_author is same as main index but missing author field - assertAcked(prepareCreate("idx_unmapped_author").setSettings( - Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping("name", "type=keyword", "genre", "type=keyword", "price", - "type=float")); + assertAcked( + prepareCreate("idx_unmapped_author").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("name", "type=keyword", "genre", "type=keyword", "price", "type=float") + ); ensureGreen(); String data[] = { - // "id,cat,name,price,inStock,author_t,series_t,sequence_i,genre_s", - "0553573403,book,A Game of Thrones,7.99,true,George R.R. Martin,A Song of Ice and Fire,1,fantasy", - "0553579908,book,A Clash of Kings,7.99,true,George R.R. Martin,A Song of Ice and Fire,2,fantasy", - "055357342X,book,A Storm of Swords,7.99,true,George R.R. Martin,A Song of Ice and Fire,3,fantasy", - "0553293354,book,Foundation,17.99,true,Isaac Asimov,Foundation Novels,1,scifi", - "0812521390,book,The Black Company,6.99,false,Glen Cook,The Chronicles of The Black Company,1,fantasy", - "0812550706,book,Ender's Game,6.99,true,Orson Scott Card,Ender,1,scifi", - "0441385532,book,Jhereg,7.95,false,Steven Brust,Vlad Taltos,1,fantasy", - "0380014300,book,Nine Princes In Amber,6.99,true,Roger Zelazny,the Chronicles of Amber,1,fantasy", - "0805080481,book,The Book of Three,5.99,true,Lloyd Alexander,The Chronicles of Prydain,1,fantasy", - "080508049X,book,The Black Cauldron,5.99,true,Lloyd Alexander,The Chronicles of Prydain,2,fantasy" + // "id,cat,name,price,inStock,author_t,series_t,sequence_i,genre_s", + "0553573403,book,A Game of Thrones,7.99,true,George R.R. Martin,A Song of Ice and Fire,1,fantasy", + "0553579908,book,A Clash of Kings,7.99,true,George R.R. Martin,A Song of Ice and Fire,2,fantasy", + "055357342X,book,A Storm of Swords,7.99,true,George R.R. 
Martin,A Song of Ice and Fire,3,fantasy", + "0553293354,book,Foundation,17.99,true,Isaac Asimov,Foundation Novels,1,scifi", + "0812521390,book,The Black Company,6.99,false,Glen Cook,The Chronicles of The Black Company,1,fantasy", + "0812550706,book,Ender's Game,6.99,true,Orson Scott Card,Ender,1,scifi", + "0441385532,book,Jhereg,7.95,false,Steven Brust,Vlad Taltos,1,fantasy", + "0380014300,book,Nine Princes In Amber,6.99,true,Roger Zelazny,the Chronicles of Amber,1,fantasy", + "0805080481,book,The Book of Three,5.99,true,Lloyd Alexander,The Chronicles of Prydain,1,fantasy", + "080508049X,book,The Black Cauldron,5.99,true,Lloyd Alexander,The Chronicles of Prydain,2,fantasy" - }; + }; for (int i = 0; i < data.length; i++) { String[] parts = data[i].split(","); - client().prepareIndex("test").setId("" + i) - .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])).get(); - client().prepareIndex("idx_unmapped_author").setId("" + i) - .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])).get(); + client().prepareIndex("test") + .setId("" + i) + .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .get(); + client().prepareIndex("idx_unmapped_author") + .setId("" + i) + .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .get(); } client().admin().indices().refresh(new RefreshRequest("test")).get(); } @@ -93,13 +98,14 @@ public void testIssue10719() throws Exception { // Tests that we can refer to nested elements under a sample in a path // statement boolean asc = randomBoolean(); - SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(terms("genres") - .field("genre") - .order(BucketOrder.aggregation("sample>max_price.value", asc)) - .subAggregation(sampler("sample").shardSize(100) - .subAggregation(max("max_price").field("price"))) - ).get(); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + terms("genres").field("genre") + .order(BucketOrder.aggregation("sample>max_price.value", asc)) + .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price"))) + ) + .get(); assertSearchResponse(response); Terms genres = response.getAggregations().get("genres"); Collection genreBuckets = genres.getBuckets(); @@ -126,11 +132,12 @@ public void testSimpleDiversity() throws Exception { sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0).setSize(60) - .addAggregation(sampleAgg) - .get(); + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); Terms authors = sample.getAggregations().get("authors"); @@ -151,8 +158,7 @@ public void testNestedDiversity() throws Exception { sampleAgg.subAggregation(terms("authors").field("author")); rootTerms.subAggregation(sampleAgg); - SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(rootTerms).get(); 
+ SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootTerms).get(); assertSearchResponse(response); Terms genres = response.getAggregations().get("genres"); List genreBuckets = genres.getBuckets(); @@ -172,8 +178,8 @@ public void testNestedSamples() throws Exception { int MAX_DOCS_PER_AUTHOR = 1; int MAX_DOCS_PER_GENRE = 2; DiversifiedAggregationBuilder rootSample = new DiversifiedAggregationBuilder("genreSample").shardSize(100) - .field("genre") - .maxDocsPerValue(MAX_DOCS_PER_GENRE); + .field("genre") + .maxDocsPerValue(MAX_DOCS_PER_GENRE); DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100); sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); @@ -181,8 +187,10 @@ public void testNestedSamples() throws Exception { sampleAgg.subAggregation(terms("genres").field("genre")); rootSample.subAggregation(sampleAgg); - SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(rootSample) - .get(); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation(rootSample) + .get(); assertSearchResponse(response); Sampler genreSample = response.getAggregations().get("genreSample"); Sampler sample = genreSample.getAggregations().get("sample"); @@ -203,12 +211,17 @@ public void testNestedSamples() throws Exception { public void testPartiallyUnmappedDiversifyField() throws Exception { // One of the indexes is missing the "author" field used for // diversifying results - DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100).field("author") - .maxDocsPerValue(1); + DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100) + .field("author") + .maxDocsPerValue(1); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = client().prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg) - .get(); + SearchResponse response = client().prepareSearch("idx_unmapped_author", "test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); assertThat(sample.getDocCount(), greaterThan(0L)); @@ -217,13 +230,18 @@ public void testPartiallyUnmappedDiversifyField() throws Exception { } public void testWhollyUnmappedDiversifyField() throws Exception { - //All of the indices are missing the "author" field used for diversifying results + // All of the indices are missing the "author" field used for diversifying results int MAX_DOCS_PER_AUTHOR = 1; DiversifiedAggregationBuilder sampleAgg = new DiversifiedAggregationBuilder("sample").shardSize(100); sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint()); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = client().prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg).get(); + SearchResponse response = 
client().prepareSearch("idx_unmapped", "idx_unmapped_author") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); assertThat(sample.getDocCount(), equalTo(0L)); @@ -239,7 +257,8 @@ public void testRidiculousSizeDiversity() throws Exception { SearchResponse response = client().prepareSearch("test") .setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0).setSize(60) + .setFrom(0) + .setSize(60) .addAggregation(sampleAgg) .get(); assertSearchResponse(response); @@ -250,7 +269,8 @@ public void testRidiculousSizeDiversity() throws Exception { response = client().prepareSearch("test") .setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0).setSize(60) + .setFrom(0) + .setSize(60) .addAggregation(sampleAgg) .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 9c6d2817fb535..df7d96c379d0e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -122,30 +122,45 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); List builders = new ArrayList<>(); for (int i = 0; i < NUM_DOCS; i++) { - builders.add(client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, (double) i) - .field("num_tag", i < NUM_DOCS/2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg - .field("constant", 1) - .startArray(MULTI_VALUED_FIELD_NAME).value((double) i).value(i + 1d).endArray() - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, (double) i) + .field("num_tag", i < NUM_DOCS / 2 + 1 ? 
1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) + .startArray(MULTI_VALUED_FIELD_NAME) + .value((double) i) + .value(i + 1d) + .endArray() + .endObject() + ) + ); } for (int i = 0; i < 100; i++) { - builders.add(client().prepareIndex("high_card_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, (double) i) - .startArray(MULTI_VALUED_FIELD_NAME).value((double)i).value(i + 1d).endArray() - .endObject())); + builders.add( + client().prepareIndex("high_card_idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, (double) i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value((double) i) + .value(i + 1d) + .endArray() + .endObject() + ) + ); } createIndex("idx_unmapped"); assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i*2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } getMultiSortDocs(builders); @@ -201,67 +216,47 @@ private void getMultiSortDocs(List builders) throws IOExcep assertAcked(prepareCreate("sort_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=double")); for (int i = 1; i <= 3; i++) { - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 1) - .field("l", 1) - .field("d", i) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 2) - .field("l", 2) - .field("d", i) - .endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + ); } - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 3) - .field("l", 3) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 3) - .field("l", 3) - .field("d", 2) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 4) - .field("l", 3) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 4) - .field("l", 3) - .field("d", 3) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 5) - .field("l", 5) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 5) - .field("l", 5) - .field("d", 2) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 6) - .field("l", 5) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - 
.field(SINGLE_VALUED_FIELD_NAME, 7) - .field("l", 5) - .field("d", 1) - .endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + ); } private String key(Terms.Bucket bucket) { @@ -270,11 +265,17 @@ private String key(Terms.Bucket bucket) { // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> client() - .prepareSearch("high_card_idx").addAggregation(terms("terms").field(SINGLE_VALUED_FIELD_NAME) - .minDocCount(randomInt(1)).size(0).collectMode(randomFrom(SubAggCollectionMode.values()))) - .get()); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("high_card_idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .minDocCount(randomInt(1)) + .size(0) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get() + ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. 
Found [0] in [terms]")); } @@ -289,8 +290,8 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx") - .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(allResponse); Terms terms = allResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); @@ -301,9 +302,13 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception final int numPartitions = randomIntBetween(2, 4); Set foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { - SearchResponse response = client().prepareSearch("idx").addAggregation(terms("terms").field(field) - .includeExclude(new IncludeExclude(partition, numPartitions)).collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); @@ -318,15 +323,15 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -335,7 +340,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { for (int i = 0; i < 5; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (i+1d))); + assertThat(key(bucket), equalTo("" + (i + 1d))); assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); assertThat(bucket.getDocCount(), equalTo(1L)); } @@ -343,15 +348,15 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); 
assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -360,7 +365,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception { for (int i = 0; i < 6; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (i+1d))); + assertThat(key(bucket), equalTo("" + (i + 1d))); assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); if (i == 0 || i == 5) { assertThat(bucket.getDocCount(), equalTo(1L)); @@ -372,16 +377,15 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(long) (_value / 1000 + 1)", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -412,14 +416,19 @@ public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { */ public void testScriptSingleValue() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ) + ) + ) + .get(); assertSearchResponse(response); @@ -438,14 +447,19 @@ public void testScriptSingleValue() throws Exception { } public void testScriptMultiValued() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", - Collections.emptyMap()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) + ) + ) + .get(); assertSearchResponse(response); @@ -469,14 +483,11 @@ public void testScriptMultiValued() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped", "idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + 
.addAggregation(terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -493,15 +504,13 @@ public void testPartiallyUnmapped() throws Exception { public void testPartiallyUnmappedWithFormat() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped", "idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .format("0000.00")) - .get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())).format("0000.00") + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -519,17 +528,15 @@ public void testPartiallyUnmappedWithFormat() throws Exception { public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithSubTermsAgg() throws Exception { boolean asc = true; - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", asc)) - .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation( - terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode( - randomFrom(SubAggCollectionMode.values())))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", asc)) + .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); assertSearchResponse(response); @@ -541,7 +548,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithSubTer for (int i = 0; i < 5; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (double) i); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(key(bucket), equalTo("" + (double) i)); assertThat(bucket.getDocCount(), equalTo(1L)); Avg avg = bucket.getAggregations().get("avg_i"); @@ -563,13 +570,14 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithSubTer public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("num_tags").field("num_tag").collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))).get(); - + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("num_tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ) + .get(); assertSearchResponse(response); @@ -599,18 +607,18 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws public void 
testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("tags") - .field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()).subAggregation( - max("max").field(SINGLE_VALUED_FIELD_NAME))))).get(); - + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -657,9 +665,12 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", true))).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", true)) + ) + .get(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -673,14 +684,13 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation( - terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("num_tags", true)) - .subAggregation( - terms("num_tags").field("num_tags").collectMode(randomFrom(SubAggCollectionMode.values())))) - .get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("num_tags", true)) + .subAggregation(terms("num_tags").field("num_tags").collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type"); @@ -694,13 +704,18 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMe for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME + "2").collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.foo", true)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME + "2") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.foo", true)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); - fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + - "with an unknown specified metric to order by"); + fail( + "Expected search 
to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + + "with an unknown specified metric to order by" + ); } catch (ElasticsearchException e) { // expected @@ -712,13 +727,18 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats", true)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats", true)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); - fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + - "where the metric name is not specified"); + fail( + "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + + "where the metric name is not specified" + ); } catch (ElasticsearchException e) { // expected @@ -728,12 +748,14 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); @@ -745,7 +767,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E for (int i = 0; i < 5; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (double) i); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(key(bucket), equalTo("" + (double) i)); assertThat(bucket.getDocCount(), equalTo(1L)); Stats stats = bucket.getAggregations().get("stats"); @@ -756,12 +778,14 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); @@ -773,7 +797,7 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws for (int i = 4; i >= 0; i--) { 
Terms.Bucket bucket = terms.getBucketByKey("" + (double) i); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(key(bucket), equalTo("" + (double) i)); assertThat(bucket.getDocCount(), equalTo(1L)); Stats stats = bucket.getAggregations().get("stats"); @@ -784,12 +808,14 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.variance", asc)) - .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.variance", asc)) + .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); @@ -801,7 +827,7 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex for (int i = 0; i < 5; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (double) i); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(key(bucket), equalTo("" + (double) i)); assertThat(bucket.getDocCount(), equalTo(1L)); ExtendedStats stats = bucket.getAggregations().get("stats"); @@ -813,19 +839,23 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex public void testScriptScore() { Script scoringScript = new Script( - ScriptType.INLINE, CustomScriptPlugin .NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ); Script aggregationScript = new Script( - ScriptType.INLINE, CustomScriptPlugin.NAME, "ceil(_score.doubleValue()/3)", Collections.emptyMap()); + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "ceil(_score.doubleValue()/3)", + Collections.emptyMap() + ); - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(functionScoreQuery(scriptFunction(scoringScript))) - .addAggregation( - terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(aggregationScript)) - .get(); + SearchResponse response = client().prepareSearch("idx") + .setQuery(functionScoreQuery(scriptFunction(scoringScript))) + .addAggregation(terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(aggregationScript)) + .get(); assertSearchResponse(response); @@ -837,7 +867,7 @@ public void testScriptScore() { for (int i = 0; i < 3; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (double) i); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (double)i)); + assertThat(key(bucket), equalTo("" + (double) i)); assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i)); assertThat(bucket.getDocCount(), equalTo(i == 1 ? 
3L : 1L)); } @@ -870,9 +900,12 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValu public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { double[] expectedKeys = new double[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, BucketOrder.count(false), - BucketOrder.aggregation("sum_d", false), - BucketOrder.aggregation("avg_l", false)); + assertMultiSortResponse( + expectedKeys, + BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { @@ -881,12 +914,15 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(double[] expectedKeys, BucketOrder... order) { - SearchResponse response = client() - .prepareSearch("sort_idx") - .addAggregation( - terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)).subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d"))).get(); + SearchResponse response = client().prepareSearch("sort_idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ) + .get(); assertSearchResponse(response); @@ -919,47 +955,132 @@ public void testOtherDocCount() { * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=float") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=float") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1.5), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2.5)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1.5), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2.5) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - terms("terms").field("d").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + terms("terms").field("d") + .script(new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - terms("terms").field("d").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + terms("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(terms("terms").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java index 92b3fd7bd5654..3b66b224717c3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FilterIT.java @@ -48,44 +48,41 @@ public void setupSuiteScopeCluster() throws Exception { numTag1Docs = randomIntBetween(1, numDocs - 1); List builders = new ArrayList<>(); for (int i 
= 0; i < numTag1Docs; i++) { - builders.add(client().prepareIndex("idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field("value", i + 1) - .field("tag", "tag1") - .endObject())); + builders.add( + client().prepareIndex("idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject()) + ); } for (int i = numTag1Docs; i < numDocs; i++) { - XContentBuilder source = jsonBuilder() - .startObject() - .field("value", i) - .field("tag", "tag2") - .field("name", "name" + i) - .endObject(); - builders.add(client().prepareIndex("idx").setId(""+i).setSource(source)); + XContentBuilder source = jsonBuilder().startObject() + .field("value", i) + .field("tag", "tag2") + .field("name", "name" + i) + .endObject(); + builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted docs that match the filter - builders.add(client().prepareIndex("idx").setId(""+i).setSource(source)); + builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); } } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field("value", i*2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); } public void testSimple() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(filter("tag1", termQuery("tag", "tag1"))) - .get(); + SearchResponse response = client().prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1"))).get(); assertSearchResponse(response); - Filter filter = response.getAggregations().get("tag1"); assertThat(filter, notNullValue()); assertThat(filter.getName(), equalTo("tag1")); @@ -107,18 +104,16 @@ public void testEmptyFilterDeclarations() throws Exception { public void testWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(filter("tag1", termQuery("tag", "tag1")) - .subAggregation(avg("avg_value").field("value"))) - .get(); + .addAggregation(filter("tag1", termQuery("tag", "tag1")).subAggregation(avg("avg_value").field("value"))) + .get(); assertSearchResponse(response); - Filter filter = response.getAggregations().get("tag1"); assertThat(filter, notNullValue()); assertThat(filter.getName(), equalTo("tag1")); assertThat(filter.getDocCount(), equalTo((long) numTag1Docs)); - assertThat((long) ((InternalAggregation)filter).getProperty("_count"), equalTo((long) numTag1Docs)); + assertThat((long) ((InternalAggregation) filter).getProperty("_count"), equalTo((long) numTag1Docs)); long sum = 0; for (int i = 0; i < numTag1Docs; ++i) { @@ -129,14 +124,13 @@ public void testWithSubAggregation() throws Exception { assertThat(avgValue, notNullValue()); assertThat(avgValue.getName(), equalTo("avg_value")); assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs)); - assertThat((double) ((InternalAggregation)filter).getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs)); + assertThat((double) ((InternalAggregation) filter).getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs)); } public void testAsSubAggregation() { SearchResponse response = 
client().prepareSearch("idx") - .addAggregation( - histogram("histo").field("value").interval(2L).subAggregation( - filter("filter", matchAllQuery()))).get(); + .addAggregation(histogram("histo").field("value").interval(2L).subAggregation(filter("filter", matchAllQuery()))) + .get(); assertSearchResponse(response); @@ -153,13 +147,12 @@ public void testAsSubAggregation() { public void testWithContextBasedSubAggregation() throws Exception { try { - client().prepareSearch("idx") - .addAggregation(filter("tag1", termQuery("tag", "tag1")) - .subAggregation(avg("avg_value"))) - .get(); + client().prepareSearch("idx").addAggregation(filter("tag1", termQuery("tag", "tag1")).subAggregation(avg("avg_value"))).get(); - fail("expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" + - "context which the sub-aggregation can inherit"); + fail( + "expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" + + "context which the sub-aggregation can inherit" + ); } catch (ElasticsearchException e) { assertThat(e.getMessage(), is("all shards failed")); @@ -168,10 +161,9 @@ public void testWithContextBasedSubAggregation() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(filter("filter", matchAllQuery()))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(filter("filter", matchAllQuery()))) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java index 1c2a59d1a4629..bbf2806673004 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/FiltersIT.java @@ -55,33 +55,31 @@ public void setupSuiteScopeCluster() throws Exception { numTag2Docs = randomIntBetween(1, numDocs - numTag1Docs); List builders = new ArrayList<>(); for (int i = 0; i < numTag1Docs; i++) { - XContentBuilder source = jsonBuilder() - .startObject() - .field("value", i + 1) - .field("tag", "tag1") - .endObject(); - builders.add(client().prepareIndex("idx").setId(""+i).setSource(source)); + XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject(); + builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { // randomly index the document twice so that we have deleted docs that match the filter - builders.add(client().prepareIndex("idx").setId(""+i).setSource(source)); + builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs; i < (numTag1Docs + numTag2Docs); i++) { - XContentBuilder source = jsonBuilder() - .startObject() - .field("value", i) - .field("tag", "tag2") - .field("name", "name" + i) - .endObject(); - builders.add(client().prepareIndex("idx").setId(""+i).setSource(source)); + XContentBuilder source = jsonBuilder().startObject() + 
.field("value", i) + .field("tag", "tag2") + .field("name", "name" + i) + .endObject(); + builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { - builders.add(client().prepareIndex("idx").setId(""+i).setSource(source)); + builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); } } for (int i = numTag1Docs + numTag2Docs; i < numDocs; i++) { numOtherDocs++; - XContentBuilder source = jsonBuilder().startObject().field("value", i).field("tag", "tag3").field("name", "name" + i) - .endObject(); + XContentBuilder source = jsonBuilder().startObject() + .field("value", i) + .field("tag", "tag3") + .field("name", "name" + i) + .endObject(); builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); if (randomBoolean()) { builders.add(client().prepareIndex("idx").setId("" + i).setSource(source)); @@ -89,20 +87,25 @@ public void setupSuiteScopeCluster() throws Exception { } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field("value", i*2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); } public void testSimple() throws Exception { - SearchResponse response = client().prepareSearch("idx").addAggregation( - filters("tags", randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), - new KeyedFilter("tag2", termQuery("tag", "tag2"))))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ) + ) + .get(); assertSearchResponse(response); @@ -126,9 +129,10 @@ public void testSimple() throws Exception { public void testEmptyFilterDeclarations() throws Exception { QueryBuilder emptyFilter = new BoolQueryBuilder(); SearchResponse response = client().prepareSearch("idx") - .addAggregation(filters("tags", randomOrder(new KeyedFilter("all", emptyFilter), - new KeyedFilter("tag1", termQuery("tag", "tag1"))))) - .get(); + .addAggregation( + filters("tags", randomOrder(new KeyedFilter("all", emptyFilter), new KeyedFilter("tag1", termQuery("tag", "tag1")))) + ) + .get(); assertSearchResponse(response); @@ -144,9 +148,13 @@ public void testEmptyFilterDeclarations() throws Exception { public void testWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(filters("tags", randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), - new KeyedFilter("tag2", termQuery("tag", "tag2")))).subAggregation(avg("avg_value").field("value"))) - .get(); + .addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).subAggregation(avg("avg_value").field("value")) + ) + .get(); assertSearchResponse(response); @@ -155,10 +163,10 @@ public void testWithSubAggregation() throws Exception { assertThat(filters.getName(), equalTo("tags")); assertThat(filters.getBuckets().size(), equalTo(2)); - assertThat(((InternalAggregation)filters).getProperty("_bucket_count"), equalTo(2)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)filters).getProperty("_key"); - Object[] 
propertiesDocCounts = (Object[]) ((InternalAggregation)filters).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)filters).getProperty("avg_value.value"); + assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(2)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); Filters.Bucket bucket = filters.getBucketByKey("tag1"); assertThat(bucket, Matchers.notNullValue()); @@ -195,9 +203,8 @@ public void testWithSubAggregation() throws Exception { public void testAsSubAggregation() { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field("value").interval(2L).subAggregation( - filters("filters", matchAllQuery()))).get(); + .addAggregation(histogram("histo").field("value").interval(2L).subAggregation(filters("filters", matchAllQuery()))) + .get(); assertSearchResponse(response); @@ -218,14 +225,18 @@ public void testWithContextBasedSubAggregation() throws Exception { try { client().prepareSearch("idx") - .addAggregation( - filters("tags", - randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), - new KeyedFilter("tag2", termQuery("tag", "tag2")))).subAggregation(avg("avg_value"))) - .get(); + .addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).subAggregation(avg("avg_value")) + ) + .get(); - fail("expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" + - "context which the sub-aggregation can inherit"); + fail( + "expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" + + "context which the sub-aggregation can inherit" + ); } catch (ElasticsearchException e) { assertThat(e.getMessage(), is("all shards failed")); @@ -234,10 +245,14 @@ public void testWithContextBasedSubAggregation() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(filters("filters", new KeyedFilter("all", matchAllQuery())))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(filters("filters", new KeyedFilter("all", matchAllQuery()))) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -255,7 +270,8 @@ public void testEmptyAggregation() throws Exception { public void testSimpleNonKeyed() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2"))).get(); + .addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2"))) + .get(); assertSearchResponse(response); @@ -278,10 +294,14 @@ public void testSimpleNonKeyed() throws Exception { } public void testOtherBucket() throws Exception { - SearchResponse response = client().prepareSearch("idx").addAggregation( - filters("tags", randomOrder(new KeyedFilter("tag1", 
termQuery("tag", "tag1")), - new KeyedFilter("tag2", termQuery("tag", "tag2")))).otherBucket(true)) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true) + ) + .get(); assertSearchResponse(response); @@ -306,9 +326,13 @@ public void testOtherBucket() throws Exception { public void testOtherNamedBucket() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(filters("tags", randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), - new KeyedFilter("tag2", termQuery("tag", "tag2")))).otherBucket(true).otherBucketKey("foobar")) - .get(); + .addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true).otherBucketKey("foobar") + ) + .get(); assertSearchResponse(response); @@ -333,7 +357,8 @@ public void testOtherNamedBucket() throws Exception { public void testOtherNonKeyed() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2")).otherBucket(true)).get(); + .addAggregation(filters("tags", termQuery("tag", "tag1"), termQuery("tag", "tag2")).otherBucket(true)) + .get(); assertSearchResponse(response); @@ -361,10 +386,13 @@ public void testOtherNonKeyed() throws Exception { public void testOtherWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(filters("tags", randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), - new KeyedFilter("tag2", termQuery("tag", "tag2")))).otherBucket(true) - .subAggregation(avg("avg_value").field("value"))) - .get(); + .addAggregation( + filters( + "tags", + randomOrder(new KeyedFilter("tag1", termQuery("tag", "tag1")), new KeyedFilter("tag2", termQuery("tag", "tag2"))) + ).otherBucket(true).subAggregation(avg("avg_value").field("value")) + ) + .get(); assertSearchResponse(response); @@ -373,10 +401,10 @@ public void testOtherWithSubAggregation() throws Exception { assertThat(filters.getName(), equalTo("tags")); assertThat(filters.getBuckets().size(), equalTo(3)); - assertThat(((InternalAggregation)filters).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)filters).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)filters).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)filters).getProperty("avg_value.value"); + assertThat(((InternalAggregation) filters).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) filters).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) filters).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) filters).getProperty("avg_value.value"); Filters.Bucket bucket = filters.getBucketByKey("tag1"); assertThat(bucket, Matchers.notNullValue()); @@ -429,11 +457,14 @@ public void testOtherWithSubAggregation() throws Exception { public void testEmptyAggregationWithOtherBucket() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - 
.subAggregation(filters("filters", new KeyedFilter("foo", matchAllQuery())) - .otherBucket(true).otherBucketKey("bar"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(filters("filters", new KeyedFilter("foo", matchAllQuery())).otherBucket(true).otherBucketKey("bar")) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index ae23c6b0997a8..8e0dd0ba6638c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -69,18 +69,15 @@ private IndexRequestBuilder indexCity(String idx, String name, String... latLons @Override public void setupSuiteScopeCluster() throws Exception { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); - prepareCreate("idx").setSettings(settings) - .setMapping("location", "type=geo_point", "city", "type=keyword") - .get(); + prepareCreate("idx").setSettings(settings).setMapping("location", "type=geo_point", "city", "type=keyword").get(); - prepareCreate("idx-multi") - .setMapping("location", "type=geo_point", "city", "type=keyword") - .get(); + prepareCreate("idx-multi").setMapping("location", "type=geo_point", "city", "type=keyword").get(); createIndex("idx_unmapped"); List cities = new ArrayList<>(); - cities.addAll(Arrays.asList( + cities.addAll( + Arrays.asList( // below 500km indexCity("idx", "utrecht", "52.0945, 5.116"), indexCity("idx", "haarlem", "52.3890, 4.637"), @@ -88,7 +85,9 @@ public void setupSuiteScopeCluster() throws Exception { indexCity("idx", "berlin", "52.540, 13.409"), indexCity("idx", "prague", "50.097679, 14.441314"), // above 1000km - indexCity("idx", "tel-aviv", "32.0741, 34.777"))); + indexCity("idx", "tel-aviv", "32.0741, 34.777") + ) + ); // random cities with no location for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) { @@ -99,28 +98,30 @@ public void setupSuiteScopeCluster() throws Exception { indexRandom(true, cities); cities.clear(); - cities.addAll(Arrays.asList( + cities.addAll( + Arrays.asList( // first point is within the ~17.5km, the second is ~710km indexCity("idx-multi", "city1", "52.3890, 4.637", "50.097679,14.441314"), // first point is ~576km, the second is within the ~35km indexCity("idx-multi", "city2", "52.540, 13.409", "52.0945, 5.116"), // above 1000km - indexCity("idx-multi", "city3", "32.0741, 34.777"))); + indexCity("idx-multi", "city3", "32.0741, 34.777") + ) + ); // random cities with no location for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) { cities.add(indexCity("idx-multi", cityName)); } indexRandom(true, cities); - prepareCreate("empty_bucket_idx") - .setMapping("value", "type=integer", "location", "type=geo_point").get(); + prepareCreate("empty_bucket_idx").setMapping("value", "type=integer", "location", "type=geo_point").get(); List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder() - .startObject() - .field("value", i * 2) - 
.field("location", "52.0945, 5.116") - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).field("location", "52.0945, 5.116").endObject()) + ); } indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()])); ensureSearchable(); @@ -128,17 +129,17 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)) - .field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000)) - .get(); + .addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ) + .get(); assertSearchResponse(response); - Range geoDist = response.getAggregations().get("amsterdam_rings"); assertThat(geoDist, notNullValue()); assertThat(geoDist.getName(), equalTo("amsterdam_rings")); @@ -175,17 +176,17 @@ public void testSimple() throws Exception { public void testSimpleWithCustomKeys() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)) - .field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo("ring1", 500) - .addRange("ring2", 500, 1000) - .addUnboundedFrom("ring3", 1000)) - .get(); + .addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo("ring1", 500) + .addRange("ring2", 500, 1000) + .addUnboundedFrom("ring3", 1000) + ) + .get(); assertSearchResponse(response); - Range geoDist = response.getAggregations().get("amsterdam_rings"); assertThat(geoDist, notNullValue()); assertThat(geoDist.getName(), equalTo("amsterdam_rings")); @@ -224,17 +225,17 @@ public void testUnmapped() throws Exception { client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)) - .field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000)) - .get(); + .addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ) + .get(); assertSearchResponse(response); - Range geoDist = response.getAggregations().get("amsterdam_rings"); assertThat(geoDist, notNullValue()); assertThat(geoDist.getName(), equalTo("amsterdam_rings")); @@ -271,17 +272,17 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)) - .field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000)) - .get(); + .addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ) + .get(); assertSearchResponse(response); - Range 
geoDist = response.getAggregations().get("amsterdam_rings"); assertThat(geoDist, notNullValue()); assertThat(geoDist.getName(), equalTo("amsterdam_rings")); @@ -318,28 +319,27 @@ public void testPartiallyUnmapped() throws Exception { public void testWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)) - .field("location") - .unit(DistanceUnit.KILOMETERS) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000) - .subAggregation(terms("cities").field("city") - .collectMode(randomFrom(SubAggCollectionMode.values())))) - .get(); + .addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + .subAggregation(terms("cities").field("city").collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); assertSearchResponse(response); - Range geoDist = response.getAggregations().get("amsterdam_rings"); assertThat(geoDist, notNullValue()); assertThat(geoDist.getName(), equalTo("amsterdam_rings")); List buckets = geoDist.getBuckets(); assertThat(geoDist.getBuckets().size(), equalTo(3)); - assertThat(((InternalAggregation)geoDist).getProperty("_bucket_count"), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)geoDist).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)geoDist).getProperty("_count"); - Object[] propertiesCities = (Object[]) ((InternalAggregation)geoDist).getProperty("cities"); + assertThat(((InternalAggregation) geoDist).getProperty("_bucket_count"), equalTo(3)); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoDist).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoDist).getProperty("_count"); + Object[] propertiesCities = (Object[]) ((InternalAggregation) geoDist).getProperty("cities"); Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); @@ -404,11 +404,14 @@ public void testWithSubAggregation() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location") - .addRange("0-100", 0.0, 100.0))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location").addRange("0-100", 0.0, 100.0)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -430,13 +433,11 @@ public void testEmptyAggregation() throws Exception { assertThat(buckets.get(0).getDocCount(), equalTo(0L)); } - public void testNoRangesInQuery() { + public void testNoRangesInQuery() { try { - client().prepareSearch("idx") - .addAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location")) - .get(); + client().prepareSearch("idx").addAggregation(geoDistance("geo_dist", new GeoPoint(52.3760, 4.894)).field("location")).get(); fail(); - } catch (SearchPhaseExecutionException spee){ + } catch (SearchPhaseExecutionException spee) { Throwable rootCause = 
spee.getCause().getCause(); assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertEquals(rootCause.getMessage(), "No [ranges] specified for the [geo_dist] aggregation"); @@ -445,14 +446,15 @@ public void testNoRangesInQuery() { public void testMultiValues() throws Exception { SearchResponse response = client().prepareSearch("idx-multi") - .addAggregation(geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)) - .field("location") - .unit(DistanceUnit.KILOMETERS) - .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC) - .addUnboundedTo(500) - .addRange(500, 1000) - .addUnboundedFrom(1000)) - .get(); + .addAggregation( + geoDistance("amsterdam_rings", new GeoPoint(52.3760, 4.894)).field("location") + .unit(DistanceUnit.KILOMETERS) + .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC) + .addUnboundedTo(500) + .addRange(500, 1000) + .addUnboundedFrom(1000) + ) + .get(); assertSearchResponse(response); @@ -490,6 +492,4 @@ public void testMultiValues() throws Exception { assertThat(bucket.getDocCount(), equalTo(1L)); } - - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index a8584bd5a55ab..f7f4bd7794460 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; import com.carrotsearch.hppc.cursors.ObjectIntCursor; + import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -33,9 +34,9 @@ import java.util.Random; import java.util.Set; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.geometry.utils.Geohash.PRECISION; import static org.elasticsearch.geometry.utils.Geohash.stringEncode; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; @@ -77,21 +78,20 @@ public void setupSuiteScopeCluster() throws Exception { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); - assertAcked(prepareCreate("idx").setSettings(settings) - .setMapping("location", "type=geo_point", "city", "type=keyword")); + assertAcked(prepareCreate("idx").setSettings(settings).setMapping("location", "type=geo_point", "city", "type=keyword")); List cities = new ArrayList<>(); Random random = random(); expectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2); for (int i = 0; i < numDocs; i++) { - //generate random point + // generate random point double lat = (180d * random.nextDouble()) - 90d; double lng = (360d * random.nextDouble()) - 180d; String randomGeoHash = stringEncode(lng, lat, PRECISION); - //Index at the highest resolution + // Index at the highest resolution cities.add(indexCity("idx", randomGeoHash, lat + ", " + lng)); expectedDocCountsForGeoHash.put(randomGeoHash, expectedDocCountsForGeoHash.getOrDefault(randomGeoHash, 0) + 1); - //Update expected doc counts for 
all resolutions.. + // Update expected doc counts for all resolutions.. for (int precision = PRECISION - 1; precision > 0; precision--) { String hash = stringEncode(lng, lat, precision); if ((smallestGeoHash == null) || (hash.length() < smallestGeoHash.length())) { @@ -102,8 +102,9 @@ public void setupSuiteScopeCluster() throws Exception { } indexRandom(true, cities); - assertAcked(prepareCreate("multi_valued_idx").setSettings(settings) - .setMapping("location", "type=geo_point", "city", "type=keyword")); + assertAcked( + prepareCreate("multi_valued_idx").setSettings(settings).setMapping("location", "type=geo_point", "city", "type=keyword") + ); cities = new ArrayList<>(); multiValuedExpectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2); @@ -134,18 +135,15 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid") - .field("location") - .precision(precision) - ) - .get(); + .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .get(); assertSearchResponse(response); GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); List buckets = geoGrid.getBuckets(); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)geoGrid).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)geoGrid).getProperty("_count"); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) geoGrid).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) geoGrid).getProperty("_count"); for (int i = 0; i < buckets.size(); i++) { GeoGrid.Bucket cell = buckets.get(i); String geohash = cell.getKeyAsString(); @@ -153,8 +151,7 @@ public void testSimple() throws Exception { long bucketCount = cell.getDocCount(); int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", - expectedBucketCount, bucketCount); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); GeoPoint geoPoint = (GeoPoint) propertiesKeys[i]; assertThat(stringEncode(geoPoint.lon(), geoPoint.lat(), precision), equalTo(geohash)); assertThat((long) propertiesDocCounts[i], equalTo(bucketCount)); @@ -165,11 +162,8 @@ public void testSimple() throws Exception { public void testMultivalued() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("multi_valued_idx") - .addAggregation(geohashGrid("geohashgrid") - .field("location") - .precision(precision) - ) - .get(); + .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .get(); assertSearchResponse(response); @@ -180,8 +174,7 @@ public void testMultivalued() throws Exception { long bucketCount = cell.getDocCount(); int expectedBucketCount = multiValuedExpectedDocCountsForGeoHash.get(geohash); assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", - expectedBucketCount, bucketCount); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); } } } @@ -191,15 +184,11 @@ public void testFiltered() throws Exception { bbox.setCorners(smallestGeoHash).queryName("bbox"); for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = 
client().prepareSearch("idx") - .addAggregation( - AggregationBuilders.filter("filtered", bbox) - .subAggregation( - geohashGrid("geohashgrid") - .field("location") - .precision(precision) - ) - ) - .get(); + .addAggregation( + AggregationBuilders.filter("filtered", bbox) + .subAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + ) + .get(); assertSearchResponse(response); @@ -212,8 +201,7 @@ public void testFiltered() throws Exception { int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); assertNotSame(bucketCount, 0); assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash)); - assertEquals("Geohash " + geohash + " has wrong doc count ", - expectedBucketCount, bucketCount); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); } } @@ -222,11 +210,8 @@ public void testFiltered() throws Exception { public void testUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(geohashGrid("geohashgrid") - .field("location") - .precision(precision) - ) - .get(); + .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .get(); assertSearchResponse(response); @@ -239,11 +224,8 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(geohashGrid("geohashgrid") - .field("location") - .precision(precision) - ) - .get(); + .addAggregation(geohashGrid("geohashgrid").field("location").precision(precision)) + .get(); assertSearchResponse(response); @@ -254,8 +236,7 @@ public void testPartiallyUnmapped() throws Exception { long bucketCount = cell.getDocCount(); int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash); assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", - expectedBucketCount, bucketCount); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); } } } @@ -263,18 +244,13 @@ public void testPartiallyUnmapped() throws Exception { public void testTopMatch() throws Exception { for (int precision = 1; precision <= PRECISION; precision++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid") - .field("location") - .size(1) - .shardSize(100) - .precision(precision) - ) - .get(); + .addAggregation(geohashGrid("geohashgrid").field("location").size(1).shardSize(100).precision(precision)) + .get(); assertSearchResponse(response); GeoGrid geoGrid = response.getAggregations().get("geohashgrid"); - //Check we only have one bucket with the best match for that resolution + // Check we only have one bucket with the best match for that resolution assertThat(geoGrid.getBuckets().size(), equalTo(1)); for (GeoGrid.Bucket cell : geoGrid.getBuckets()) { String geohash = cell.getKeyAsString(); @@ -286,8 +262,7 @@ public void testTopMatch() throws Exception { } } assertNotSame(bucketCount, 0); - assertEquals("Geohash " + geohash + " has wrong doc count ", - expectedBucketCount, bucketCount); + assertEquals("Geohash " + geohash + " has wrong doc count ", expectedBucketCount, bucketCount); } } } @@ -295,19 +270,24 @@ public void testTopMatch() throws Exception { public void testSizeIsZero() { final int size = 0; final int shardSize = 
10000; - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)).get()); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) + .get() + ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. Found [0] in [geohashgrid]")); } public void testShardSizeIsZero() { final int size = 100; final int shardSize = 0; - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> client().prepareSearch("idx") - .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) - .get()); + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .addAggregation(geohashGrid("geohashgrid").field("location").size(size).shardSize(shardSize)) + .get() + ); assertThat(exception.getMessage(), containsString("[shardSize] must be greater than 0. Found [0] in [geohashgrid]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java index 7137f8bc7b0f9..417bf7f9d382f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/GlobalIT.java @@ -40,19 +40,20 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); numDocs = randomIntBetween(3, 20); for (int i = 0; i < numDocs / 2; i++) { - builders.add(client().prepareIndex("idx").setId(""+i+1).setSource(jsonBuilder() - .startObject() - .field("value", i + 1) - .field("tag", "tag1") - .endObject())); + builders.add( + client().prepareIndex("idx") + .setId("" + i + 1) + .setSource(jsonBuilder().startObject().field("value", i + 1).field("tag", "tag1").endObject()) + ); } for (int i = numDocs / 2; i < numDocs; i++) { - builders.add(client().prepareIndex("idx").setId(""+i+1).setSource(jsonBuilder() - .startObject() - .field("value", i + 1) - .field("tag", "tag2") - .field("name", "name" + i+1) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setId("" + i + 1) + .setSource( + jsonBuilder().startObject().field("value", i + 1).field("tag", "tag2").field("name", "name" + i + 1).endObject() + ) + ); } indexRandom(true, builders); ensureSearchable(); @@ -60,23 +61,21 @@ public void setupSuiteScopeCluster() throws Exception { public void testWithStatsSubAggregator() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.termQuery("tag", "tag1")) - .addAggregation(global("global") - .subAggregation(stats("value_stats").field("value"))) - .get(); + .setQuery(QueryBuilders.termQuery("tag", "tag1")) + .addAggregation(global("global").subAggregation(stats("value_stats").field("value"))) + .get(); assertSearchResponse(response); - Global global = response.getAggregations().get("global"); assertThat(global, notNullValue()); assertThat(global.getName(), equalTo("global")); assertThat(global.getDocCount(), equalTo((long) numDocs)); - assertThat((long) ((InternalAggregation)global).getProperty("_count"), equalTo((long) numDocs)); + 
assertThat((long) ((InternalAggregation) global).getProperty("_count"), equalTo((long) numDocs)); assertThat(global.getAggregations().asList().isEmpty(), is(false)); Stats stats = global.getAggregations().get("value_stats"); - assertThat((Stats) ((InternalAggregation)global).getProperty("value_stats"), sameInstance(stats)); + assertThat((Stats) ((InternalAggregation) global).getProperty("value_stats"), sameInstance(stats)); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("value_stats")); long sum = 0; @@ -93,13 +92,14 @@ public void testWithStatsSubAggregator() throws Exception { public void testNonTopLevel() throws Exception { try { client().prepareSearch("idx") - .setQuery(QueryBuilders.termQuery("tag", "tag1")) - .addAggregation(global("global") - .subAggregation(global("inner_global"))) - .get(); + .setQuery(QueryBuilders.termQuery("tag", "tag1")) + .addAggregation(global("global").subAggregation(global("inner_global"))) + .get(); - fail("expected to fail executing non-top-level global aggregator. global aggregations are only allowed as top level" + - "aggregations"); + fail( + "expected to fail executing non-top-level global aggregator. global aggregations are only allowed as top level" + + "aggregations" + ); } catch (ElasticsearchException e) { assertThat(e.getMessage(), is("all shards failed")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java index 108fcb36900ba..940b325c16991 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java @@ -139,23 +139,31 @@ public void setupSuiteScopeCluster() throws Exception { List builders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i + 1) - .startArray(MULTI_VALUED_FIELD_NAME).value(i + 1).value(i + 2).endArray() - .field("tag", "tag" + i) - .field("constant", 1) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i + 1) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i + 1) + .value(i + 2) + .endArray() + .field("tag", "tag" + i) + .field("constant", 1) + .endObject() + ) + ); } getMultiSortDocs(builders); assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i * 2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -180,36 +188,55 @@ private void getMultiSortDocs(List builders) throws IOExcep addExpectedBucket(6, 1, 5, 1); addExpectedBucket(7, 1, 5, 1); - assertAcked(client().admin().indices().prepareCreate("sort_idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=double").get()); + assertAcked(client().admin().indices().prepareCreate("sort_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=double").get()); for (int i = 1; i <= 3; i++) { - 
builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + ); } - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3.8).field("l", 3).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4.4).field("l", 3).field("d", 3).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5.1).field("l", 5).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + ); } public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - 
.addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) - .get(); + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) + .get(); assertSearchResponse(response); @@ -230,13 +257,12 @@ public void testSingleValuedField() throws Exception { public void singleValuedField_withOffset() throws Exception { int interval1 = 10; int offset = 5; - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset)) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset)) + .get(); // from setup we have between 6 and 20 documents, each with value 1 in test field - int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; + int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -251,7 +277,7 @@ public void singleValuedField_withOffset() throws Exception { // last bucket should have (numDocs % interval + 1) docs bucket = histo.getBuckets().get(0); assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs%interval1 + 5L)); + assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs % interval1 + 5L)); assertThat(bucket.getDocCount(), equalTo((numDocs % interval) + 1L)); } @@ -261,14 +287,13 @@ public void singleValuedField_withOffset() throws Exception { */ public void testSingleValuedFieldWithRandomOffset() throws Exception { int offset = randomIntBetween(2, interval); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset)) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset)) + .get(); assertSearchResponse(response); // shifting by offset>2 creates new extra bucket [0,offset-1] // if offset is >= number of values in original last bucket, that effect is canceled - int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1; + int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? 
numValueBuckets : numValueBuckets + 1; Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); @@ -279,13 +304,13 @@ public void testSingleValuedFieldWithRandomOffset() throws Exception { for (int i = 0; i < expectedNumberOfBuckets; ++i) { Histogram.Bucket bucket = histo.getBuckets().get(i); assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i-1) * interval + offset))); - if (i==0) { + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i - 1) * interval + offset))); + if (i == 0) { // first bucket - long expectedFirstBucketCount = offset-1; + long expectedFirstBucketCount = offset - 1; assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); docsCounted += expectedFirstBucketCount; - } else if(i buckets = new ArrayList<>(histo.getBuckets()); for (int i = 0; i < numValueBuckets; ++i) { @@ -436,17 +459,16 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("sum", true)) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("sum", true)) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -479,17 +501,16 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("sum", false)) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("sum", false)) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -522,17 +543,16 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("stats.sum", false)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))) - .get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("stats.sum", false)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -567,18 +587,16 @@ public void 
testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() throws Exception { boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("filter>max", asc)) - .subAggregation(filter("filter", matchAllQuery()) - .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)))) - .get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("filter>max", asc)) + .subAggregation(filter("filter", matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))) + ) + .get(); assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -607,11 +625,12 @@ public void testSingleValuedFieldOrderedBySubAggregationDescDeepOrderPath() thro public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .order(BucketOrder.aggregation("max_constant", randomBoolean())) - .subAggregation(max("max_constant").field("constant"))) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .order(BucketOrder.aggregation("max_constant", randomBoolean())) + .subAggregation(max("max_constant").field("constant")) + ) .get(); assertSearchResponse(response); @@ -633,16 +652,17 @@ public void testSingleValuedFieldOrderedByTieBreaker() throws Exception { public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { boolean asc = true; try { - client() - .prepareSearch("idx") + client().prepareSearch("idx") .addAggregation( histogram("histo").field(SINGLE_VALUED_FIELD_NAME) .interval(interval) .order(BucketOrder.aggregation("inner_histo>avg", asc)) - .subAggregation(histogram("inner_histo") - .interval(interval) - .field(MULTI_VALUED_FIELD_NAME) - .subAggregation(avg("avg").field("value")))) + .subAggregation( + histogram("inner_histo").interval(interval) + .field(MULTI_VALUED_FIELD_NAME) + .subAggregation(avg("avg").field("value")) + ) + ) .get(); fail("Expected an exception"); } catch (SearchPhaseExecutionException e) { @@ -663,12 +683,12 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(interval)) - .get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(interval) + ) + .get(); assertSearchResponse(response); @@ -695,12 +715,11 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval)) - .get(); + .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval)) + .get(); 
assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -717,12 +736,11 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false))) - .get(); + .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(BucketOrder.key(false))) + .get(); assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -739,12 +757,12 @@ public void testMultiValuedFieldOrderedByKeyDesc() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(MULTI_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) - .interval(interval)) - .get(); + .addAggregation( + histogram("histo").field(MULTI_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(interval) + ) + .get(); assertSearchResponse(response); @@ -776,11 +794,11 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testScriptSingleValue() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", emptyMap())) - .interval(interval)) - .get(); + .addAggregation( + histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_value'].value", emptyMap())) + .interval(interval) + ) + .get(); assertSearchResponse(response); @@ -800,11 +818,11 @@ public void testScriptSingleValue() throws Exception { public void testScriptMultiValued() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) - .interval(interval)) - .get(); + .addAggregation( + histogram("histo").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['l_values']", emptyMap())) + .interval(interval) + ) + .get(); assertSearchResponse(response); @@ -824,12 +842,11 @@ public void testScriptMultiValued() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) - .get(); + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) + .get(); assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -838,12 +855,11 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) - .get(); + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)) + .get(); 
assertSearchResponse(response); - Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); assertThat(histo.getName(), equalTo("histo")); @@ -859,14 +875,13 @@ public void testPartiallyUnmapped() throws Exception { } public void testPartiallyUnmappedWithExtendedBounds() throws Exception { - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(-1 * 2 * interval, valueCounts.length * interval)) - .get(); + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(-1 * 2 * interval, valueCounts.length * interval) + ) + .get(); assertSearchResponse(response); @@ -896,10 +911,14 @@ public void testPartiallyUnmappedWithExtendedBounds() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L).minDocCount(0) - .subAggregation(histogram("sub_histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1L) + .minDocCount(0) + .subAggregation(histogram("sub_histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -939,7 +958,6 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { long boundsMaxKey = lastDataBucketKey + boundsMaxKeyDelta; long boundsMax = boundsMaxKey + randomIntBetween(0, interval - 1); - // it could be that the random bounds.min we chose ended up greater than bounds.max - this should cause an // error boolean invalidBoundsError = boundsMin > boundsMax; @@ -952,12 +970,13 @@ public void testSingleValuedFieldWithExtendedBounds() throws Exception { SearchResponse response = null; try { response = client().prepareSearch("idx") - .addAggregation(histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .minDocCount(0) - .extendedBounds(boundsMin, boundsMax)) - .get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + .extendedBounds(boundsMin, boundsMax) + ) + .get(); if (invalidBoundsError) { fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); @@ -1015,7 +1034,6 @@ public void testEmptyWithExtendedBounds() throws Exception { long boundsMaxKey = lastDataBucketKey + boundsMaxKeyDelta; long boundsMax = boundsMaxKey + randomIntBetween(0, interval - 1); - // it could be that the random bounds.min we chose ended up greater than bounds.max - this should cause an // error boolean invalidBoundsError = boundsMin > boundsMax; @@ -1028,13 +1046,14 @@ public void testEmptyWithExtendedBounds() throws Exception { SearchResponse response = null; try { response = client().prepareSearch("idx") - .setQuery(QueryBuilders.termQuery("foo", "bar")) - .addAggregation(histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .minDocCount(0) - .extendedBounds(boundsMin, boundsMax)) - .get(); + .setQuery(QueryBuilders.termQuery("foo", "bar")) + .addAggregation( + 
histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + .extendedBounds(boundsMin, boundsMax) + ) + .get(); if (invalidBoundsError) { fail("Expected an exception to be thrown when bounds.min is greater than bounds.max"); @@ -1073,7 +1092,8 @@ public void testEmptyWithExtendedBounds() throws Exception { public void testExeptionOnNegativerInterval() { try { client().prepareSearch("empty_bucket_idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).get(); + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)) + .get(); fail(); } catch (IllegalArgumentException e) { assertThat(e.toString(), containsString("[interval] must be >0 for histogram aggregation [histo]")); @@ -1082,13 +1102,15 @@ public void testExeptionOnNegativerInterval() { public void testDecimalIntervalAndOffset() throws Exception { assertAcked(prepareCreate("decimal_values").setMapping("d", "type=float").get()); - indexRandom(true, - client().prepareIndex("decimal_values").setId("1").setSource("d", -0.6), - client().prepareIndex("decimal_values").setId("2").setSource("d", 0.1)); + indexRandom( + true, + client().prepareIndex("decimal_values").setId("1").setSource("d", -0.6), + client().prepareIndex("decimal_values").setId("2").setSource("d", 0.1) + ); SearchResponse r = client().prepareSearch("decimal_values") - .addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) - .get(); + .addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) + .get(); assertSearchResponse(r); Histogram histogram = r.getAggregations().get("histo"); @@ -1105,48 +1127,140 @@ public void testDecimalIntervalAndOffset() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=float") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=float") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("d", -0.6), - client().prepareIndex("cache_test_idx").setId("2").setSource("d", 0.1)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("d", -0.6), + client().prepareIndex("cache_test_idx").setId("2").setSource("d", 0.1) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())).interval(0.7).offset(0.05)) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) + .interval(0.7) + .offset(0.05) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())).interval(0.7).offset(0.05)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + histogram("histo").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", emptyMap())) + .interval(0.7) + .offset(0.05) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(histogram("histo").field("d").interval(0.7).offset(0.05)) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception { @@ -1155,54 +1269,70 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyAsc() throws Exception { - long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; + long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationDescAndKeyAsc() throws Exception { - long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 }; + long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 }; assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", false), BucketOrder.key(true)); } public void testSingleValuedFieldOrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception { - long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 }; + long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 }; assertMultiSortResponse(expectedKeys, BucketOrder.count(true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception { - long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 }; + long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 }; assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("sum_d", true), BucketOrder.aggregation("avg_l", true)); } public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { - long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), - BucketOrder.aggregation("avg_l", false)); + long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; + assertMultiSortResponse( + expectedKeys, + 
BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { - long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; + long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 }; assertMultiSortResponse(expectedKeys, BucketOrder.aggregation("avg_l", true)); } public void testInvalidBounds() { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("empty_bucket_idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(0.0, 10.0)) - .extendedBounds(3, 20)).get()); - assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds")); - - e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("empty_bucket_idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(3.0, null)) - .extendedBounds(0, 20)).get()); + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("empty_bucket_idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(0.0, 10.0)).extendedBounds(3, 20) + ) + .get() + ); + assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds")); + + e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("empty_bucket_idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(3.0, null)).extendedBounds(0, 20) + ) + .get() + ); assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds")); } public void testHardBounds() throws Exception { assertAcked(prepareCreate("test").setMapping("d", "type=double").get()); - indexRandom(true, + indexRandom( + true, client().prepareIndex("test").setId("1").setSource("d", -0.6), client().prepareIndex("test").setId("2").setSource("d", 0.5), - client().prepareIndex("test").setId("3").setSource("d", 0.1)); + client().prepareIndex("test").setId("3").setSource("d", 0.1) + ); SearchResponse r = client().prepareSearch("test") .addAggregation(histogram("histo").field("d").interval(0.1).hardBounds(new DoubleBounds(0.0, null))) @@ -1238,11 +1368,15 @@ public void testHardBounds() throws Exception { } private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... 
order) { - SearchResponse response = client() - .prepareSearch("sort_idx") + SearchResponse response = client().prepareSearch("sort_idx") .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).get(); + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java index b7d00ade7da0f..22cf163c44005 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpRangeIT.java @@ -46,20 +46,17 @@ protected Collection> nodePlugins() { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("idx") - .setMapping("ip", "type=ip", "ips", "type=ip")); + assertAcked(prepareCreate("idx").setMapping("ip", "type=ip", "ips", "type=ip")); waitForRelocation(ClusterHealthStatus.GREEN); - indexRandom(true, - client().prepareIndex("idx").setId("1").setSource( - "ip", "192.168.1.7", - "ips", Arrays.asList("192.168.0.13", "192.168.1.2")), - client().prepareIndex("idx").setId("2").setSource( - "ip", "192.168.1.10", - "ips", Arrays.asList("192.168.1.25", "192.168.1.28")), - client().prepareIndex("idx").setId("3").setSource( - "ip", "2001:db8::ff00:42:8329", - "ips", Arrays.asList("2001:db8::ff00:42:8329", "2001:db8::ff00:42:8380"))); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource("ip", "192.168.1.7", "ips", Arrays.asList("192.168.0.13", "192.168.1.2")), + client().prepareIndex("idx").setId("2").setSource("ip", "192.168.1.10", "ips", Arrays.asList("192.168.1.25", "192.168.1.28")), + client().prepareIndex("idx") + .setId("3") + .setSource("ip", "2001:db8::ff00:42:8329", "ips", Arrays.asList("2001:db8::ff00:42:8329", "2001:db8::ff00:42:8380")) + ); assertAcked(prepareCreate("idx_unmapped")); waitForRelocation(ClusterHealthStatus.GREEN); @@ -67,12 +64,15 @@ public void setupSuiteScopeCluster() throws Exception { } public void testSingleValuedField() { - SearchResponse rsp = client().prepareSearch("idx").addAggregation( + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation( AggregationBuilders.ipRange("my_range") .field("ip") .addUnboundedTo("192.168.1.0") .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10")).get(); + .addUnboundedFrom("192.168.1.10") + ) + .get(); assertSearchResponse(rsp); Range range = rsp.getAggregations().get("my_range"); assertEquals(3, range.getBuckets().size()); @@ -97,12 +97,15 @@ public void testSingleValuedField() { } public void testMultiValuedField() { - SearchResponse rsp = client().prepareSearch("idx").addAggregation( + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation( AggregationBuilders.ipRange("my_range") .field("ips") .addUnboundedTo("192.168.1.0") .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10")).get(); + .addUnboundedFrom("192.168.1.10") + ) + .get(); assertSearchResponse(rsp); Range range = rsp.getAggregations().get("my_range"); assertEquals(3, range.getBuckets().size()); @@ 
-127,12 +130,15 @@ public void testMultiValuedField() { } public void testIpMask() { - SearchResponse rsp = client().prepareSearch("idx").addAggregation( + SearchResponse rsp = client().prepareSearch("idx") + .addAggregation( AggregationBuilders.ipRange("my_range") .field("ips") .addMaskRange("::/0") .addMaskRange("0.0.0.0/0") - .addMaskRange("2001:db8::/64")).get(); + .addMaskRange("2001:db8::/64") + ) + .get(); assertSearchResponse(rsp); Range range = rsp.getAggregations().get("my_range"); assertEquals(3, range.getBuckets().size()); @@ -151,12 +157,15 @@ public void testIpMask() { } public void testPartiallyUnmapped() { - SearchResponse rsp = client().prepareSearch("idx", "idx_unmapped").addAggregation( + SearchResponse rsp = client().prepareSearch("idx", "idx_unmapped") + .addAggregation( AggregationBuilders.ipRange("my_range") .field("ip") .addUnboundedTo("192.168.1.0") .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10")).get(); + .addUnboundedFrom("192.168.1.10") + ) + .get(); assertSearchResponse(rsp); Range range = rsp.getAggregations().get("my_range"); assertEquals(3, range.getBuckets().size()); @@ -181,12 +190,15 @@ public void testPartiallyUnmapped() { } public void testUnmapped() { - SearchResponse rsp = client().prepareSearch("idx_unmapped").addAggregation( + SearchResponse rsp = client().prepareSearch("idx_unmapped") + .addAggregation( AggregationBuilders.ipRange("my_range") .field("ip") .addUnboundedTo("192.168.1.0") .addRange("192.168.1.0", "192.168.1.10") - .addUnboundedFrom("192.168.1.10")).get(); + .addUnboundedFrom("192.168.1.10") + ) + .get(); assertSearchResponse(rsp); Range range = rsp.getAggregations().get("my_range"); assertEquals(3, range.getBuckets().size()); @@ -211,30 +223,37 @@ public void testUnmapped() { } public void testRejectsScript() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> client().prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .script(new Script(ScriptType.INLINE, "mockscript", "dummy", Collections.emptyMap())) ).get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .addAggregation( + AggregationBuilders.ipRange("my_range") + .script(new Script(ScriptType.INLINE, "mockscript", "dummy", Collections.emptyMap())) + ) + .get() + ); assertThat(e.getMessage(), containsString("[ip_range] does not support scripts")); } public void testRejectsValueScript() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> client().prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .addAggregation( + AggregationBuilders.ipRange("my_range") .field("ip") - .script(new Script(ScriptType.INLINE, "mockscript", "dummy", Collections.emptyMap())) ).get()); + .script(new Script(ScriptType.INLINE, "mockscript", "dummy", Collections.emptyMap())) + ) + .get() + ); assertThat(e.getMessage(), containsString("[ip_range] does not support scripts")); } - public void testNoRangesInQuery() { + public void testNoRangesInQuery() { try { - client().prepareSearch("idx").addAggregation( - AggregationBuilders.ipRange("my_range") - .field("ip")) - .get(); + client().prepareSearch("idx").addAggregation(AggregationBuilders.ipRange("my_range").field("ip")).get(); fail(); - } catch (SearchPhaseExecutionException spee){ + } catch 
(SearchPhaseExecutionException spee) { Throwable rootCause = spee.getCause().getCause(); assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertEquals(rootCause.getMessage(), "No [ranges] specified for the [my_range] aggregation"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java index 098c86c8661a9..7f3535d76a9aa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/IpTermsIT.java @@ -38,12 +38,12 @@ protected Map, Object>> pluginScripts() { Map, Object>> scripts = super.pluginScripts(); scripts.put("doc['ip'].value", vars -> { - Map doc = (Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); return doc.get("ip"); }); scripts.put("doc['ip']", vars -> { - Map doc = (Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); return ((ScriptDocValues) doc.get("ip")).get(0); }); @@ -53,15 +53,17 @@ protected Map, Object>> pluginScripts() { public void testScriptValue() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); - indexRandom(true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1")); - - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "doc['ip'].value", Collections.emptyMap()); - SearchResponse response = client().prepareSearch("index").addAggregation( - AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())).get(); + indexRandom( + true, + client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") + ); + + Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['ip'].value", Collections.emptyMap()); + SearchResponse response = client().prepareSearch("index") + .addAggregation(AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); assertEquals(2, terms.getBuckets().size()); @@ -79,15 +81,17 @@ public void testScriptValue() throws Exception { public void testScriptValues() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); - indexRandom(true, - client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), - client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1")); - - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "doc['ip']", Collections.emptyMap()); - SearchResponse response = client().prepareSearch("index").addAggregation( - AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())).get(); + indexRandom( + true, + client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), + client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), + client().prepareIndex("index").setId("3").setSource("ip", "2001:db8::2:1") + ); + + Script script = new Script(ScriptType.INLINE, 
CustomScriptPlugin.NAME, "doc['ip']", Collections.emptyMap()); + SearchResponse response = client().prepareSearch("index") + .addAggregation(AggregationBuilders.terms("my_terms").script(script).executionHint(randomExecutionHint())) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); assertEquals(2, terms.getBuckets().size()); @@ -105,13 +109,16 @@ public void testScriptValues() throws Exception { public void testMissingValue() throws Exception { assertAcked(prepareCreate("index").setMapping("ip", "type=ip")); - indexRandom(true, + indexRandom( + true, client().prepareIndex("index").setId("1").setSource("ip", "192.168.1.7"), client().prepareIndex("index").setId("2").setSource("ip", "192.168.1.7"), client().prepareIndex("index").setId("3").setSource("ip", "127.0.0.1"), - client().prepareIndex("index").setId("4").setSource("not_ip", "something")); - SearchResponse response = client().prepareSearch("index").addAggregation(AggregationBuilders - .terms("my_terms").field("ip").missing("127.0.0.1").executionHint(randomExecutionHint())).get(); + client().prepareIndex("index").setId("4").setSource("not_ip", "something") + ); + SearchResponse response = client().prepareSearch("index") + .addAggregation(AggregationBuilders.terms("my_terms").field("ip").missing("127.0.0.1").executionHint(randomExecutionHint())) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("my_terms"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 00a738ee8d58b..f0e6fbde555e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -109,22 +109,32 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx", "high_card_idx"); IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS]; for (int i = 0; i < lowCardBuilders.length; i++) { - lowCardBuilders[i] = client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i) - .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray() - .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg - .field("constant", 1) - .endObject()); + lowCardBuilders[i] = client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i) + .value(i + 1) + .endArray() + .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg + .field("constant", 1) + .endObject() + ); } indexRandom(true, lowCardBuilders); IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size? 
for (int i = 0; i < highCardBuilders.length; i++) { - highCardBuilders[i] = client().prepareIndex("high_card_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i) - .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray() - .endObject()); + highCardBuilders[i] = client().prepareIndex("high_card_idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i) + .value(i + 1) + .endArray() + .endObject() + ); } indexRandom(true, highCardBuilders); @@ -133,10 +143,11 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i * 2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } getMultiSortDocs(builders); @@ -192,67 +203,47 @@ private void getMultiSortDocs(List builders) throws IOExcep createIndex("sort_idx"); for (int i = 1; i <= 3; i++) { - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 1) - .field("l", 1) - .field("d", i) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 2) - .field("l", 2) - .field("d", i) - .endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 1).field("l", 1).field("d", i).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 2).field("l", 2).field("d", i).endObject()) + ); } - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 3) - .field("l", 3) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 3) - .field("l", 3) - .field("d", 2) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 4) - .field("l", 3) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 4) - .field("l", 3) - .field("d", 3) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 5) - .field("l", 5) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 5) - .field("l", 5) - .field("d", 2) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 6) - .field("l", 5) - .field("d", 1) - .endObject())); - builders.add(client().prepareIndex("sort_idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, 7) - .field("l", 5) - .field("d", 1) - .endObject())); + builders.add( + client().prepareIndex("sort_idx") + 
.setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 3).field("l", 3).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 4).field("l", 3).field("d", 3).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 5).field("l", 5).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 6).field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 7).field("l", 5).field("d", 1).endObject()) + ); } private String key(Terms.Bucket bucket) { @@ -261,14 +252,17 @@ private String key(Terms.Bucket bucket) { // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> client().prepareSearch("high_card_idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("high_card_idx") + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) .collectMode(randomFrom(SubAggCollectionMode.values())) .minDocCount(randomInt(1)) - .size(0)) - .get()); + .size(0) + ) + .get() + ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. 
Found [0] in [terms]")); } @@ -283,7 +277,8 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx") - .addAggregation(terms("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values()))).get(); + .addAggregation(terms("terms").field(field).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(allResponse); Terms terms = allResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); @@ -295,10 +290,12 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception Set foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field(field).includeExclude(new IncludeExclude(partition, numPartitions)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); @@ -313,15 +310,15 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception public void testSingleValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -330,23 +327,23 @@ public void testSingleValuedFieldWithValueScript() throws Exception { for (int i = 0; i < 5; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d)); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (i+1d))); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1)); + assertThat(key(bucket), equalTo("" + (i + 1d))); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1)); assertThat(bucket.getDocCount(), equalTo(1L)); } } public void testMultiValuedFieldWithValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value - 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); 
@@ -355,8 +352,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { for (int i = 0; i < 6; i++) { Terms.Bucket bucket = terms.getBucketByKey("" + (i - 1d)); assertThat(bucket, notNullValue()); - assertThat(key(bucket), equalTo("" + (i-1d))); - assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i-1)); + assertThat(key(bucket), equalTo("" + (i - 1d))); + assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i - 1)); if (i == 0 || i == 5) { assertThat(bucket.getDocCount(), equalTo(1L)); } else { @@ -367,16 +364,15 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script( - ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "floor(_value / 1000 + 1)", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -407,18 +403,19 @@ public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { */ public void testScriptSingleValue() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(script)) - .get(); + .addAggregation(terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(script)) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -435,18 +432,19 @@ public void testScriptSingleValue() throws Exception { } public void testScriptMultiValued() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(script)) - .get(); + .addAggregation(terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(script)) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -467,14 +465,11 @@ public void testScriptMultiValued() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped", "idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - 
.collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation(terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -491,15 +486,13 @@ public void testPartiallyUnmapped() throws Exception { public void testPartiallyUnmappedWithFormat() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped", "idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .format("0000")) - .get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())).format("0000") + ) + .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); @@ -518,16 +511,14 @@ public void testPartiallyUnmappedWithFormat() throws Exception { public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithTermsSubAgg() throws Exception { boolean asc = true; SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", asc)) - .subAggregation( - avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation( - terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) - ).get(); - + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", asc)) + .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); assertSearchResponse(response); @@ -562,13 +553,13 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscWithTermsS public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("num_tags") - .field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) - ).get(); - + .addAggregation( + terms("num_tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ) + .get(); assertSearchResponse(response); @@ -599,15 +590,17 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("tags") - .field("num_tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>max", asc)) - .subAggregation(filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()) - 
.subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)))) - ).get(); - + .addAggregation( + terms("tags").field("num_tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME)) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -654,11 +647,12 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", true)) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", true)) + ) + .get(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -672,13 +666,13 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("num_tags", true)) - .subAggregation(terms("num_tags").field("num_tags") - .collectMode(randomFrom(SubAggCollectionMode.values()))) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("num_tags", true)) + .subAggregation(terms("num_tags").field("num_tags").collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type"); @@ -692,15 +686,18 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMe for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.foo", true)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.foo", true)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); - fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + - "with an unknown specified metric to order by"); + fail( + "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + + "with an unknown specified metric to order by" + ); } catch (ElasticsearchException e) { // expected @@ -712,15 +709,18 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric for (String index : Arrays.asList("idx", "idx_unmapped")) { try { client().prepareSearch(index) - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats", 
true)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats", true)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); - fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + - "where the metric name is not specified"); + fail( + "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + + "where the metric name is not specified" + ); } catch (ElasticsearchException e) { // expected @@ -731,12 +731,13 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); @@ -761,12 +762,13 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); @@ -791,12 +793,13 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.variance", asc)) - .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.variance", asc)) + .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); @@ -845,9 +848,12 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValu public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 }; - assertMultiSortResponse(expectedKeys, BucketOrder.count(false), - BucketOrder.aggregation("sum_d", false), - BucketOrder.aggregation("avg_l", 
false)); + assertMultiSortResponse( + expectedKeys, + BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { @@ -857,13 +863,14 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound private void assertMultiSortResponse(long[] expectedKeys, BucketOrder... order) { SearchResponse response = client().prepareSearch("sort_idx") - .addAggregation(terms("terms") - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")) - .subAggregation(sum("sum_d").field("d")) - ).get(); + .addAggregation( + terms("terms").field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ) + .get(); assertSearchResponse(response); @@ -896,47 +903,132 @@ public void testOtherDocCount() { * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - terms("terms").field("d").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + terms("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + 
client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - terms("terms").field("d").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + terms("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(terms("terms").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 4dd5b4ec6cefe..5018e8f300a29 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -94,8 +94,7 @@ protected Map, Object>> pluginScripts() { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("s", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("s", "type=keyword").get()); cardinality = randomIntBetween(8, 30); final List indexRequests = new ArrayList<>(); @@ -116,14 +115,18 @@ public void setupSuiteScopeCluster() throws Exception { String dateTerm = DateFormatter.forPattern("yyyy-MM-dd").format(time); final int frequency = randomBoolean() ? 
1 : randomIntBetween(2, 20); for (int j = 0; j < frequency; ++j) { - indexRequests.add(client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("s", stringTerm) - .field("l", longTerm) - .field("d", doubleTerm) - .field("date", dateTerm) - .field("match", randomBoolean()) - .endObject())); + indexRequests.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("s", stringTerm) + .field("l", longTerm) + .field("d", doubleTerm) + .field("date", dateTerm) + .field("match", randomBoolean()) + .endObject() + ) + ); } } cardinality = stringTerms.size(); @@ -142,10 +145,17 @@ TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field) { YES { @Override TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field) { - return builder.script(new org.elasticsearch.script.Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "doc['" + field + "']", Collections.emptyMap())); + return builder.script( + new org.elasticsearch.script.Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + field + "']", + Collections.emptyMap() + ) + ); } }; + abstract TermsAggregationBuilder apply(TermsAggregationBuilder builder, String field); } @@ -299,15 +309,17 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord private void testMinDocCountOnTerms(String field, Script script, BucketOrder order, String include, boolean retry) throws Exception { // all terms final SearchResponse allTermsResponse = client().prepareSearch("idx") - .setSize(0) - .setQuery(QUERY) - .addAggregation(script.apply(terms("terms"), field) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .order(order) - .size(cardinality + randomInt(10)) - .minDocCount(0)) - .get(); + .setSize(0) + .setQuery(QUERY) + .addAggregation( + script.apply(terms("terms"), field) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .order(order) + .size(cardinality + randomInt(10)) + .minDocCount(0) + ) + .get(); assertAllSuccessful(allTermsResponse); final Terms allTerms = allTermsResponse.getAggregations().get("terms"); @@ -316,16 +328,19 @@ private void testMinDocCountOnTerms(String field, Script script, BucketOrder ord for (long minDocCount = 0; minDocCount < 20; ++minDocCount) { final int size = randomIntBetween(1, cardinality + 2); final SearchRequest request = client().prepareSearch("idx") - .setSize(0) - .setQuery(QUERY) - .addAggregation(script.apply(terms("terms"), field) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .order(order) - .size(size) - .includeExclude(include == null ? null : new IncludeExclude(include, null)) - .shardSize(cardinality + randomInt(10)) - .minDocCount(minDocCount)).request(); + .setSize(0) + .setQuery(QUERY) + .addAggregation( + script.apply(terms("terms"), field) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .order(order) + .size(size) + .includeExclude(include == null ? 
null : new IncludeExclude(include, null)) + .shardSize(cardinality + randomInt(10)) + .minDocCount(minDocCount) + ) + .request(); final SearchResponse response = client().search(request).get(); assertAllSuccessful(response); assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include); @@ -367,48 +382,40 @@ public void testDateHistogramKeyDesc() throws Exception { private void testMinDocCountOnHistogram(BucketOrder order) throws Exception { final int interval = randomIntBetween(1, 3); final SearchResponse allResponse = client().prepareSearch("idx") - .setSize(0) - .setQuery(QUERY) - .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)) - .get(); + .setSize(0) + .setQuery(QUERY) + .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0)) + .get(); final Histogram allHisto = allResponse.getAggregations().get("histo"); for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { final SearchResponse response = client().prepareSearch("idx") - .setSize(0) - .setQuery(QUERY) - .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)) - .get(); + .setSize(0) + .setQuery(QUERY) + .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount)) + .get(); assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount); } } private void testMinDocCountOnDateHistogram(BucketOrder order) throws Exception { final SearchResponse allResponse = client().prepareSearch("idx") - .setSize(0) - .setQuery(QUERY) - .addAggregation( - dateHistogram("histo") - .field("date") - .fixedInterval(DateHistogramInterval.DAY) - .order(order) - .minDocCount(0)) - .get(); + .setSize(0) + .setQuery(QUERY) + .addAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(0)) + .get(); final Histogram allHisto = allResponse.getAggregations().get("histo"); for (long minDocCount = 0; minDocCount < 50; ++minDocCount) { final SearchResponse response = client().prepareSearch("idx") - .setSize(0) - .setQuery(QUERY) - .addAggregation( - dateHistogram("histo") - .field("date") - .fixedInterval(DateHistogramInterval.DAY) - .order(order) - .minDocCount(minDocCount)) - .get(); + .setSize(0) + .setQuery(QUERY) + .addAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).order(order).minDocCount(minDocCount) + ) + .get(); assertSubset(allHisto, response.getAggregations().get("histo"), minDocCount); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java index e2d5bcf41c09a..a072d2ae4a10b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NaNSortingIT.java @@ -13,13 +13,13 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Avg; import 
org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ExtendedStats; import org.elasticsearch.search.aggregations.metrics.ExtendedStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -44,6 +44,7 @@ public AvgAggregationBuilder builder() { factory.field("numeric_field"); return factory; } + @Override public double getValue(Aggregation aggregation) { return ((Avg) aggregation).getValue(); @@ -56,26 +57,30 @@ public ExtendedStatsAggregationBuilder builder() { factory.field("numeric_field"); return factory; } + @Override public String sortKey() { return name + ".variance"; } + @Override public double getValue(Aggregation aggregation) { return ((ExtendedStats) aggregation).getVariance(); } }, - STD_DEVIATION("std_deviation"){ + STD_DEVIATION("std_deviation") { @Override public ExtendedStatsAggregationBuilder builder() { ExtendedStatsAggregationBuilder factory = extendedStats(name); factory.field("numeric_field"); return factory; } + @Override public String sortKey() { return name + ".std_deviation"; } + @Override public double getValue(Aggregation aggregation) { return ((ExtendedStats) aggregation).getStdDeviation(); @@ -88,8 +93,11 @@ public double getValue(Aggregation aggregation) { public String name; - public abstract ValuesSourceAggregationBuilder.LeafOnly> builder(); + public abstract + ValuesSourceAggregationBuilder.LeafOnly< + ValuesSource.Numeric, + ? extends ValuesSourceAggregationBuilder.LeafOnly> + builder(); public String sortKey() { return name; @@ -100,13 +108,14 @@ public String sortKey() { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("string_value", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("string_value", "type=keyword").get()); final int numDocs = randomIntBetween(2, 10); for (int i = 0; i < numDocs; ++i) { final long value = randomInt(5); - XContentBuilder source = jsonBuilder().startObject().field("long_value", value).field("double_value", value + 0.05) - .field("string_value", "str_" + value); + XContentBuilder source = jsonBuilder().startObject() + .field("long_value", value) + .field("double_value", value + 0.05) + .field("string_value", "str_" + value); if (randomBoolean()) { source.field("numeric_value", randomDouble()); } @@ -142,9 +151,13 @@ public void testTerms(String fieldName) { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field(fieldName).collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(agg.builder()).order(BucketOrder.aggregation(agg.sortKey(), asc))) - .get(); + .addAggregation( + terms("terms").field(fieldName) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(agg.builder()) + .order(BucketOrder.aggregation(agg.sortKey(), asc)) + ) + .get(); assertSearchResponse(response); final Terms terms = response.getAggregations().get("terms"); @@ -167,10 +180,13 @@ public void testLongHistogram() { final boolean asc = randomBoolean(); SubAggregation agg = randomFrom(SubAggregation.values()); SearchResponse response = 
client().prepareSearch("idx") - .addAggregation(histogram("histo") - .field("long_value").interval(randomIntBetween(1, 2)) - .subAggregation(agg.builder()).order(BucketOrder.aggregation(agg.sortKey(), asc))) - .get(); + .addAggregation( + histogram("histo").field("long_value") + .interval(randomIntBetween(1, 2)) + .subAggregation(agg.builder()) + .order(BucketOrder.aggregation(agg.sortKey(), asc)) + ) + .get(); assertSearchResponse(response); final Histogram histo = response.getAggregations().get("histo"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index 35c0889896ce7..d12e572c9da6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -70,8 +70,7 @@ public class NestedIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("idx") - .setMapping("nested", "type=nested", "incorrect", "type=object")); + assertAcked(prepareCreate("idx").setMapping("nested", "type=nested", "incorrect", "type=object")); ensureGreen("idx"); List builders = new ArrayList<>(); @@ -93,68 +92,90 @@ public void setupSuiteScopeCluster() throws Exception { assertTrue(totalChildren > 0); for (int i = 0; i < numParents; i++) { - XContentBuilder source = jsonBuilder() - .startObject() - .field("value", i + 1) - .startArray("nested"); + XContentBuilder source = jsonBuilder().startObject().field("value", i + 1).startArray("nested"); for (int j = 0; j < numChildren[i]; ++j) { source = source.startObject().field("value", i + 1 + j).endObject(); } source = source.endArray().endObject(); - builders.add(client().prepareIndex("idx").setId(""+i+1).setSource(source)); + builders.add(client().prepareIndex("idx").setId("" + i + 1).setSource(source)); } prepareCreate("empty_bucket_idx").setMapping("value", "type=integer", "nested", "type=nested").get(); ensureGreen("empty_bucket_idx"); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field("value", i*2) - .startArray("nested") - .startObject().field("value", i + 1).endObject() - .startObject().field("value", i + 2).endObject() - .startObject().field("value", i + 3).endObject() - .startObject().field("value", i + 4).endObject() - .startObject().field("value", i + 5).endObject() - .endArray() - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource( + jsonBuilder().startObject() + .field("value", i * 2) + .startArray("nested") + .startObject() + .field("value", i + 1) + .endObject() + .startObject() + .field("value", i + 2) + .endObject() + .startObject() + .field("value", i + 3) + .endObject() + .startObject() + .field("value", i + 4) + .endObject() + .startObject() + .field("value", i + 5) + .endObject() + .endArray() + .endObject() + ) + ); } - assertAcked(prepareCreate("idx_nested_nested_aggs") - .setMapping(jsonBuilder().startObject().startObject("_doc").startObject("properties") - .startObject("nested1") - .field("type", "nested") - .startObject("properties") - .startObject("nested2") - .field("type", "nested") - .endObject() - .endObject() - .endObject() - .endObject().endObject().endObject())); + assertAcked( + 
prepareCreate("idx_nested_nested_aggs").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("nested2") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); ensureGreen("idx_nested_nested_aggs"); builders.add( - client().prepareIndex("idx_nested_nested_aggs").setId("1") - .setSource(jsonBuilder().startObject() - .startArray("nested1") - .startObject() - .field("a", "a") - .startArray("nested2") - .startObject() - .field("b", 2) - .endObject() - .endArray() - .endObject() - .startObject() - .field("a", "b") - .startArray("nested2") - .startObject() - .field("b", 2) - .endObject() - .endArray() - .endObject() - .endArray() - .endObject()) + client().prepareIndex("idx_nested_nested_aggs") + .setId("1") + .setSource( + jsonBuilder().startObject() + .startArray("nested1") + .startObject() + .field("a", "a") + .startArray("nested2") + .startObject() + .field("b", 2) + .endObject() + .endArray() + .endObject() + .startObject() + .field("a", "b") + .startArray("nested2") + .startObject() + .field("b", 2) + .endObject() + .endArray() + .endObject() + .endArray() + .endObject() + ) ); indexRandom(true, builders); ensureSearchable(); @@ -162,13 +183,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testSimple() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(nested("nested", "nested") - .subAggregation(stats("nested_value_stats").field("nested.value"))) - .get(); + .addAggregation(nested("nested", "nested").subAggregation(stats("nested_value_stats").field("nested.value"))) + .get(); assertSearchResponse(response); - double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; long sum = 0; @@ -200,9 +219,8 @@ public void testSimple() throws Exception { public void testNonExistingNestedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .addAggregation(nested("nested", "value") - .subAggregation(stats("nested_value_stats").field("nested.value"))) - .get(); + .addAggregation(nested("nested", "value").subAggregation(stats("nested_value_stats").field("nested.value"))) + .get(); Nested nested = searchResponse.getAggregations().get("nested"); assertThat(nested, Matchers.notNullValue()); @@ -212,14 +230,13 @@ public void testNonExistingNestedField() throws Exception { public void testNestedWithSubTermsAgg() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(nested("nested", "nested") - .subAggregation(terms("values").field("nested.value").size(100) - .collectMode(aggCollectionMode))) - .get(); + .addAggregation( + nested("nested", "nested").subAggregation(terms("values").field("nested.value").size(100).collectMode(aggCollectionMode)) + ) + .get(); assertSearchResponse(response); - long docCount = 0; long[] counts = new long[numParents + 6]; for (int i = 0; i < numParents; ++i) { @@ -240,7 +257,7 @@ public void testNestedWithSubTermsAgg() throws Exception { assertThat(nested, notNullValue()); assertThat(nested.getName(), equalTo("nested")); assertThat(nested.getDocCount(), equalTo(docCount)); - assertThat(((InternalAggregation)nested).getProperty("_count"), equalTo(docCount)); + assertThat(((InternalAggregation) nested).getProperty("_count"), equalTo(docCount)); assertThat(nested.getAggregations().asList().isEmpty(), is(false)); 
LongTerms values = nested.getAggregations().get("values"); @@ -258,20 +275,21 @@ public void testNestedWithSubTermsAgg() throws Exception { assertEquals(counts[i], bucket.getDocCount()); } } - assertThat(((InternalAggregation)nested).getProperty("values"), sameInstance(values)); + assertThat(((InternalAggregation) nested).getProperty("values"), sameInstance(values)); } public void testNestedAsSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("top_values").field("value").size(100) - .collectMode(aggCollectionMode) - .subAggregation(nested("nested", "nested") - .subAggregation(max("max_value").field("nested.value")))) - .get(); + .addAggregation( + terms("top_values").field("value") + .size(100) + .collectMode(aggCollectionMode) + .subAggregation(nested("nested", "nested").subAggregation(max("max_value").field("nested.value"))) + ) + .get(); assertSearchResponse(response); - LongTerms values = response.getAggregations().get("top_values"); assertThat(values, notNullValue()); assertThat(values.getName(), equalTo("top_values")); @@ -291,15 +309,16 @@ public void testNestedAsSubAggregation() throws Exception { public void testNestNestedAggs() throws Exception { SearchResponse response = client().prepareSearch("idx_nested_nested_aggs") - .addAggregation(nested("level1", "nested1") - .subAggregation(terms("a").field("nested1.a.keyword") - .collectMode(aggCollectionMode) - .subAggregation(nested("level2", "nested1.nested2") - .subAggregation(sum("sum").field("nested1.nested2.b"))))) - .get(); + .addAggregation( + nested("level1", "nested1").subAggregation( + terms("a").field("nested1.a.keyword") + .collectMode(aggCollectionMode) + .subAggregation(nested("level2", "nested1.nested2").subAggregation(sum("sum").field("nested1.nested2.b"))) + ) + ) + .get(); assertSearchResponse(response); - Nested level1 = response.getAggregations().get("level1"); assertThat(level1, notNullValue()); assertThat(level1.getName(), equalTo("level1")); @@ -326,10 +345,9 @@ public void testNestNestedAggs() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(nested("nested", "nested"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(nested("nested", "nested"))) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -345,10 +363,7 @@ public void testEmptyAggregation() throws Exception { public void testNestedOnObjectField() throws Exception { try { - client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(nested("object_field", "incorrect")) - .get(); + client().prepareSearch("idx").setQuery(matchAllQuery()).addAggregation(nested("object_field", "incorrect")).get(); fail(); } catch (SearchPhaseExecutionException e) { assertThat(e.toString(), containsString("[nested] nested path [incorrect] is not nested")); @@ -357,69 +372,108 @@ public void testNestedOnObjectField() throws Exception { // Test based on: https://github.com/elastic/elasticsearch/issues/9280 public void testParentFilterResolvedCorrectly() throws Exception { - XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties") - 
.startObject("comments") - .field("type", "nested") - .startObject("properties") - .startObject("cid").field("type", "long").endObject() - .startObject("identifier").field("type", "keyword").endObject() - .startObject("tags") - .field("type", "nested") - .startObject("properties") - .startObject("tid").field("type", "long").endObject() - .startObject("name").field("type", "keyword").endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .startObject("dates") - .field("type", "object") - .startObject("properties") - .startObject("day").field("type", "date").field("format", "date_optional_time").endObject() - .startObject("month") - .field("type", "object") - .startObject("properties") - .startObject("end").field("type", "date").field("format", "date_optional_time").endObject() - .startObject("start").field("type", "date").field("format", "date_optional_time").endObject() - .startObject("label").field("type", "keyword").endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject().endObject().endObject(); - assertAcked(prepareCreate("idx2") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping(mapping)); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("comments") + .field("type", "nested") + .startObject("properties") + .startObject("cid") + .field("type", "long") + .endObject() + .startObject("identifier") + .field("type", "keyword") + .endObject() + .startObject("tags") + .field("type", "nested") + .startObject("properties") + .startObject("tid") + .field("type", "long") + .endObject() + .startObject("name") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("dates") + .field("type", "object") + .startObject("properties") + .startObject("day") + .field("type", "date") + .field("format", "date_optional_time") + .endObject() + .startObject("month") + .field("type", "object") + .startObject("properties") + .startObject("end") + .field("type", "date") + .field("format", "date_optional_time") + .endObject() + .startObject("start") + .field("type", "date") + .field("format", "date_optional_time") + .endObject() + .startObject("label") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked( + prepareCreate("idx2").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .setMapping(mapping) + ); ensureGreen("idx2"); List indexRequests = new ArrayList<>(2); - indexRequests.add(client().prepareIndex("idx2").setId("1") - .setSource("{\"dates\": {\"month\": {\"label\": \"2014-11\", \"end\": \"2014-11-30\", \"start\": \"2014-11-01\"}, " + - "\"day\": \"2014-11-30\"}, \"comments\": [{\"cid\": 3,\"identifier\": \"29111\"}, {\"cid\": 4,\"tags\": [" + - "{\"tid\" :44,\"name\": \"Roles\"}], \"identifier\": \"29101\"}]}", XContentType.JSON)); - indexRequests.add(client().prepareIndex("idx2").setId("2") - .setSource("{\"dates\": {\"month\": {\"label\": \"2014-12\", \"end\": \"2014-12-31\", \"start\": \"2014-12-01\"}, " + - "\"day\": \"2014-12-03\"}, \"comments\": [{\"cid\": 1, \"identifier\": \"29111\"}, {\"cid\": 2,\"tags\": [" + - "{\"tid\" : 22, \"name\": \"DataChannels\"}], \"identifier\": \"29101\"}]}", XContentType.JSON)); + indexRequests.add( + client().prepareIndex("idx2") + .setId("1") + .setSource( + 
"{\"dates\": {\"month\": {\"label\": \"2014-11\", \"end\": \"2014-11-30\", \"start\": \"2014-11-01\"}, " + + "\"day\": \"2014-11-30\"}, \"comments\": [{\"cid\": 3,\"identifier\": \"29111\"}, {\"cid\": 4,\"tags\": [" + + "{\"tid\" :44,\"name\": \"Roles\"}], \"identifier\": \"29101\"}]}", + XContentType.JSON + ) + ); + indexRequests.add( + client().prepareIndex("idx2") + .setId("2") + .setSource( + "{\"dates\": {\"month\": {\"label\": \"2014-12\", \"end\": \"2014-12-31\", \"start\": \"2014-12-01\"}, " + + "\"day\": \"2014-12-03\"}, \"comments\": [{\"cid\": 1, \"identifier\": \"29111\"}, {\"cid\": 2,\"tags\": [" + + "{\"tid\" : 22, \"name\": \"DataChannels\"}], \"identifier\": \"29101\"}]}", + XContentType.JSON + ) + ); indexRandom(true, indexRequests); SearchResponse response = client().prepareSearch("idx2") - .addAggregation( - terms("startDate").field("dates.month.start").subAggregation( - terms("endDate").field("dates.month.end").subAggregation( - terms("period").field("dates.month.label").subAggregation( - nested("ctxt_idfier_nested", "comments") - .subAggregation(filter("comment_filter", termQuery("comments.identifier", "29111")) - .subAggregation(nested("nested_tags", "comments.tags") - .subAggregation( - terms("tag").field("comments.tags.name") - ) - ) + .addAggregation( + terms("startDate").field("dates.month.start") + .subAggregation( + terms("endDate").field("dates.month.end") + .subAggregation( + terms("period").field("dates.month.label") + .subAggregation( + nested("ctxt_idfier_nested", "comments").subAggregation( + filter("comment_filter", termQuery("comments.identifier", "29111")).subAggregation( + nested("nested_tags", "comments.tags").subAggregation( + terms("tag").field("comments.tags.name") ) + ) ) - ) - ) - ).get(); + ) + ) + ) + ) + .get(); assertNoFailures(response); assertHitCount(response, 2); @@ -462,39 +516,59 @@ public void testParentFilterResolvedCorrectly() throws Exception { public void testNestedSameDocIdProcessedMultipleTime() throws Exception { assertAcked( - prepareCreate("idx4") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping("categories", "type=keyword", "name", "type=text", "property", "type=nested") + prepareCreate("idx4").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .setMapping("categories", "type=keyword", "name", "type=text", "property", "type=nested") ); ensureGreen("idx4"); - client().prepareIndex("idx4").setId("1").setSource(jsonBuilder().startObject() + client().prepareIndex("idx4") + .setId("1") + .setSource( + jsonBuilder().startObject() .field("name", "product1") .array("categories", "1", "2", "3", "4") .startArray("property") - .startObject().field("id", 1).endObject() - .startObject().field("id", 2).endObject() - .startObject().field("id", 3).endObject() + .startObject() + .field("id", 1) + .endObject() + .startObject() + .field("id", 2) + .endObject() + .startObject() + .field("id", 3) + .endObject() + .endArray() + .endObject() + ) + .get(); + client().prepareIndex("idx4") + .setId("2") + .setSource( + jsonBuilder().startObject() + .field("name", "product2") + .array("categories", "1", "2") + .startArray("property") + .startObject() + .field("id", 1) + .endObject() + .startObject() + .field("id", 5) + .endObject() + .startObject() + .field("id", 4) + .endObject() .endArray() - .endObject()).get(); - client().prepareIndex("idx4").setId("2").setSource(jsonBuilder().startObject() - .field("name", "product2") - 
.array("categories", "1", "2") - .startArray("property") - .startObject().field("id", 1).endObject() - .startObject().field("id", 5).endObject() - .startObject().field("id", 4).endObject() - .endArray() - .endObject()).get(); + .endObject() + ) + .get(); refresh(); SearchResponse response = client().prepareSearch("idx4") - .addAggregation(terms("category").field("categories").subAggregation( - nested("property", "property").subAggregation( - terms("property_id").field("property.id") - ) - )) - .get(); + .addAggregation( + terms("category").field("categories") + .subAggregation(nested("property", "property").subAggregation(terms("property_id").field("property.id"))) + ) + .get(); assertNoFailures(response); assertHitCount(response, 2); @@ -547,110 +621,151 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { } public void testFilterAggInsideNestedAgg() throws Exception { - assertAcked(prepareCreate("classes") - .setMapping(jsonBuilder().startObject().startObject("_doc").startObject("properties") - .startObject("name").field("type", "text").endObject() - .startObject("methods") - .field("type", "nested") - .startObject("properties") - .startObject("name").field("type", "text").endObject() - .startObject("return_type").field("type", "keyword").endObject() - .startObject("parameters") - .field("type", "nested") - .startObject("properties") - .startObject("name").field("type", "text").endObject() - .startObject("type").field("type", "keyword").endObject() - .endObject() - .endObject() - .endObject() - .endObject().endObject().endObject().endObject())); + assertAcked( + prepareCreate("classes").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("name") + .field("type", "text") + .endObject() + .startObject("methods") + .field("type", "nested") + .startObject("properties") + .startObject("name") + .field("type", "text") + .endObject() + .startObject("return_type") + .field("type", "keyword") + .endObject() + .startObject("parameters") + .field("type", "nested") + .startObject("properties") + .startObject("name") + .field("type", "text") + .endObject() + .startObject("type") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); - client().prepareIndex("classes").setId("1").setSource(jsonBuilder().startObject() + client().prepareIndex("classes") + .setId("1") + .setSource( + jsonBuilder().startObject() .field("name", "QueryBuilder") .startArray("methods") - .startObject() - .field("name", "toQuery") - .field("return_type", "Query") - .startArray("parameters") - .startObject() - .field("name", "context") - .field("type", "SearchExecutionContext") - .endObject() - .endArray() - .endObject() - .startObject() - .field("name", "queryName") - .field("return_type", "QueryBuilder") - .startArray("parameters") - .startObject() - .field("name", "queryName") - .field("type", "String") - .endObject() - .endArray() - .endObject() - .startObject() - .field("name", "boost") - .field("return_type", "QueryBuilder") - .startArray("parameters") - .startObject() - .field("name", "boost") - .field("type", "float") - .endObject() - .endArray() - .endObject() + .startObject() + .field("name", "toQuery") + .field("return_type", "Query") + .startArray("parameters") + .startObject() + .field("name", "context") + .field("type", "SearchExecutionContext") + .endObject() + .endArray() + .endObject() + .startObject() + .field("name", "queryName") 
+ .field("return_type", "QueryBuilder") + .startArray("parameters") + .startObject() + .field("name", "queryName") + .field("type", "String") + .endObject() + .endArray() + .endObject() + .startObject() + .field("name", "boost") + .field("return_type", "QueryBuilder") + .startArray("parameters") + .startObject() + .field("name", "boost") + .field("type", "float") + .endObject() + .endArray() + .endObject() .endArray() - .endObject()).get(); - client().prepareIndex("classes").setId("2").setSource(jsonBuilder().startObject() + .endObject() + ) + .get(); + client().prepareIndex("classes") + .setId("2") + .setSource( + jsonBuilder().startObject() .field("name", "Document") .startArray("methods") - .startObject() - .field("name", "add") - .field("return_type", "void") - .startArray("parameters") - .startObject() - .field("name", "field") - .field("type", "IndexableField") - .endObject() - .endArray() - .endObject() - .startObject() - .field("name", "removeField") - .field("return_type", "void") - .startArray("parameters") - .startObject() - .field("name", "name") - .field("type", "String") - .endObject() - .endArray() - .endObject() - .startObject() - .field("name", "removeFields") - .field("return_type", "void") - .startArray("parameters") - .startObject() - .field("name", "name") - .field("type", "String") - .endObject() - .endArray() - .endObject() + .startObject() + .field("name", "add") + .field("return_type", "void") + .startArray("parameters") + .startObject() + .field("name", "field") + .field("type", "IndexableField") + .endObject() .endArray() - .endObject()).get(); + .endObject() + .startObject() + .field("name", "removeField") + .field("return_type", "void") + .startArray("parameters") + .startObject() + .field("name", "name") + .field("type", "String") + .endObject() + .endArray() + .endObject() + .startObject() + .field("name", "removeFields") + .field("return_type", "void") + .startArray("parameters") + .startObject() + .field("name", "name") + .field("type", "String") + .endObject() + .endArray() + .endObject() + .endArray() + .endObject() + ) + .get(); refresh(); - SearchResponse response = client().prepareSearch("classes").addAggregation(nested("to_method", "methods") - .subAggregation(filter("num_string_params", - nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None))) - ).get(); + SearchResponse response = client().prepareSearch("classes") + .addAggregation( + nested("to_method", "methods").subAggregation( + filter( + "num_string_params", + nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ) + ) + ) + .get(); Nested toMethods = response.getAggregations().get("to_method"); Filter numStringParams = toMethods.getAggregations().get("num_string_params"); assertThat(numStringParams.getDocCount(), equalTo(3L)); - response = client().prepareSearch("classes").addAggregation(nested("to_method", "methods") - .subAggregation(terms("return_type").field("methods.return_type").subAggregation( - filter("num_string_params", nestedQuery("methods.parameters", - termQuery("methods.parameters.type", "String"), ScoreMode.None)) + response = client().prepareSearch("classes") + .addAggregation( + nested("to_method", "methods").subAggregation( + terms("return_type").field("methods.return_type") + .subAggregation( + filter( + "num_string_params", + nestedQuery("methods.parameters", termQuery("methods.parameters.type", "String"), ScoreMode.None) + ) ) - )).get(); + ) + ) + .get(); toMethods = 
response.getAggregations().get("to_method"); Terms terms = toMethods.getAggregations().get("return_type"); Bucket bucket = terms.getBucketByKey("void"); @@ -671,43 +786,47 @@ public void testFilterAggInsideNestedAgg() throws Exception { public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { assertAcked( - prepareCreate("idxduplicatehitnames") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping("categories", "type=keyword", "name", "type=text", "property", "type=nested") + prepareCreate("idxduplicatehitnames").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("categories", "type=keyword", "name", "type=text", "property", "type=nested") ); ensureGreen("idxduplicatehitnames"); - SearchRequestBuilder searchRequestBuilder = client() - .prepareSearch("idxduplicatehitnames") - .setQuery(boolQuery() - .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1"))) - .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih2"))) - .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1")))); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch("idxduplicatehitnames") + .setQuery( + boolQuery().should( + nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1")) + ) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih2"))) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder("ih1"))) + ); assertFailures( searchRequestBuilder, RestStatus.BAD_REQUEST, - containsString("[inner_hits] already contains an entry for key [ih1]")); + containsString("[inner_hits] already contains an entry for key [ih1]") + ); } public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { assertAcked( - prepareCreate("idxnullhitnames") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping("categories", "type=keyword", "name", "type=text", "property", "type=nested") + prepareCreate("idxnullhitnames").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("categories", "type=keyword", "name", "type=text", "property", "type=nested") ); ensureGreen("idxnullhitnames"); - SearchRequestBuilder searchRequestBuilder = client() - .prepareSearch("idxnullhitnames") - .setQuery(boolQuery() - .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) - .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) - .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder()))); + SearchRequestBuilder searchRequestBuilder = client().prepareSearch("idxnullhitnames") + .setQuery( + boolQuery().should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + .should(nestedQuery("property", termQuery("property.id", 1D), ScoreMode.None).innerHit(new InnerHitBuilder())) + ); assertFailures( searchRequestBuilder, 
RestStatus.BAD_REQUEST, - containsString("[inner_hits] already contains an entry for key [property]")); + containsString("[inner_hits] already contains an entry for key [property]") + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java index fafc30d20d0cd..b5aba9e4113c0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/RangeIT.java @@ -98,30 +98,38 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(10, 20); List builders = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field(SINGLE_VALUED_FIELD_NAME, i+1) - .startArray(MULTI_VALUED_FIELD_NAME).value(i+1).value(i+2).endArray() - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, i + 1) + .startArray(MULTI_VALUED_FIELD_NAME) + .value(i + 1) + .value(i + 2) + .endArray() + .endObject() + ) + ); } createIndex("idx_unmapped"); prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource(jsonBuilder() - .startObject() - // shift sequence by 1, to ensure we have negative values, and value 3 on the edge of the tested ranges - .field(SINGLE_VALUED_FIELD_NAME, i * 2 - 1) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource( + jsonBuilder().startObject() + // shift sequence by 1, to ensure we have negative values, and value 3 on the edge of the tested ranges + .field(SINGLE_VALUED_FIELD_NAME, i * 2 - 1) + .endObject() + ) + ); } // Create two indices and add the field 'route_length_miles' as an alias in // one, and a concrete field in the other. 
- prepareCreate("old_index") - .setMapping("distance", "type=double", "route_length_miles", "type=alias,path=distance") - .get(); - prepareCreate("new_index") - .setMapping("route_length_miles", "type=double") - .get(); + prepareCreate("old_index").setMapping("distance", "type=double", "route_length_miles", "type=alias,path=distance").get(); + prepareCreate("new_index").setMapping("route_length_miles", "type=double").get(); builders.add(client().prepareIndex("old_index").setSource("distance", 42.0)); builders.add(client().prepareIndex("old_index").setSource("distance", 50.5)); @@ -134,13 +142,13 @@ public void setupSuiteScopeCluster() throws Exception { public void testRangeAsSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME).size(100) - .collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6))) - .get(); + .addAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME) + .size(100) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -197,16 +205,11 @@ public void testRangeAsSubAggregation() throws Exception { public void testSingleValueField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + .addAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -242,15 +245,12 @@ public void testSingleValueField() throws Exception { } public void testSingleValueFieldWithFormat() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#")) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#")) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -287,16 +287,13 @@ public void testSingleValueFieldWithFormat() throws Exception { public void testSingleValueFieldWithCustomKey() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo("r1", 3) - .addRange("r2", 3, 6) - .addUnboundedFrom("r3", 6)) - .get(); + .addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo("r1", 3).addRange("r2", 3, 6).addUnboundedFrom("r3", 6) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -333,25 +330,25 @@ public void testSingleValueFieldWithCustomKey() throws Exception { public void 
testSingleValuedFieldWithSubAggregation() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .get(); + .addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); List buckets = range.getBuckets(); assertThat(range.getBuckets().size(), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)range).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)range).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)range).getProperty("sum.value"); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) range).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) range).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) range).getProperty("sum.value"); Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); @@ -404,16 +401,15 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + range("range").field(SINGLE_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + ) + .get(); assertSearchResponse(response); @@ -466,16 +462,11 @@ public void testSingleValuedFieldWithValueScript() throws Exception { public void testMultiValuedField() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(range("range") - .field(MULTI_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + .addAggregation(range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -524,20 +515,18 @@ public void testMultiValuedField() throws Exception { */ public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - range("range") - .field(MULTI_VALUED_FIELD_NAME) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + range("range").field(MULTI_VALUED_FIELD_NAME) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", 
Collections.emptyMap())) + .addUnboundedTo(3) + .addRange(3, 6) + .addUnboundedFrom(6) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -590,21 +579,18 @@ public void testMultiValuedFieldWithValueScript() throws Exception { */ public void testScriptSingleValue() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - range("range") - .script(script) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -641,15 +627,11 @@ public void testScriptSingleValue() throws Exception { public void testEmptyRange() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(range("range") - .field(MULTI_VALUED_FIELD_NAME) - .addUnboundedTo(-1) - .addUnboundedFrom(1000)) - .get(); + .addAggregation(range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(-1).addUnboundedFrom(1000)) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -675,14 +657,11 @@ public void testEmptyRange() throws Exception { assertThat(bucket.getDocCount(), equalTo(0L)); } - public void testNoRangesInQuery() { + public void testNoRangesInQuery() { try { - client().prepareSearch("idx") - .addAggregation(range("foobar") - .field(SINGLE_VALUED_FIELD_NAME)) - .get(); + client().prepareSearch("idx").addAggregation(range("foobar").field(SINGLE_VALUED_FIELD_NAME)).get(); fail(); - } catch (SearchPhaseExecutionException spee){ + } catch (SearchPhaseExecutionException spee) { Throwable rootCause = spee.getCause().getCause(); assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertEquals(rootCause.getMessage(), "No [ranges] specified for the [foobar] aggregation"); @@ -690,21 +669,18 @@ public void testNoRangesInQuery() { } public void testScriptMultiValued() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - range("range") - .script(script) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ); - assertSearchResponse(response); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(range("range").script(script).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + .get(); + assertSearchResponse(response); Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); @@ -759,16 +735,11 @@ public void 
testScriptMultiValued() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + .addAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -807,16 +778,11 @@ public void testPartiallyUnmapped() throws Exception { client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().get(); SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .addUnboundedTo(3) - .addRange(3, 6) - .addUnboundedFrom(6)) - .get(); + .addAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6)) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -853,17 +819,13 @@ public void testPartiallyUnmapped() throws Exception { public void testOverlappingRanges() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(range("range") - .field(MULTI_VALUED_FIELD_NAME) - .addUnboundedTo(5) - .addRange(3, 6) - .addRange(4, 5) - .addUnboundedFrom(4)) - .get(); + .addAggregation( + range("range").field(MULTI_VALUED_FIELD_NAME).addUnboundedTo(5).addRange(3, 6).addRange(4, 5).addUnboundedFrom(4) + ) + .get(); assertSearchResponse(response); - Range range = response.getAggregations().get("range"); assertThat(range, notNullValue()); assertThat(range.getName(), equalTo("range")); @@ -909,17 +871,14 @@ public void testOverlappingRanges() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(1L) - .minDocCount(0) - .subAggregation( - range("range") - .field(SINGLE_VALUED_FIELD_NAME) - .addRange("0-2", 0.0, 2.0))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1L) + .minDocCount(0) + .subAggregation(range("range").field(SINGLE_VALUED_FIELD_NAME).addRange("0-2", 0.0, 2.0)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -947,62 +906,142 @@ public void testEmptyAggregation() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("i", "type=integer") + assertAcked( + prepareCreate("cache_test_idx").setMapping("i", "type=integer") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, - client().prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().field("i", 1).endObject()), - client().prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().field("i", 2).endObject())); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource(jsonBuilder().startObject().field("i", 1).endObject()), + client().prepareIndex("cache_test_idx").setId("2").setSource(jsonBuilder().startObject().field("i", 2).endObject()) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached Map params = new HashMap<>(); params.put("fieldname", "date"); - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - range("foo").field("i").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())).addRange(0, 10)) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + range("foo").field("i") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + .addRange(0, 10) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - range("foo").field("i").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())).addRange(0, 10)) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + range("foo").field("i") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value + 1", Collections.emptyMap())) + .addRange(0, 10) + ) + .get(); 
assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(range("foo").field("i").addRange(0, 10)).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testFieldAlias() { SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(range("range") - .field("route_length_miles") - .addUnboundedTo(50.0) - .addRange(50.0, 150.0) - .addUnboundedFrom(150.0)) + .addAggregation(range("range").field("route_length_miles").addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0)) .get(); assertSearchResponse(response); @@ -1029,15 +1068,11 @@ public void testFieldAlias() { assertThat(bucket.getDocCount(), equalTo(0L)); } - public void testFieldAliasWithMissingValue() { SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(range("range") - .field("route_length_miles") - .missing(0.0) - .addUnboundedTo(50.0) - .addRange(50.0, 150.0) - .addUnboundedFrom(150.0)) + .addAggregation( + range("range").field("route_length_miles").missing(0.0).addUnboundedTo(50.0).addRange(50.0, 150.0).addUnboundedFrom(150.0) + ) .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java index cfc333b87acd2..20b6f761b0ec4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedIT.java @@ -12,13 +12,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.nested.Nested; 
import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.ValueCount; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -50,30 +50,52 @@ public class ReverseNestedIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("idx1") - .setMapping( - jsonBuilder().startObject().startObject("properties") - .startObject("field1").field("type", "keyword").endObject() - .startObject("alias") - .field("type", "alias") - .field("path", "field1") - .endObject() - .startObject("nested1").field("type", "nested").startObject("properties") - .startObject("field2").field("type", "keyword").endObject() - .endObject().endObject() - .endObject().endObject() - )); - assertAcked(prepareCreate("idx2") - .setMapping( - jsonBuilder().startObject().startObject("properties") - .startObject("nested1").field("type", "nested").startObject("properties") - .startObject("field1").field("type", "keyword").endObject() - .startObject("nested2").field("type", "nested").startObject("properties") - .startObject("field2").field("type", "keyword").endObject() - .endObject().endObject() - .endObject().endObject() - .endObject().endObject() - ) + assertAcked( + prepareCreate("idx1").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("field1") + .field("type", "keyword") + .endObject() + .startObject("alias") + .field("type", "alias") + .field("path", "field1") + .endObject() + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("field2") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); + assertAcked( + prepareCreate("idx2").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject("field1") + .field("type", "keyword") + .endObject() + .startObject("nested2") + .field("type", "nested") + .startObject("properties") + .startObject("field2") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ) ); insertIdx1(Arrays.asList("a", "b", "c"), Arrays.asList("1", "2", "3", "4")); @@ -89,22 +111,21 @@ public void setupSuiteScopeCluster() throws Exception { insertIdx1(Arrays.asList("z"), Arrays.asList("5", "9")); refresh(); - insertIdx2(new String[][]{new String[]{"a", "0", "0", "1", "2"}, new String[]{"b", "0", "1", "1", "2"}, new String[]{"a", "0"}}); - insertIdx2(new String[][]{new String[]{"c", "1", "1", "2", "2"}, new String[]{"d", "3", "4"}}); + insertIdx2( + new String[][] { new String[] { "a", "0", "0", "1", "2" }, new String[] { "b", "0", "1", "1", "2" }, new String[] { "a", "0" } } + ); + insertIdx2(new String[][] { new String[] { "c", "1", "1", "2", "2" }, new String[] { "d", "3", "4" } }); refresh(); - insertIdx2(new String[][]{new String[]{"a", "0", "0", "0", "0"}, new String[]{"b", "0", "0", "0", "0"}}); - insertIdx2(new String[][]{new String[]{"e", "1", "2"}, new String[]{"f", "3", "4"}}); + insertIdx2(new String[][] { new String[] { "a", "0", "0", "0", "0" }, new String[] { "b", "0", "0", "0", "0" } }); + insertIdx2(new String[][] { new String[] { "e", "1", "2" }, new String[] { "f", "3", "4" } }); refresh(); 
ensureSearchable(); } private void insertIdx1(List values1, List values2) throws Exception { - XContentBuilder source = jsonBuilder() - .startObject() - .array("field1", values1.toArray()) - .startArray("nested1"); + XContentBuilder source = jsonBuilder().startObject().array("field1", values1.toArray()).startArray("nested1"); for (String value1 : values2) { source.startObject().field("field2", value1).endObject(); } @@ -113,9 +134,7 @@ private void insertIdx1(List values1, List values2) throws Excep } private void insertIdx2(String[][] values) throws Exception { - XContentBuilder source = jsonBuilder() - .startObject() - .startArray("nested1"); + XContentBuilder source = jsonBuilder().startObject().startArray("nested1"); for (String[] value : values) { source.startObject().field("field1", value[0]).startArray("nested2"); for (int i = 1; i < value.length; i++) { @@ -129,18 +148,17 @@ private void insertIdx2(String[][] values) throws Exception { public void testSimpleReverseNestedToRoot() throws Exception { SearchResponse response = client().prepareSearch("idx1") - .addAggregation(nested("nested1", "nested1") + .addAggregation( + nested("nested1", "nested1").subAggregation( + terms("field2").field("nested1.field2") .subAggregation( - terms("field2").field("nested1.field2") - .subAggregation( - reverseNested("nested1_to_field1") - .subAggregation( - terms("field1").field("field1") - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ) + reverseNested("nested1_to_field1").subAggregation( + terms("field1").field("field1").collectMode(randomFrom(SubAggCollectionMode.values())) + ) ) - ).get(); + ) + ) + .get(); assertSearchResponse(response); @@ -160,9 +178,9 @@ public void testSimpleReverseNestedToRoot() throws Exception { assertThat(bucket.getKeyAsString(), equalTo("1")); assertThat(bucket.getDocCount(), equalTo(6L)); ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); - assertThat(((InternalAggregation)reverseNested).getProperty("_count"), equalTo(5L)); + assertThat(((InternalAggregation) reverseNested).getProperty("_count"), equalTo(5L)); Terms tags = reverseNested.getAggregations().get("field1"); - assertThat(((InternalAggregation)reverseNested).getProperty("field1"), sameInstance(tags)); + assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(tags)); List tagsBuckets = new ArrayList<>(tags.getBuckets()); assertThat(tagsBuckets.size(), equalTo(6)); assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c")); @@ -317,13 +335,12 @@ public void testSimpleReverseNestedToRoot() throws Exception { public void testSimpleNested1ToRootToNested2() throws Exception { SearchResponse response = client().prepareSearch("idx2") - .addAggregation(nested("nested1", "nested1") - .subAggregation( - reverseNested("nested1_to_root") - .subAggregation(nested("root_to_nested2", "nested1.nested2")) - ) - ) - .get(); + .addAggregation( + nested("nested1", "nested1").subAggregation( + reverseNested("nested1_to_root").subAggregation(nested("root_to_nested2", "nested1.nested2")) + ) + ) + .get(); assertSearchResponse(response); Nested nested = response.getAggregations().get("nested1"); @@ -339,20 +356,23 @@ public void testSimpleNested1ToRootToNested2() throws Exception { public void testSimpleReverseNestedToNested1() throws Exception { SearchResponse response = client().prepareSearch("idx2") - .addAggregation(nested("nested1", "nested1.nested2") + .addAggregation( + nested("nested1", "nested1.nested2").subAggregation( + 
terms("field2").field("nested1.nested2.field2") + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .size(10000) + .subAggregation( + reverseNested("nested1_to_field1").path("nested1") .subAggregation( - terms("field2").field("nested1.nested2.field2").order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .size(10000) - .subAggregation( - reverseNested("nested1_to_field1").path("nested1") - .subAggregation( - terms("field1").field("nested1.field1").order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ) + terms("field1").field("nested1.field1") + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) ) - ).get(); + ) + ) + ) + .get(); assertSearchResponse(response); @@ -442,16 +462,16 @@ public void testSimpleReverseNestedToNested1() throws Exception { public void testReverseNestedAggWithoutNestedAgg() { try { client().prepareSearch("idx2") - .addAggregation(terms("field2").field("nested1.nested2.field2") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation( - reverseNested("nested1_to_field1") - .subAggregation( - terms("field1").field("nested1.field1") - .collectMode(randomFrom(SubAggCollectionMode.values())) - ) - ) - ).get(); + .addAggregation( + terms("field2").field("nested1.nested2.field2") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation( + reverseNested("nested1_to_field1").subAggregation( + terms("field1").field("nested1.field1").collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ) + ) + .get(); fail("Expected SearchPhaseExecutionException"); } catch (SearchPhaseExecutionException e) { assertThat(e.getMessage(), is("all shards failed")); @@ -460,9 +480,9 @@ public void testReverseNestedAggWithoutNestedAgg() { public void testNonExistingNestedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx2") - .setQuery(matchAllQuery()) - .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(nested("nested2", "nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3"))) + .get(); Nested nested = searchResponse.getAggregations().get("nested2"); assertThat(nested, notNullValue()); @@ -473,9 +493,9 @@ public void testNonExistingNestedField() throws Exception { // Test that parsing the reverse_nested agg doesn't fail, because the parent nested agg is unmapped: searchResponse = client().prepareSearch("idx1") - .setQuery(matchAllQuery()) - .addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(nested("incorrect1", "incorrect1").subAggregation(reverseNested("incorrect2").path("incorrect2"))) + .get(); nested = searchResponse.getAggregations().get("incorrect1"); assertThat(nested, notNullValue()); @@ -484,94 +504,138 @@ public void testNonExistingNestedField() throws Exception { } public void testSameParentDocHavingMultipleBuckets() throws Exception { - XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").field("dynamic", "strict").startObject("properties") - .startObject("id").field("type", "long").endObject() - .startObject("category") - .field("type", "nested") - .startObject("properties") - .startObject("name").field("type", "keyword").endObject() - .endObject() - .endObject() - 
.startObject("sku") - .field("type", "nested") - .startObject("properties") - .startObject("sku_type").field("type", "keyword").endObject() - .startObject("colors") - .field("type", "nested") - .startObject("properties") - .startObject("name").field("type", "keyword").endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject().endObject().endObject(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("_doc") + .field("dynamic", "strict") + .startObject("properties") + .startObject("id") + .field("type", "long") + .endObject() + .startObject("category") + .field("type", "nested") + .startObject("properties") + .startObject("name") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .startObject("sku") + .field("type", "nested") + .startObject("properties") + .startObject("sku_type") + .field("type", "keyword") + .endObject() + .startObject("colors") + .field("type", "nested") + .startObject("properties") + .startObject("name") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); assertAcked( - prepareCreate("idx3") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping(mapping) + prepareCreate("idx3").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .setMapping(mapping) ); - client().prepareIndex("idx3").setId("1").setRefreshPolicy(IMMEDIATE).setSource( + client().prepareIndex("idx3") + .setId("1") + .setRefreshPolicy(IMMEDIATE) + .setSource( jsonBuilder().startObject() - .startArray("sku") - .startObject() - .field("sku_type", "bar1") - .startArray("colors") - .startObject().field("name", "red").endObject() - .startObject().field("name", "green").endObject() - .startObject().field("name", "yellow").endObject() - .endArray() - .endObject() - .startObject() - .field("sku_type", "bar1") - .startArray("colors") - .startObject().field("name", "red").endObject() - .startObject().field("name", "blue").endObject() - .startObject().field("name", "white").endObject() - .endArray() - .endObject() - .startObject() - .field("sku_type", "bar1") - .startArray("colors") - .startObject().field("name", "black").endObject() - .startObject().field("name", "blue").endObject() - .endArray() - .endObject() - .startObject() - .field("sku_type", "bar2") - .startArray("colors") - .startObject().field("name", "orange").endObject() - .endArray() - .endObject() - .startObject() - .field("sku_type", "bar2") - .startArray("colors") - .startObject().field("name", "pink").endObject() - .endArray() - .endObject() - .endArray() - .startArray("category") - .startObject().field("name", "abc").endObject() - .startObject().field("name", "klm").endObject() - .startObject().field("name", "xyz").endObject() - .endArray() - .endObject() - ).get(); + .startArray("sku") + .startObject() + .field("sku_type", "bar1") + .startArray("colors") + .startObject() + .field("name", "red") + .endObject() + .startObject() + .field("name", "green") + .endObject() + .startObject() + .field("name", "yellow") + .endObject() + .endArray() + .endObject() + .startObject() + .field("sku_type", "bar1") + .startArray("colors") + .startObject() + .field("name", "red") + .endObject() + .startObject() + .field("name", "blue") + .endObject() + .startObject() + .field("name", "white") + .endObject() + .endArray() + .endObject() + .startObject() + .field("sku_type", "bar1") + 
.startArray("colors") + .startObject() + .field("name", "black") + .endObject() + .startObject() + .field("name", "blue") + .endObject() + .endArray() + .endObject() + .startObject() + .field("sku_type", "bar2") + .startArray("colors") + .startObject() + .field("name", "orange") + .endObject() + .endArray() + .endObject() + .startObject() + .field("sku_type", "bar2") + .startArray("colors") + .startObject() + .field("name", "pink") + .endObject() + .endArray() + .endObject() + .endArray() + .startArray("category") + .startObject() + .field("name", "abc") + .endObject() + .startObject() + .field("name", "klm") + .endObject() + .startObject() + .field("name", "xyz") + .endObject() + .endArray() + .endObject() + ) + .get(); SearchResponse response = client().prepareSearch("idx3") - .addAggregation( - nested("nested_0", "category").subAggregation( - terms("group_by_category").field("category.name").subAggregation( - reverseNested("to_root").subAggregation( - nested("nested_1", "sku").subAggregation( - filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( - count("sku_count").field("sku.sku_type") - ) - ) - ) + .addAggregation( + nested("nested_0", "category").subAggregation( + terms("group_by_category").field("category.name") + .subAggregation( + reverseNested("to_root").subAggregation( + nested("nested_1", "sku").subAggregation( + filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( + count("sku_count").field("sku.sku_type") + ) ) + ) ) - ).get(); + ) + ) + .get(); assertNoFailures(response); assertHitCount(response, 1); @@ -579,7 +643,7 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception { assertThat(nested0.getDocCount(), equalTo(3L)); Terms terms = nested0.getAggregations().get("group_by_category"); assertThat(terms.getBuckets().size(), equalTo(3)); - for (String bucketName : new String[]{"abc", "klm", "xyz"}) { + for (String bucketName : new String[] { "abc", "klm", "xyz" }) { logger.info("Checking results for bucket {}", bucketName); Terms.Bucket bucket = terms.getBucketByKey(bucketName); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -594,26 +658,26 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception { } response = client().prepareSearch("idx3") - .addAggregation( - nested("nested_0", "category").subAggregation( - terms("group_by_category").field("category.name").subAggregation( - reverseNested("to_root").subAggregation( - nested("nested_1", "sku").subAggregation( - filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( - nested("nested_2", "sku.colors").subAggregation( - filter("filter_sku_color", termQuery("sku.colors.name", "red")) - .subAggregation( - reverseNested("reverse_to_sku").path("sku").subAggregation( - count("sku_count").field("sku.sku_type") - ) - ) - ) - ) - ) + .addAggregation( + nested("nested_0", "category").subAggregation( + terms("group_by_category").field("category.name") + .subAggregation( + reverseNested("to_root").subAggregation( + nested("nested_1", "sku").subAggregation( + filter("filter_by_sku", termQuery("sku.sku_type", "bar1")).subAggregation( + nested("nested_2", "sku.colors").subAggregation( + filter("filter_sku_color", termQuery("sku.colors.name", "red")).subAggregation( + reverseNested("reverse_to_sku").path("sku") + .subAggregation(count("sku_count").field("sku.sku_type")) + ) ) + ) ) + ) ) - ).get(); + ) + ) + .get(); assertNoFailures(response); assertHitCount(response, 1); @@ -621,7 +685,7 @@ public void testSameParentDocHavingMultipleBuckets() 
throws Exception { assertThat(nested0.getDocCount(), equalTo(3L)); terms = nested0.getAggregations().get("group_by_category"); assertThat(terms.getBuckets().size(), equalTo(3)); - for (String bucketName : new String[]{"abc", "klm", "xyz"}) { + for (String bucketName : new String[] { "abc", "klm", "xyz" }) { logger.info("Checking results for bucket {}", bucketName); Terms.Bucket bucket = terms.getBucketByKey(bucketName); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -644,14 +708,17 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception { public void testFieldAlias() { SearchResponse response = client().prepareSearch("idx1") - .addAggregation(nested("nested1", "nested1") - .subAggregation( + .addAggregation( + nested("nested1", "nested1").subAggregation( terms("field2").field("nested1.field2") .subAggregation( - reverseNested("nested1_to_field1") - .subAggregation( - terms("field1").field("alias") - .collectMode(randomFrom(SubAggCollectionMode.values())))))).get(); + reverseNested("nested1_to_field1").subAggregation( + terms("field1").field("alias").collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -662,7 +729,7 @@ public void testFieldAlias() { ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1"); Terms reverseNestedTerms = reverseNested.getAggregations().get("field1"); - assertThat(((InternalAggregation)reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms)); + assertThat(((InternalAggregation) reverseNested).getProperty("field1"), sameInstance(reverseNestedTerms)); assertThat(reverseNestedTerms.getBuckets().size(), equalTo(6)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index badc322ee5425..ac598d2fff0c9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -47,42 +47,47 @@ public String randomExecutionHint() { return randomBoolean() ? 
null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } - @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping( - "author", "type=keyword", "name", "type=text", "genre", - "type=keyword", "price", "type=float")); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("author", "type=keyword", "name", "type=text", "genre", "type=keyword", "price", "type=float") + ); createIndex("idx_unmapped"); // idx_unmapped_author is same as main index but missing author field - assertAcked(prepareCreate("idx_unmapped_author") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping("name", "type=text", "genre", "type=keyword", "price", "type=float")); + assertAcked( + prepareCreate("idx_unmapped_author").setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS).put(SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("name", "type=text", "genre", "type=keyword", "price", "type=float") + ); ensureGreen(); String data[] = { - // "id,cat,name,price,inStock,author_t,series_t,sequence_i,genre_s", - "0553573403,book,A Game of Thrones,7.99,true,George R.R. Martin,A Song of Ice and Fire,1,fantasy", - "0553579908,book,A Clash of Kings,7.99,true,George R.R. Martin,A Song of Ice and Fire,2,fantasy", - "055357342X,book,A Storm of Swords,7.99,true,George R.R. Martin,A Song of Ice and Fire,3,fantasy", - "0553293354,book,Foundation,17.99,true,Isaac Asimov,Foundation Novels,1,scifi", - "0812521390,book,The Black Company,6.99,false,Glen Cook,The Chronicles of The Black Company,1,fantasy", - "0812550706,book,Ender's Game,6.99,true,Orson Scott Card,Ender,1,scifi", - "0441385532,book,Jhereg,7.95,false,Steven Brust,Vlad Taltos,1,fantasy", - "0380014300,book,Nine Princes In Amber,6.99,true,Roger Zelazny,the Chronicles of Amber,1,fantasy", - "0805080481,book,The Book of Three,5.99,true,Lloyd Alexander,The Chronicles of Prydain,1,fantasy", - "080508049X,book,The Black Cauldron,5.99,true,Lloyd Alexander,The Chronicles of Prydain,2,fantasy" - - }; + // "id,cat,name,price,inStock,author_t,series_t,sequence_i,genre_s", + "0553573403,book,A Game of Thrones,7.99,true,George R.R. Martin,A Song of Ice and Fire,1,fantasy", + "0553579908,book,A Clash of Kings,7.99,true,George R.R. Martin,A Song of Ice and Fire,2,fantasy", + "055357342X,book,A Storm of Swords,7.99,true,George R.R. 
Martin,A Song of Ice and Fire,3,fantasy", + "0553293354,book,Foundation,17.99,true,Isaac Asimov,Foundation Novels,1,scifi", + "0812521390,book,The Black Company,6.99,false,Glen Cook,The Chronicles of The Black Company,1,fantasy", + "0812550706,book,Ender's Game,6.99,true,Orson Scott Card,Ender,1,scifi", + "0441385532,book,Jhereg,7.95,false,Steven Brust,Vlad Taltos,1,fantasy", + "0380014300,book,Nine Princes In Amber,6.99,true,Roger Zelazny,the Chronicles of Amber,1,fantasy", + "0805080481,book,The Book of Three,5.99,true,Lloyd Alexander,The Chronicles of Prydain,1,fantasy", + "080508049X,book,The Black Cauldron,5.99,true,Lloyd Alexander,The Chronicles of Prydain,2,fantasy" + + }; for (int i = 0; i < data.length; i++) { String[] parts = data[i].split(","); - client().prepareIndex("test").setId("" + i) - .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price",Float.parseFloat(parts[3])).get(); - client().prepareIndex("idx_unmapped_author").setId("" + i) - .setSource("name", parts[2], "genre", parts[8],"price",Float.parseFloat(parts[3])).get(); + client().prepareIndex("test") + .setId("" + i) + .setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .get(); + client().prepareIndex("idx_unmapped_author") + .setId("" + i) + .setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3])) + .get(); } client().admin().indices().refresh(new RefreshRequest("test")).get(); } @@ -91,13 +96,14 @@ public void testIssue10719() throws Exception { // Tests that we can refer to nested elements under a sample in a path // statement boolean asc = randomBoolean(); - SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(terms("genres") - .field("genre") - .order(BucketOrder.aggregation("sample>max_price.value", asc)) - .subAggregation(sampler("sample").shardSize(100) - .subAggregation(max("max_price").field("price"))) - ).get(); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .addAggregation( + terms("genres").field("genre") + .order(BucketOrder.aggregation("sample>max_price.value", asc)) + .subAggregation(sampler("sample").shardSize(100).subAggregation(max("max_price").field("price"))) + ) + .get(); assertSearchResponse(response); Terms genres = response.getAggregations().get("genres"); List genreBuckets = genres.getBuckets(); @@ -121,8 +127,13 @@ public void testIssue10719() throws Exception { public void testSimpleSampler() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg).get(); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); Terms authors = sample.getAggregations().get("authors"); @@ -139,11 +150,12 @@ public void testUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = 
client().prepareSearch("idx_unmapped") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0).setSize(60) - .addAggregation(sampleAgg) - .get(); + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); assertThat(sample.getDocCount(), equalTo(0L)); @@ -155,11 +167,13 @@ public void testPartiallyUnmappedChildAggNoDiversity() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100); sampleAgg.subAggregation(terms("authors").field("author")); SearchResponse response = client().prepareSearch("idx_unmapped", "test") - .setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")) - .setFrom(0).setSize(60).setExplain(true) - .addAggregation(sampleAgg) - .get(); + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .setExplain(true) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); assertThat(sample.getDocCount(), greaterThan(0L)); @@ -170,8 +184,13 @@ public void testPartiallyUnmappedChildAggNoDiversity() throws Exception { public void testRidiculousShardSizeSampler() throws Exception { SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(Integer.MAX_VALUE); sampleAgg.subAggregation(terms("authors").field("author")); - SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH) - .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg).get(); + SearchResponse response = client().prepareSearch("test") + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setQuery(new TermQueryBuilder("genre", "fantasy")) + .setFrom(0) + .setSize(60) + .addAggregation(sampleAgg) + .get(); assertSearchResponse(response); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java index 0cb218750672c..e182852f461c3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardReduceIT.java @@ -50,41 +50,51 @@ public class ShardReduceIT extends ESIntegTestCase { private IndexRequestBuilder indexDoc(String date, int value) throws Exception { - return client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() - .field("value", value) - .field("ip", "10.0.0." + value) - .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION)) - .field("date", date) - .field("term-l", 1) - .field("term-d", 1.5) - .field("term-s", "term") - .startObject("nested") - .field("date", date) - .endObject() - .endObject()); + return client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("value", value) + .field("ip", "10.0.0." 
+ value) + .field("location", Geohash.stringEncode(5, 52, Geohash.PRECISION)) + .field("date", date) + .field("term-l", 1) + .field("term-d", 1.5) + .field("term-s", "term") + .startObject("nested") + .field("date", date) + .endObject() + .endObject() + ); } @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("idx") - .setMapping("nested", "type=nested", "ip", "type=ip", - "location", "type=geo_point", "term-s", "type=keyword")); - - indexRandom(true, - indexDoc("2014-01-01", 1), - indexDoc("2014-01-02", 2), - indexDoc("2014-01-04", 3)); + assertAcked( + prepareCreate("idx").setMapping( + "nested", + "type=nested", + "ip", + "type=ip", + "location", + "type=geo_point", + "term-s", + "type=keyword" + ) + ); + + indexRandom(true, indexDoc("2014-01-01", 1), indexDoc("2014-01-02", 2), indexDoc("2014-01-04", 3)); ensureSearchable(); } public void testGlobal() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(global("global") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + global("global").subAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ) + .get(); assertSearchResponse(response); @@ -95,11 +105,13 @@ public void testGlobal() throws Exception { public void testFilter() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(filter("filter", QueryBuilders.matchAllQuery()) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ) + .get(); assertSearchResponse(response); @@ -110,11 +122,12 @@ public void testFilter() throws Exception { public void testMissing() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(missing("missing").field("foobar") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + missing("missing").field("foobar") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -125,13 +138,16 @@ public void testMissing() throws Exception { public void testGlobalWithFilterWithMissing() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(global("global") - .subAggregation(filter("filter", QueryBuilders.matchAllQuery()) - .subAggregation(missing("missing").field("foobar") - .subAggregation(dateHistogram("histo").field("date") - .fixedInterval(DateHistogramInterval.DAY).minDocCount(0))))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + global("global").subAggregation( + filter("filter", QueryBuilders.matchAllQuery()).subAggregation( + missing("missing").field("foobar") + 
.subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -144,11 +160,13 @@ public void testGlobalWithFilterWithMissing() throws Exception { public void testNested() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(nested("nested", "nested") - .subAggregation(dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + nested("nested", "nested").subAggregation( + dateHistogram("histo").field("nested.date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0) + ) + ) + .get(); assertSearchResponse(response); @@ -159,12 +177,13 @@ public void testNested() throws Exception { public void testStringTerms() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(terms("terms").field("term-s") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + terms("terms").field("term-s") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -175,12 +194,13 @@ public void testStringTerms() throws Exception { public void testLongTerms() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(terms("terms").field("term-l") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + terms("terms").field("term-l") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -191,12 +211,13 @@ public void testLongTerms() throws Exception { public void testDoubleTerms() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(terms("terms").field("term-d") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + terms("terms").field("term-d") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -207,11 +228,13 @@ public void testDoubleTerms() throws Exception { public void testRange() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(range("range").field("value").addRange("r1", 0, 10) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + 
.setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + range("range").field("value") + .addRange("r1", 0, 10) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -222,11 +245,13 @@ public void testRange() throws Exception { public void testDateRange() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(dateRange("range").field("date").addRange("r1", "2014-01-01", "2014-01-10") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + dateRange("range").field("date") + .addRange("r1", "2014-01-01", "2014-01-10") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -237,11 +262,13 @@ public void testDateRange() throws Exception { public void testIpRange() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(ipRange("range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + ipRange("range").field("ip") + .addRange("r1", "10.0.0.1", "10.0.0.10") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -252,11 +279,13 @@ public void testIpRange() throws Exception { public void testHistogram() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(histogram("topHisto").field("value").interval(5) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + histogram("topHisto").field("value") + .interval(5) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -267,11 +296,13 @@ public void testHistogram() throws Exception { public void testDateHistogram() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(dateHistogram("topHisto").field("date").calendarInterval(DateHistogramInterval.MONTH) - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + dateHistogram("topHisto").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -283,11 +314,12 @@ public void testDateHistogram() throws Exception { public void testGeoHashGrid() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(geohashGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - 
.minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + geohashGrid("grid").field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -298,11 +330,12 @@ public void testGeoHashGrid() throws Exception { public void testGeoTileGrid() throws Exception { SearchResponse response = client().prepareSearch("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .addAggregation(geotileGrid("grid").field("location") - .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY) - .minDocCount(0))) - .get(); + .setQuery(QueryBuilders.matchAllQuery()) + .addAggregation( + geotileGrid("grid").field("location") + .subAggregation(dateHistogram("histo").field("date").fixedInterval(DateHistogramInterval.DAY).minDocCount(0)) + ) + .get(); assertSearchResponse(response); @@ -311,5 +344,4 @@ public void testGeoTileGrid() throws Exception { assertThat(histo.getBuckets().size(), equalTo(4)); } - } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index 3ae1f3ff4d283..dc3cd1c897780 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -27,12 +27,13 @@ public void testNoShardSizeString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) + ) + .get(); - Terms terms = response.getAggregations().get("keys"); + Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map expected = new HashMap<>(); @@ -50,12 +51,17 @@ public void testShardSizeEqualsSizeString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ) + .get(); - Terms terms = response.getAggregations().get("keys"); + Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map expected = new HashMap<>(); @@ -74,10 +80,15 @@ public void testWithShardSizeString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) 
+ .shardSize(5) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -97,11 +108,17 @@ public void testWithShardSizeStringSingleShard() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setRouting(routing1) - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .setRouting(routing1) + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -110,7 +127,7 @@ public void testWithShardSizeStringSingleShard() throws Exception { expected.put("1", 5L); expected.put("2", 4L); expected.put("3", 3L); // <-- count is now fixed - for (Terms.Bucket bucket: buckets) { + for (Terms.Bucket bucket : buckets) { assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey()))); } } @@ -121,12 +138,13 @@ public void testNoShardSizeTermOrderString() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) + ) + .get(); - Terms terms = response.getAggregations().get("keys"); + Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map expected = new HashMap<>(); @@ -144,10 +162,11 @@ public void testNoShardSizeLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -167,10 +186,15 @@ public void testShardSizeEqualsSizeLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -190,10 +214,15 @@ public void testWithShardSizeLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - 
.collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -213,11 +242,17 @@ public void testWithShardSizeLongSingleShard() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setRouting(routing1) - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .setRouting(routing1) + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -237,10 +272,11 @@ public void testNoShardSizeTermOrderLong() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -260,10 +296,11 @@ public void testNoShardSizeDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -283,10 +320,15 @@ public void testShardSizeEqualsSizeDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3).shardSize(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .shardSize(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -306,10 +348,15 @@ public void testWithShardSizeDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + 
.order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -328,11 +375,17 @@ public void testWithShardSizeDoubleSingleShard() throws Exception { indexData(); - SearchResponse response = client().prepareSearch("idx").setRouting(routing1) - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(BucketOrder.count(false))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .setRouting(routing1) + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key") + .size(3) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .shardSize(5) + .order(BucketOrder.count(false)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); @@ -352,10 +405,11 @@ public void testNoShardSizeTermOrderDouble() throws Exception { indexData(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(terms("keys").field("key").size(3) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("keys").field("key").size(3).collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.key(true)) + ) + .get(); Terms terms = response.getAggregations().get("keys"); List buckets = terms.getBuckets(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 4395f34758208..1aa729a7119c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -86,12 +86,15 @@ public Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); scripts.put("script_with_params", params -> { double factor = ((Number) params.get("param")).doubleValue(); - return factor * (longValue(params.get("_subset_freq")) + longValue(params.get("_subset_size")) + - longValue(params.get("_superset_freq")) + longValue(params.get("_superset_size"))) / factor; + return factor * (longValue(params.get("_subset_freq")) + longValue(params.get("_subset_size")) + longValue( + params.get("_superset_freq") + ) + longValue(params.get("_superset_size"))) / factor; }); - scripts.put("script_no_params", params -> - longValue(params.get("_subset_freq")) + longValue(params.get("_subset_size")) + - longValue(params.get("_superset_freq")) + longValue(params.get("_superset_size")) + scripts.put( + "script_no_params", + params -> longValue(params.get("_subset_freq")) + longValue(params.get("_subset_size")) + longValue( + params.get("_superset_freq") + ) + longValue(params.get("_superset_size")) ); return scripts; } @@ -116,21 +119,18 @@ public void testXContentResponse() throws Exception { SharedSignificantTermsTestMethods.index01Docs(type, settings, this); SearchRequestBuilder request; - if ("text".equals(type) && randomBoolean() ) { + if ("text".equals(type) && randomBoolean()) { // Use significant_text on text fields but occasionally run with alternative of // significant_terms on legacy fieldData=true too. 
request = client().prepareSearch(INDEX_NAME) - .addAggregation(terms("class").field(CLASS_FIELD) - .subAggregation(significantText("sig_terms", TEXT_FIELD))); + .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantText("sig_terms", TEXT_FIELD))); } else { request = client().prepareSearch(INDEX_NAME) - .addAggregation(terms("class").field(CLASS_FIELD) - .subAggregation(significantTerms("sig_terms").field(TEXT_FIELD))); + .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD))); } SearchResponse response = request.get(); - assertSearchResponse(response); StringTerms classes = response.getAggregations().get("class"); assertThat(classes.getBuckets().size(), equalTo(2)); @@ -150,57 +150,57 @@ public void testXContentResponse() throws Exception { responseBuilder.endObject(); String result = "{\"class\":{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0," - + "\"buckets\":[" - + "{" - + "\"key\":\"0\"," - + "\"doc_count\":4," - + "\"sig_terms\":{" - + "\"doc_count\":4," - + "\"bg_count\":7," - + "\"buckets\":[" - + "{" - + "\"key\":" + (type.equals("long") ? "0," : "\"0\",") - + "\"doc_count\":4," - + "\"score\":0.39999999999999997," - + "\"bg_count\":5" - + "}" - + "]" - + "}" - + "}," - + "{" - + "\"key\":\"1\"," - + "\"doc_count\":3," - + "\"sig_terms\":{" - + "\"doc_count\":3," - + "\"bg_count\":7," - + "\"buckets\":[" - + "{" - + "\"key\":" + (type.equals("long") ? "1," : "\"1\",") - + "\"doc_count\":3," - + "\"score\":0.75," - + "\"bg_count\":4" - + "}]}}]}}"; + + "\"buckets\":[" + + "{" + + "\"key\":\"0\"," + + "\"doc_count\":4," + + "\"sig_terms\":{" + + "\"doc_count\":4," + + "\"bg_count\":7," + + "\"buckets\":[" + + "{" + + "\"key\":" + + (type.equals("long") ? "0," : "\"0\",") + + "\"doc_count\":4," + + "\"score\":0.39999999999999997," + + "\"bg_count\":5" + + "}" + + "]" + + "}" + + "}," + + "{" + + "\"key\":\"1\"," + + "\"doc_count\":3," + + "\"sig_terms\":{" + + "\"doc_count\":3," + + "\"bg_count\":7," + + "\"buckets\":[" + + "{" + + "\"key\":" + + (type.equals("long") ? 
"1," : "\"1\",") + + "\"doc_count\":3," + + "\"score\":0.75," + + "\"bg_count\":4" + + "}]}}]}}"; assertThat(Strings.toString(responseBuilder), equalTo(result)); } public void testPopularTermManyDeletedDocs() throws Exception { String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; - assertAcked(prepareCreate(INDEX_NAME).setSettings(settings, XContentType.JSON) - .setMapping("text", "type=keyword", CLASS_FIELD, "type=keyword")); - String[] cat1v1 = {"constant", "one"}; - String[] cat1v2 = {"constant", "uno"}; - String[] cat2v1 = {"constant", "two"}; - String[] cat2v2 = {"constant", "duo"}; + assertAcked( + prepareCreate(INDEX_NAME).setSettings(settings, XContentType.JSON) + .setMapping("text", "type=keyword", CLASS_FIELD, "type=keyword") + ); + String[] cat1v1 = { "constant", "one" }; + String[] cat1v2 = { "constant", "uno" }; + String[] cat2v1 = { "constant", "two" }; + String[] cat2v2 = { "constant", "duo" }; List indexRequestBuilderList = new ArrayList<>(); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("1") - .setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1")); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("2") - .setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1")); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("3") - .setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2")); - indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("4") - .setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2")); + indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("1").setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1")); + indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("2").setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1")); + indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("3").setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2")); + indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME).setId("4").setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2")); indexRandom(true, false, indexRequestBuilderList); // Now create some holes in the index with selective deletes caused by updates. @@ -215,26 +215,15 @@ public void testPopularTermManyDeletedDocs() throws Exception { } indexRandom(true, false, indexRequestBuilderList); - SearchRequestBuilder request; - if (randomBoolean() ) { + if (randomBoolean()) { request = client().prepareSearch(INDEX_NAME) .addAggregation( - terms("class") - .field(CLASS_FIELD) - .subAggregation( - significantTerms("sig_terms") - .field(TEXT_FIELD) - .minDocCount(1))); - } else - { + terms("class").field(CLASS_FIELD).subAggregation(significantTerms("sig_terms").field(TEXT_FIELD).minDocCount(1)) + ); + } else { request = client().prepareSearch(INDEX_NAME) - .addAggregation( - terms("class") - .field(CLASS_FIELD) - .subAggregation( - significantText("sig_terms", TEXT_FIELD) - .minDocCount(1))); + .addAggregation(terms("class").field(CLASS_FIELD).subAggregation(significantText("sig_terms", TEXT_FIELD).minDocCount(1))); } request.get(); @@ -254,32 +243,33 @@ public void testBackgroundVsSeparateSet() throws Exception { // 1. terms agg on class and significant terms // 2. 
filter buckets and set the background to the other class and set is_background false // both should yield exact same result - public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuristicExpectingSuperset, - SignificanceHeuristic significanceHeuristicExpectingSeparateSets, - String type) throws Exception { + public void testBackgroundVsSeparateSet( + SignificanceHeuristic significanceHeuristicExpectingSuperset, + SignificanceHeuristic significanceHeuristicExpectingSeparateSets, + String type + ) throws Exception { final boolean useSigText = randomBoolean() && type.equals("text"); SearchRequestBuilder request1; if (useSigText) { request1 = client().prepareSearch(INDEX_NAME) - .addAggregation(terms("class") - .field(CLASS_FIELD) - .subAggregation( - significantText("sig_terms", TEXT_FIELD) - .minDocCount(1) - .significanceHeuristic( - significanceHeuristicExpectingSuperset))); - }else - { + .addAggregation( + terms("class").field(CLASS_FIELD) + .subAggregation( + significantText("sig_terms", TEXT_FIELD).minDocCount(1) + .significanceHeuristic(significanceHeuristicExpectingSuperset) + ) + ); + } else { request1 = client().prepareSearch(INDEX_NAME) - .addAggregation(terms("class") - .field(CLASS_FIELD) - .subAggregation( - significantTerms("sig_terms") - .field(TEXT_FIELD) - .minDocCount(1) - .significanceHeuristic( - significanceHeuristicExpectingSuperset))); + .addAggregation( + terms("class").field(CLASS_FIELD) + .subAggregation( + significantTerms("sig_terms").field(TEXT_FIELD) + .minDocCount(1) + .significanceHeuristic(significanceHeuristicExpectingSuperset) + ) + ); } SearchResponse response1 = request1.get(); @@ -288,31 +278,38 @@ public void testBackgroundVsSeparateSet(SignificanceHeuristic significanceHeuris SearchRequestBuilder request2; if (useSigText) { request2 = client().prepareSearch(INDEX_NAME) - .addAggregation(filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")) - .subAggregation(significantText("sig_terms", TEXT_FIELD) - .minDocCount(1) - .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "1")) - .significanceHeuristic(significanceHeuristicExpectingSeparateSets))) - .addAggregation(filter("1", QueryBuilders.termQuery(CLASS_FIELD, "1")) - .subAggregation(significantText("sig_terms", TEXT_FIELD) - .minDocCount(1) - .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "0")) - .significanceHeuristic(significanceHeuristicExpectingSeparateSets))); - }else - { + .addAggregation( + filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")).subAggregation( + significantText("sig_terms", TEXT_FIELD).minDocCount(1) + .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "1")) + .significanceHeuristic(significanceHeuristicExpectingSeparateSets) + ) + ) + .addAggregation( + filter("1", QueryBuilders.termQuery(CLASS_FIELD, "1")).subAggregation( + significantText("sig_terms", TEXT_FIELD).minDocCount(1) + .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "0")) + .significanceHeuristic(significanceHeuristicExpectingSeparateSets) + ) + ); + } else { request2 = client().prepareSearch(INDEX_NAME) - .addAggregation(filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")) - .subAggregation(significantTerms("sig_terms") - .field(TEXT_FIELD) - .minDocCount(1) - .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "1")) - .significanceHeuristic(significanceHeuristicExpectingSeparateSets))) - .addAggregation(filter("1", QueryBuilders.termQuery(CLASS_FIELD, "1")) - .subAggregation(significantTerms("sig_terms") - .field(TEXT_FIELD) - .minDocCount(1) - 
.backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "0")) - .significanceHeuristic(significanceHeuristicExpectingSeparateSets))); + .addAggregation( + filter("0", QueryBuilders.termQuery(CLASS_FIELD, "0")).subAggregation( + significantTerms("sig_terms").field(TEXT_FIELD) + .minDocCount(1) + .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "1")) + .significanceHeuristic(significanceHeuristicExpectingSeparateSets) + ) + ) + .addAggregation( + filter("1", QueryBuilders.termQuery(CLASS_FIELD, "1")).subAggregation( + significantTerms("sig_terms").field(TEXT_FIELD) + .minDocCount(1) + .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "0")) + .significanceHeuristic(significanceHeuristicExpectingSeparateSets) + ) + ); } SearchResponse response2 = request2.get(); @@ -351,21 +348,32 @@ public void testScoresEqualForPositiveAndNegative() throws Exception { public void testScoresEqualForPositiveAndNegative(SignificanceHeuristic heuristic) throws Exception { - //check that results for both classes are the same with exclude negatives = false and classes are routing ids + // check that results for both classes are the same with exclude negatives = false and classes are routing ids SearchRequestBuilder request; if (randomBoolean()) { request = client().prepareSearch("test") - .addAggregation(terms("class").field("class").subAggregation(significantTerms("mySignificantTerms") - .field("text") - .executionHint(randomExecutionHint()) - .significanceHeuristic(heuristic) - .minDocCount(1).shardSize(1000).size(1000))); - }else - { + .addAggregation( + terms("class").field("class") + .subAggregation( + significantTerms("mySignificantTerms").field("text") + .executionHint(randomExecutionHint()) + .significanceHeuristic(heuristic) + .minDocCount(1) + .shardSize(1000) + .size(1000) + ) + ); + } else { request = client().prepareSearch("test") - .addAggregation(terms("class").field("class").subAggregation(significantText("mySignificantTerms", "text") - .significanceHeuristic(heuristic) - .minDocCount(1).shardSize(1000).size(1000))); + .addAggregation( + terms("class").field("class") + .subAggregation( + significantText("mySignificantTerms", "text").significanceHeuristic(heuristic) + .minDocCount(1) + .shardSize(1000) + .size(1000) + ) + ); } SearchResponse response = request.get(); assertSearchResponse(response); @@ -397,17 +405,15 @@ public void testSubAggregations() throws Exception { QueryBuilder query = QueryBuilders.termsQuery(TEXT_FIELD, "a", "b"); AggregationBuilder subAgg = terms("class").field(CLASS_FIELD); - AggregationBuilder agg = significantTerms("significant_terms") - .field(TEXT_FIELD) + AggregationBuilder agg = significantTerms("significant_terms").field(TEXT_FIELD) .executionHint(randomExecutionHint()) .significanceHeuristic(new ChiSquare(true, true)) - .minDocCount(1).shardSize(1000).size(1000) + .minDocCount(1) + .shardSize(1000) + .size(1000) .subAggregation(subAgg); - SearchResponse response = client().prepareSearch("test") - .setQuery(query) - .addAggregation(agg) - .get(); + SearchResponse response = client().prepareSearch("test").setQuery(query).addAggregation(agg).get(); assertSearchResponse(response); SignificantTerms sigTerms = response.getAggregations().get("significant_terms"); @@ -420,36 +426,35 @@ public void testSubAggregations() throws Exception { } private void indexEqualTestData() throws ExecutionException, InterruptedException { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - 
.setMapping("text", "type=text,fielddata=true", "class", "type=keyword")); + assertAcked( + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .setMapping("text", "type=text,fielddata=true", "class", "type=keyword") + ); createIndex("idx_unmapped"); ensureGreen(); String data[] = { - "A\ta", - "A\ta", - "A\tb", - "A\tb", - "A\tb", - "B\tc", - "B\tc", - "B\tc", - "B\tc", - "B\td", - "B\td", - "B\td", - "B\td", - "B\td", - "A\tc d", - "B\ta b" - }; + "A\ta", + "A\ta", + "A\tb", + "A\tb", + "A\tb", + "B\tc", + "B\tc", + "B\tc", + "B\tc", + "B\td", + "B\td", + "B\td", + "B\td", + "B\td", + "A\tc d", + "B\ta b" }; List indexRequestBuilders = new ArrayList<>(); for (int i = 0; i < data.length; i++) { String[] parts = data[i].split("\t"); - indexRequestBuilders.add(client().prepareIndex("test").setId("" + i) - .setSource("class", parts[0], "text", parts[1])); + indexRequestBuilders.add(client().prepareIndex("test").setId("" + i).setSource("class", parts[0], "text", parts[1])); } indexRandom(true, false, indexRequestBuilders); } @@ -462,26 +467,38 @@ public void testScriptScore() throws ExecutionException, InterruptedException, I SearchRequestBuilder request; if ("text".equals(type) && randomBoolean()) { request = client().prepareSearch(INDEX_NAME) - .addAggregation(terms("class").field(CLASS_FIELD) - .subAggregation(significantText("mySignificantTerms", TEXT_FIELD) - .significanceHeuristic(scriptHeuristic) - .minDocCount(1).shardSize(2).size(2))); + .addAggregation( + terms("class").field(CLASS_FIELD) + .subAggregation( + significantText("mySignificantTerms", TEXT_FIELD).significanceHeuristic(scriptHeuristic) + .minDocCount(1) + .shardSize(2) + .size(2) + ) + ); } else { request = client().prepareSearch(INDEX_NAME) - .addAggregation(terms("class").field(CLASS_FIELD) - .subAggregation(significantTerms("mySignificantTerms") - .field(TEXT_FIELD) - .executionHint(randomExecutionHint()) - .significanceHeuristic(scriptHeuristic) - .minDocCount(1).shardSize(2).size(2))); + .addAggregation( + terms("class").field(CLASS_FIELD) + .subAggregation( + significantTerms("mySignificantTerms").field(TEXT_FIELD) + .executionHint(randomExecutionHint()) + .significanceHeuristic(scriptHeuristic) + .minDocCount(1) + .shardSize(2) + .size(2) + ) + ); } SearchResponse response = request.get(); assertSearchResponse(response); for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) { SignificantTerms sigTerms = classBucket.getAggregations().get("mySignificantTerms"); for (SignificantTerms.Bucket bucket : sigTerms.getBuckets()) { - assertThat(bucket.getSignificanceScore(), - is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize())); + assertThat( + bucket.getSignificanceScore(), + is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()) + ); } } } @@ -504,7 +521,7 @@ private void indexRandomFrequencies01(String type) throws ExecutionException, In textMappings += ",fielddata=true"; } assertAcked(prepareCreate(INDEX_NAME).setMapping(TEXT_FIELD, textMappings, CLASS_FIELD, "type=keyword")); - String[] gb = {"0", "1"}; + String[] gb = { "0", "1" }; List indexRequestBuilderList = new ArrayList<>(); for (int i = 0; i < randomInt(20); i++) { int randNum = randomInt(2); @@ -514,8 +531,9 @@ private void indexRandomFrequencies01(String type) throws ExecutionException, In } else { text[0] = gb[randNum]; } - 
indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME) - .setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ? "one" : "zero")); + indexRequestBuilderList.add( + client().prepareIndex(INDEX_NAME).setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ? "one" : "zero") + ); } indexRandom(true, indexRequestBuilderList); } @@ -529,17 +547,40 @@ public void testReduceFromSeveralShards() throws IOException, ExecutionException * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text") + assertAcked( + prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1, "t", "foo"), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2, "t", "bar")); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1, "t", "foo"), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2, "t", "bar") + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached ScriptHeuristic scriptHeuristic = new ScriptHeuristic( @@ -548,35 +589,79 @@ public void testScriptCaching() throws Exception { boolean useSigText = randomBoolean(); SearchResponse r; if (useSigText) { - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) + .get(); } else { - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) + .get(); } assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") 
+ .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached scriptHeuristic = getScriptSignificanceHeuristic(); useSigText = randomBoolean(); if (useSigText) { - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(significantText("foo", "t").significanceHeuristic(scriptHeuristic)) + .get(); } else { - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(significantTerms("foo").field("s").significanceHeuristic(scriptHeuristic)) + .get(); } assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal if (useSigText) { @@ -586,9 +671,27 @@ public void testScriptCaching() throws Exception { } assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index a2e76892342ba..c22d38e3f79e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -43,7 +43,6 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { private static final String LONG_FIELD_NAME = "l_value"; private static final String DOUBLE_FIELD_NAME = "d_value"; - public static String randomExecutionHint() { return randomBoolean() ? 
null : randomFrom(ExecutionMode.values()).toString(); } @@ -52,42 +51,55 @@ public static String randomExecutionHint() { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping(STRING_FIELD_NAME, "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get()); List builders = new ArrayList<>(); int numDocs = between(10, 200); - int numUniqueTerms = between(2,numDocs/2); + int numUniqueTerms = between(2, numDocs / 2); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) - .field(LONG_FIELD_NAME, randomInt(numUniqueTerms)) - .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setId("" + i) + .setSource( + jsonBuilder().startObject() + .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) + .field(LONG_FIELD_NAME, randomInt(numUniqueTerms)) + .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) + .endObject() + ) + ); } - assertAcked(prepareCreate("idx_single_shard") - .setMapping(STRING_FIELD_NAME, "type=keyword") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1))); + assertAcked( + prepareCreate("idx_single_shard").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)) + ); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx_single_shard").setId(""+i).setSource(jsonBuilder() - .startObject() - .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) - .field(LONG_FIELD_NAME, randomInt(numUniqueTerms)) - .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) - .endObject())); + builders.add( + client().prepareIndex("idx_single_shard") + .setId("" + i) + .setSource( + jsonBuilder().startObject() + .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) + .field(LONG_FIELD_NAME, randomInt(numUniqueTerms)) + .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) + .endObject() + ) + ); } - numRoutingValues = between(1,40); - assertAcked(prepareCreate("idx_with_routing") - .setMapping("{ \"_routing\" : { \"required\" : true } }")); + numRoutingValues = between(1, 40); + assertAcked(prepareCreate("idx_with_routing").setMapping("{ \"_routing\" : { \"required\" : true } }")); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx_single_shard").setId("" + i) - .setRouting(String.valueOf(randomInt(numRoutingValues))) - .setSource(jsonBuilder() - .startObject() - .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) - .field(LONG_FIELD_NAME, randomInt(numUniqueTerms)) - .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) - .endObject())); + builders.add( + client().prepareIndex("idx_single_shard") + .setId("" + i) + .setRouting(String.valueOf(randomInt(numRoutingValues))) + .setSource( + jsonBuilder().startObject() + .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms)) + .field(LONG_FIELD_NAME, randomInt(numUniqueTerms)) + .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms)) + .endObject() + ) + ); } Map shard0DocsPerTerm = new HashMap<>(); @@ -127,25 +139,13 @@ public void setupSuiteScopeCluster() throws Exception { shard2DocsPerTerm.put("D", 1); buildIndex(shard2DocsPerTerm, "idx_fixed_docs_2", 2, builders); - Map 
shard3DocsPerTerm = Map.of( - "A", 1, - "B", 1, - "C", 1 - ); + Map shard3DocsPerTerm = Map.of("A", 1, "B", 1, "C", 1); buildIndex(shard3DocsPerTerm, "idx_fixed_docs_3", 3, builders); - Map shard4DocsPerTerm = Map.of( - "K", 1, - "L", 1, - "M", 1 - ); + Map shard4DocsPerTerm = Map.of("K", 1, "L", 1, "M", 1); buildIndex(shard4DocsPerTerm, "idx_fixed_docs_4", 4, builders); - Map shard5DocsPerTerm = Map.of( - "X", 1, - "Y", 1, - "Z", 1 - ); + Map shard5DocsPerTerm = Map.of("X", 1, "Y", 1, "Z", 1); buildIndex(shard5DocsPerTerm, "idx_fixed_docs_5", 5, builders); indexRandom(true, builders); @@ -194,7 +194,7 @@ private void assertDocCountErrorWithinBounds(int size, SearchResponse accurateRe assertThat(testBucket.getDocCount() - testBucket.getDocCountError(), lessThanOrEqualTo(accurateBucket.getDocCount())); } - for (Terms.Bucket accurateBucket: accurateTerms.getBuckets()) { + for (Terms.Bucket accurateBucket : accurateTerms.getBuckets()) { assertThat(accurateBucket, notNullValue()); Terms.Bucket testBucket = accurateTerms.getBucketByKey(accurateBucket.getKeyAsString()); if (testBucket == null) { @@ -250,7 +250,7 @@ private void assertUnboundedDocCountError(int size, SearchResponse accurateRespo Terms testTerms = testResponse.getAggregations().get("terms"); assertThat(testTerms, notNullValue()); assertThat(testTerms.getName(), equalTo("terms")); - assertThat(testTerms.getDocCountError(),anyOf(equalTo(-1L), equalTo(0L))); + assertThat(testTerms.getDocCountError(), anyOf(equalTo(-1L), equalTo(0L))); List testBuckets = testTerms.getBuckets(); assertThat(testBuckets.size(), lessThanOrEqualTo(size)); assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size())); @@ -268,25 +268,28 @@ public void testStringValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -297,25 +300,28 @@ public void testStringValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + 
terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -327,15 +333,16 @@ public void testStringValueFieldWithRouting() throws Exception { int shardSize = randomIntBetween(size, size * 2); SearchResponse testResponse = client().prepareSearch("idx_with_routing") - .setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -346,27 +353,30 @@ public void testStringValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -377,27 +387,30 @@ public void testStringValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - 
.showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -408,27 +421,30 @@ public void testStringValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -439,29 +455,32 @@ public void testStringValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); 
assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(testResponse); @@ -472,29 +491,32 @@ public void testStringValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(testResponse); @@ -505,25 +527,28 @@ public void testLongValueField() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) 
- .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -534,25 +559,28 @@ public void testLongValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -564,15 +592,16 @@ public void testLongValueFieldWithRouting() throws Exception { int shardSize = randomIntBetween(size, size * 2); SearchResponse testResponse = client().prepareSearch("idx_with_routing") - .setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -583,27 +612,30 @@ public void testLongValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - 
.field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -614,27 +646,30 @@ public void testLongValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -645,27 +680,30 @@ public void testLongValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -676,29 +714,32 @@ public void testLongValueFieldSubAggAsc() throws Exception { int size = 
randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(testResponse); @@ -709,29 +750,32 @@ public void testLongValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(LONG_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(LONG_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)) + ) + .get(); assertSearchResponse(testResponse); @@ -742,25 +786,28 @@ public void testDoubleValueField() throws Exception { int size = 
randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -771,25 +818,28 @@ public void testDoubleValueFieldSingleShard() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -801,15 +851,16 @@ public void testDoubleValueFieldWithRouting() throws Exception { int shardSize = randomIntBetween(size, size * 2); SearchResponse testResponse = client().prepareSearch("idx_with_routing") - .setRouting(String.valueOf(between(1, numRoutingValues))) - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .setRouting(String.valueOf(between(1, numRoutingValues))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -820,27 +871,30 @@ public void testDoubleValueFieldDocCountAsc() throws Exception { int size = randomIntBetween(1, 20); int 
shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.count(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.count(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -851,27 +905,30 @@ public void testDoubleValueFieldTermSortAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(true)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -882,27 +939,30 @@ public void testDoubleValueFieldTermSortDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + 
.order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.key(false)) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.key(false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(testResponse); @@ -913,29 +973,32 @@ public void testDoubleValueFieldSubAggAsc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", true)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", true)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(testResponse); @@ -946,29 +1009,32 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { int size = randomIntBetween(1, 20); int shardSize = randomIntBetween(size, size * 2); SearchResponse accurateResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(10000).shardSize(10000) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(10000) + .shardSize(10000) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); 
assertSearchResponse(accurateResponse); SearchResponse testResponse = client().prepareSearch("idx_single_shard") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(DOUBLE_FIELD_NAME) - .showTermDocCountError(true) - .size(size) - .shardSize(shardSize) - .order(BucketOrder.aggregation("sortAgg", false)) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(DOUBLE_FIELD_NAME) + .showTermDocCountError(true) + .size(size) + .shardSize(shardSize) + .order(BucketOrder.aggregation("sortAgg", false)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)) + ) + .get(); assertSearchResponse(testResponse); @@ -982,13 +1048,15 @@ public void testDoubleValueFieldSubAggDesc() throws Exception { */ public void testFixedDocs() throws Exception { SearchResponse response = client().prepareSearch("idx_fixed_docs_0", "idx_fixed_docs_1", "idx_fixed_docs_2") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5).shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -1016,7 +1084,6 @@ public void testFixedDocs() throws Exception { assertThat(bucket.getDocCount(), equalTo(50L)); assertThat(bucket.getDocCountError(), equalTo(15L)); - bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKey(), equalTo("G")); @@ -1036,12 +1103,14 @@ public void testFixedDocs() throws Exception { */ public void testIncrementalReduction() { SearchResponse response = client().prepareSearch("idx_fixed_docs_3", "idx_fixed_docs_4", "idx_fixed_docs_5") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(STRING_FIELD_NAME) - .showTermDocCountError(true) - .size(5).shardSize(5) - .collectMode(randomFrom(SubAggCollectionMode.values()))) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(STRING_FIELD_NAME) + .showTermDocCountError(true) + .size(5) + .shardSize(5) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 167c452cbc0fd..4a431fbf48b40 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -46,40 +46,50 @@ public void testShardMinDocCountSignificantTermsTest() throws Exception { } else { textMappings = "type=text,fielddata=true"; } - assertAcked(prepareCreate(index).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) - .setMapping("text", textMappings)); + assertAcked( + 
prepareCreate(index).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)) + .setMapping("text", textMappings) + ); List indexBuilders = new ArrayList<>(); - addTermsDocs("1", 1, 0, indexBuilders);//high score but low doc freq + addTermsDocs("1", 1, 0, indexBuilders);// high score but low doc freq addTermsDocs("2", 1, 0, indexBuilders); addTermsDocs("3", 1, 0, indexBuilders); addTermsDocs("4", 1, 0, indexBuilders); - addTermsDocs("5", 3, 1, indexBuilders);//low score but high doc freq + addTermsDocs("5", 3, 1, indexBuilders);// low score but high doc freq addTermsDocs("6", 3, 1, indexBuilders); addTermsDocs("7", 0, 3, indexBuilders);// make sure the terms all get score > 0 except for this one indexRandom(true, false, indexBuilders); // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned SearchResponse response = client().prepareSearch(index) - .addAggregation( - (filter("inclass", QueryBuilders.termQuery("class", true))) - .subAggregation(significantTerms("mySignificantTerms").field("text").minDocCount(2).size(2).shardSize(2) - .executionHint(randomExecutionHint())) + .addAggregation( + (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( + significantTerms("mySignificantTerms").field("text") + .minDocCount(2) + .size(2) + .shardSize(2) + .executionHint(randomExecutionHint()) ) - .get(); + ) + .get(); assertSearchResponse(response); InternalFilter filteredBucket = response.getAggregations().get("inclass"); SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms"); assertThat(sigterms.getBuckets().size(), equalTo(0)); - response = client().prepareSearch(index) - .addAggregation( - (filter("inclass", QueryBuilders.termQuery("class", true))) - .subAggregation(significantTerms("mySignificantTerms").field("text").minDocCount(2).shardSize(2) - .shardMinDocCount(2).size(2).executionHint(randomExecutionHint())) + .addAggregation( + (filter("inclass", QueryBuilders.termQuery("class", true))).subAggregation( + significantTerms("mySignificantTerms").field("text") + .minDocCount(2) + .shardSize(2) + .shardMinDocCount(2) + .size(2) + .executionHint(randomExecutionHint()) ) - .get(); + ) + .get(); assertSearchResponse(response); filteredBucket = response.getAggregations().get("inclass"); sigterms = filteredBucket.getAggregations().get("mySignificantTerms"); @@ -99,41 +109,52 @@ private void addTermsDocs(String term, int numInClass, int numNotInClass, List indexBuilders = new ArrayList<>(); - addTermsDocs("1", 1, indexBuilders);//low doc freq but high score + addTermsDocs("1", 1, indexBuilders);// low doc freq but high score addTermsDocs("2", 1, indexBuilders); addTermsDocs("3", 1, indexBuilders); addTermsDocs("4", 1, indexBuilders); - addTermsDocs("5", 3, indexBuilders);//low score but high doc freq + addTermsDocs("5", 3, indexBuilders);// low score but high doc freq addTermsDocs("6", 3, indexBuilders); indexRandom(true, false, indexBuilders); // first, check that indeed when not setting the shardMinDocCount parameter 0 terms are returned SearchResponse response = client().prepareSearch(index) - .addAggregation( - terms("myTerms").field("text").minDocCount(2).size(2).shardSize(2).executionHint(randomExecutionHint()) - .order(BucketOrder.key(true)) - ) - .get(); + .addAggregation( + terms("myTerms").field("text") + .minDocCount(2) + .size(2) + .shardSize(2) + .executionHint(randomExecutionHint()) + .order(BucketOrder.key(true)) + ) + .get(); 
assertSearchResponse(response); Terms sigterms = response.getAggregations().get("myTerms"); assertThat(sigterms.getBuckets().size(), equalTo(0)); response = client().prepareSearch(index) - .addAggregation( - terms("myTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).shardSize(2) - .executionHint(randomExecutionHint()).order(BucketOrder.key(true)) - ) - .get(); + .addAggregation( + terms("myTerms").field("text") + .minDocCount(2) + .shardMinDocCount(2) + .size(2) + .shardSize(2) + .executionHint(randomExecutionHint()) + .order(BucketOrder.key(true)) + ) + .get(); assertSearchResponse(response); sigterms = response.getAggregations().get("myTerms"); assertThat(sigterms.getBuckets().size(), equalTo(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java index d76aa092d2685..252a9e39f8cae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsIT.java @@ -55,7 +55,7 @@ public void testSingleValuedString() { assertNumRareTerms(10, numDocs); } - private void assertNumRareTerms(int maxDocs, int rareTerms) { + private void assertNumRareTerms(int maxDocs, int rareTerms) { final SearchRequestBuilder requestBuilder = client().prepareSearch(index); requestBuilder.addAggregation(new RareTermsAggregationBuilder("rareTerms").field("str_value.keyword").maxDocCount(maxDocs)); final SearchResponse response = requestBuilder.get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java index def6db7c965f9..370f9301a9ca3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -129,42 +129,63 @@ protected Map, Object>> nonDeterministicPlu @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", - MULTI_VALUED_FIELD_NAME, "type=keyword", - "tag", "type=keyword").get()); + assertAcked( + client().admin() + .indices() + .prepareCreate("idx") + .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") + .get() + ); List builders = new ArrayList<>(); for (int i = 0; i < 5; i++) { - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject() + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() .field(SINGLE_VALUED_FIELD_NAME, "val" + i) .field("i", i) .field("constant", 1) .field("tag", i < 5 / 2 + 1 ? 
"more" : "less") .startArray(MULTI_VALUED_FIELD_NAME) - .value("val" + i) - .value("val" + (i + 1)) + .value("val" + i) + .value("val" + (i + 1)) .endArray() - .endObject())); + .endObject() + ) + ); } getMultiSortDocs(builders); - assertAcked(client().admin().indices().prepareCreate("high_card_idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", - MULTI_VALUED_FIELD_NAME, "type=keyword", - "tag", "type=keyword").get()); + assertAcked( + client().admin() + .indices() + .prepareCreate("high_card_idx") + .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") + .get() + ); for (int i = 0; i < 100; i++) { - builders.add(client().prepareIndex("high_card_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) - .startArray(MULTI_VALUED_FIELD_NAME).value("val" + Strings.padStart(i + "", 3, '0')) - .value("val" + Strings.padStart((i + 1) + "", 3, '0')).endArray().endObject())); + builders.add( + client().prepareIndex("high_card_idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0')) + .startArray(MULTI_VALUED_FIELD_NAME) + .value("val" + Strings.padStart(i + "", 3, '0')) + .value("val" + Strings.padStart((i + 1) + "", 3, '0')) + .endArray() + .endObject() + ) + ); } prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer").get(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); createIndex("idx_unmapped"); @@ -216,32 +237,55 @@ private void getMultiSortDocs(List builders) throws IOExcep bucketProps.put("sum_d", 1d); expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps); - assertAcked(client().admin().indices().prepareCreate("sort_idx") - .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", - MULTI_VALUED_FIELD_NAME, "type=keyword", - "tag", "type=keyword").get()); + assertAcked( + client().admin() + .indices() + .prepareCreate("sort_idx") + .setMapping(SINGLE_VALUED_FIELD_NAME, "type=keyword", MULTI_VALUED_FIELD_NAME, "type=keyword", "tag", "type=keyword") + .get() + ); for (int i = 1; i <= 3; i++) { - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject()) + ); } - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, 
"val3").field("l", 3).field("d", 2).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject())); - builders.add(client().prepareIndex("sort_idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject())); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject()) + ); + builders.add( + client().prepareIndex("sort_idx") + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject()) + ); } private String key(Terms.Bucket bucket) { @@ -251,11 +295,18 @@ private String key(Terms.Bucket bucket) { // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { final int minDocCount = randomInt(1); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> client() - .prepareSearch("high_card_idx") + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("high_card_idx") .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).minDocCount(minDocCount).size(0)).get()); + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .minDocCount(minDocCount) + .size(0) + ) + .get() + ); assertThat(exception.getMessage(), containsString("[size] must be greater than 0. 
Found [0] in [terms]")); } @@ -270,8 +321,8 @@ public void testMultiValueFieldWithPartitionedFiltering() throws Exception { private void runTestFieldWithPartitionedFiltering(String field) throws Exception { // Find total number of unique terms SearchResponse allResponse = client().prepareSearch("idx") - .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + .addAggregation(terms("terms").field(field).size(10000).collectMode(randomFrom(SubAggCollectionMode.values()))) + .get(); assertSearchResponse(allResponse); Terms terms = allResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); @@ -282,9 +333,13 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception final int numPartitions = randomIntBetween(2, 4); Set foundTerms = new HashSet<>(); for (int partition = 0; partition < numPartitions; partition++) { - SearchResponse response = client().prepareSearch("idx").addAggregation(terms("terms").field(field) - .includeExclude(new IncludeExclude(partition, numPartitions)).collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field(field) + .includeExclude(new IncludeExclude(partition, numPartitions)) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); @@ -297,16 +352,15 @@ private void runTestFieldWithPartitionedFiltering(String field) throws Exception } public void testSingleValuedFieldWithValueScript() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .executionHint(randomExecutionHint()) - .field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -324,17 +378,15 @@ public void testSingleValuedFieldWithValueScript() throws Exception { } public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .executionHint(randomExecutionHint()) - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script( - ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value.substring(0,3)", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -350,16 +402,21 @@ public void testMultiValuedFieldWithValueScriptNotUnique() throws Exception { } public void testMultiValuedScript() throws Exception { - SearchResponse response = client() - 
.prepareSearch("idx") - - .addAggregation( - terms("terms") - .executionHint(randomExecutionHint()) - .script(new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap())) - .collectMode(randomFrom(SubAggCollectionMode.values()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) + ) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); @@ -381,16 +438,15 @@ public void testMultiValuedScript() throws Exception { } public void testMultiValuedFieldWithValueScript() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .executionHint(randomExecutionHint()) - .field(MULTI_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(MULTI_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -424,18 +480,19 @@ public void testMultiValuedFieldWithValueScript() throws Exception { */ public void testScriptSingleValue() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ); - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(script)) - .get(); + .addAggregation( + terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint()).script(script) + ) + .get(); assertSearchResponse(response); @@ -453,18 +510,19 @@ public void testScriptSingleValue() throws Exception { } public void testScriptSingleValueExplicitSingleValue() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value", + Collections.emptyMap() + ); - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(script)) - .get(); + .addAggregation( + terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint()).script(script) + ) + .get(); assertSearchResponse(response); @@ -482,16 +540,21 @@ public void testScriptSingleValueExplicitSingleValue() throws Exception { } public void 
testScriptMultiValued() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - - .addAggregation( - terms("terms") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .executionHint(randomExecutionHint()) - .script(new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "doc['" + MULTI_VALUED_FIELD_NAME + "']", Collections.emptyMap()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + + .addAggregation( + terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .script( + new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc['" + MULTI_VALUED_FIELD_NAME + "']", + Collections.emptyMap() + ) + ) + ) + .get(); assertSearchResponse(response); @@ -513,12 +576,14 @@ public void testScriptMultiValued() throws Exception { } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values()))).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + ) + .get(); assertSearchResponse(response); @@ -537,13 +602,14 @@ public void testPartiallyUnmapped() throws Exception { public void testStringTermsNestedIntoPerBucketAggregator() throws Exception { // no execution hint so that the logic that decides whether or not to use ordinals is executed - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( - terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())))) - .get(); + .addAggregation( + filter("filter", termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation( + terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())) + ) + ) + .get(); assertThat(response.getFailedShards(), equalTo(0)); @@ -565,14 +631,15 @@ public void testStringTermsNestedIntoPerBucketAggregator() throws Exception { public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { boolean asc = true; try { - client() - .prepareSearch("idx") + client().prepareSearch("idx") .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("inner_terms>avg", asc)) - .subAggregation(terms("inner_terms").field(MULTI_VALUED_FIELD_NAME).subAggregation(avg("avg").field("i")))) + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("inner_terms>avg", asc)) + .subAggregation(terms("inner_terms").field(MULTI_VALUED_FIELD_NAME).subAggregation(avg("avg").field("i"))) + ) .get(); fail("Expected an exception"); } catch (SearchPhaseExecutionException e) { @@ -599,13 +666,16 @@ public void testSingleValuedFieldOrderedByIllegalAgg() throws Exception { public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = client() - 
.prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("tags").executionHint(randomExecutionHint()).field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("filter", asc)) - .subAggregation(filter("filter", QueryBuilders.matchAllQuery()))).get(); + .addAggregation( + terms("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter", asc)) + .subAggregation(filter("filter", QueryBuilders.matchAllQuery())) + ) + .get(); assertSearchResponse(response); @@ -635,19 +705,20 @@ public void testSingleValuedFieldOrderedBySingleBucketSubAggregationAsc() throws public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevels() throws Exception { boolean asc = randomBoolean(); - SearchResponse response = client() - .prepareSearch("idx") - - .addAggregation( - terms("tags") - .executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter("filter2", QueryBuilders.matchAllQuery()).subAggregation( - stats("stats").field("i"))))).get(); + SearchResponse response = client().prepareSearch("idx") + + .addAggregation( + terms("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>filter2>stats.max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter("filter2", QueryBuilders.matchAllQuery()).subAggregation(stats("stats").field("i")) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -698,19 +769,20 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS statsNameBuilder.append(randomAlphaOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", "")); String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); - SearchResponse response = client() - .prepareSearch("idx") - - .addAggregation( - terms("tags") - .executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation( - stats(statsName).field("i"))))).get(); + SearchResponse response = client().prepareSearch("idx") + + .addAggregation( + terms("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -761,19 +833,20 @@ public void testSingleValuedFieldOrderedBySubAggregationAscMultiHierarchyLevelsS statsNameBuilder.append(randomAlphaOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", "")); String statsName = statsNameBuilder.toString(); boolean asc = randomBoolean(); - SearchResponse response = client() - .prepareSearch("idx") - - 
.addAggregation( - terms("tags") - .executionHint(randomExecutionHint()) - .field("tag") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) - .subAggregation( - filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( - filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation( - stats(statsName).field("i"))))).get(); + SearchResponse response = client().prepareSearch("idx") + + .addAggregation( + terms("tags").executionHint(randomExecutionHint()) + .field("tag") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc)) + .subAggregation( + filter("filter1", QueryBuilders.matchAllQuery()).subAggregation( + filter(filter2Name, QueryBuilders.matchAllQuery()).subAggregation(stats(statsName).field("i")) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -821,10 +894,13 @@ public void testSingleValuedFieldOrderedByMissingSubAggregation() throws Excepti try { client().prepareSearch(index) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("avg_i", true))).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("avg_i", true)) + ) + .get(); fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist"); @@ -839,15 +915,19 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( try { client().prepareSearch(index) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("values", true)) - .subAggregation(terms("values").field("i").collectMode(randomFrom(SubAggCollectionMode.values())))) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("values", true)) + .subAggregation(terms("values").field("i").collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); - fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation " + - "which is not of a metrics or single-bucket type"); + fail( + "Expected search to fail when trying to sort terms aggregation by sug-aggregation " + + "which is not of a metrics or single-bucket type" + ); } catch (ElasticsearchException e) { // expected @@ -858,16 +938,22 @@ public void testSingleValuedFieldOrderedByNonMetricsOrMultiBucketSubAggregation( public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithUnknownMetric() throws Exception { for (String index : Arrays.asList("idx", "idx_unmapped")) { try { - SearchResponse response = client() - .prepareSearch(index) + SearchResponse response = client().prepareSearch(index) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.foo", true)).subAggregation(stats("stats").field("i"))) - .get(); - fail("Expected search to fail when trying to sort terms aggregation by 
multi-valued sug-aggregation " - + "with an unknown specified metric to order by. response had " + response.getFailedShards() + " failed shards."); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.foo", true)) + .subAggregation(stats("stats").field("i")) + ) + .get(); + fail( + "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + + "with an unknown specified metric to order by. response had " + + response.getFailedShards() + + " failed shards." + ); } catch (ElasticsearchException e) { // expected @@ -880,14 +966,20 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric try { client().prepareSearch(index) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats", true)).subAggregation(stats("stats").field("i"))).execute() - .actionGet(); - - fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " - + "where the metric name is not specified"); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats", true)) + .subAggregation(stats("stats").field("i")) + ) + .execute() + .actionGet(); + + fail( + "Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " + + "where the metric name is not specified" + ); } catch (ElasticsearchException e) { // expected @@ -897,13 +989,16 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationWithoutMetric public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws Exception { boolean asc = true; - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field("i"))).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field("i")) + ) + .get(); assertSearchResponse(response); @@ -927,13 +1022,16 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationAsc() throws E public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws Exception { boolean asc = false; - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.aggregation("stats.avg", asc)) - .subAggregation(stats("stats").field("i"))).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.avg", asc)) + .subAggregation(stats("stats").field("i")) + ) 
+ .get(); assertSearchResponse(response); @@ -958,14 +1056,16 @@ public void testSingleValuedFieldOrderedByMultiValueSubAggregationDesc() throws public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Exception { boolean asc = true; - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) - .subAggregation(extendedStats("stats").field("i"))).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) + .subAggregation(extendedStats("stats").field("i")) + ) + .get(); assertSearchResponse(response); @@ -990,16 +1090,17 @@ public void testSingleValuedFieldOrderedByMultiValueExtendedStatsAsc() throws Ex public void testSingleValuedFieldOrderedByStatsAggAscWithTermsSubAgg() throws Exception { boolean asc = true; - SearchResponse response = client() - .prepareSearch("idx") - - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) - .subAggregation(extendedStats("stats").field("i")) - .subAggregation(terms("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values())))) - .get(); + SearchResponse response = client().prepareSearch("idx") + + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("stats.sum_of_squares", asc)) + .subAggregation(extendedStats("stats").field("i")) + .subAggregation(terms("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values()))) + ) + .get(); assertSearchResponse(response); @@ -1060,8 +1161,12 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscSingleValu public void testSingleValuedFieldOrderedByThreeCriteria() throws Exception { String[] expectedKeys = new String[] { "val2", "val1", "val4", "val5", "val3", "val6", "val7" }; - assertMultiSortResponse(expectedKeys, BucketOrder.count(false), BucketOrder.aggregation("sum_d", false), - BucketOrder.aggregation("avg_l", false)); + assertMultiSortResponse( + expectedKeys, + BucketOrder.count(false), + BucketOrder.aggregation("sum_d", false), + BucketOrder.aggregation("avg_l", false) + ); } public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound() throws Exception { @@ -1070,12 +1175,16 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... 
order) { - SearchResponse response = client() - .prepareSearch("sort_idx") - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME) - .collectMode(randomFrom(SubAggCollectionMode.values())).order(BucketOrder.compound(order)) - .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).get(); + SearchResponse response = client().prepareSearch("sort_idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(SINGLE_VALUED_FIELD_NAME) + .collectMode(randomFrom(SubAggCollectionMode.values())) + .order(BucketOrder.compound(order)) + .subAggregation(avg("avg_l").field("l")) + .subAggregation(sum("sum_d").field("d")) + ) + .get(); assertSearchResponse(response); @@ -1100,12 +1209,14 @@ private void assertMultiSortResponse(String[] expectedKeys, BucketOrder... order } public void testIndexMetaField() throws Exception { - SearchResponse response = client() - .prepareSearch("idx", "empty_bucket_idx") + SearchResponse response = client().prepareSearch("idx", "empty_bucket_idx") - .addAggregation( - terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint()) - .field(IndexFieldMapper.NAME)).get(); + .addAggregation( + terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())) + .executionHint(randomExecutionHint()) + .field(IndexFieldMapper.NAME) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -1131,67 +1242,145 @@ public void testOtherDocCount() { * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=keyword") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=keyword") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", "foo"), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", "bar")); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", "foo"), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", "bar") + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - terms("terms").field("d").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + terms("terms").field("d") + .script(new 
Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - terms("terms").field("d").script( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + terms("terms").field("d") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "'foo_' + _value", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(terms("terms").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testScriptWithValueType() throws Exception { - SearchSourceBuilder builder = new SearchSourceBuilder() - .size(0) - .aggregation(terms("terms") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "42", Collections.emptyMap())) - .userValueTypeHint(randomFrom(ValueType.NUMERIC, ValueType.NUMBER))); + SearchSourceBuilder builder = new SearchSourceBuilder().size(0) + .aggregation( + terms("terms").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "42", Collections.emptyMap())) + .userValueTypeHint(randomFrom(ValueType.NUMERIC, ValueType.NUMBER)) + ); String source 
= builder.toString(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { - SearchResponse response = client() - .prepareSearch("idx") - .setSource(SearchSourceBuilder.fromXContent(parser)) - .get(); + SearchResponse response = client().prepareSearch("idx").setSource(SearchSourceBuilder.fromXContent(parser)).get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -1203,10 +1392,10 @@ public void testScriptWithValueType() throws Exception { String invalidValueType = source.replaceAll("\"value_type\":\"n.*\"", "\"value_type\":\"foobar\""); try (XContentParser parser = createParser(JsonXContent.jsonXContent, invalidValueType)) { - XContentParseException ex = expectThrows(XContentParseException.class, () -> client() - .prepareSearch("idx") - .setSource(SearchSourceBuilder.fromXContent(parser)) - .get()); + XContentParseException ex = expectThrows( + XContentParseException.class, + () -> client().prepareSearch("idx").setSource(SearchSourceBuilder.fromXContent(parser)).get() + ); assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); assertThat(ex.getCause().getMessage(), containsString("Unknown value type [foobar]")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java index 2f452ca9c5c23..f700cf7456d1b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityIT.java @@ -68,12 +68,12 @@ protected Map, Object>> pluginScripts() { }); scripts.put("doc[' + singleNumericField() + '].value", vars -> { - Map doc =(Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); return doc.get(singleNumericField()); }); scripts.put("doc[' + multiNumericField(false) + ']", vars -> { - Map doc =(Map) vars.get("doc"); + Map doc = (Map) vars.get("doc"); return (ScriptDocValues) doc.get(multiNumericField(false)); }); @@ -93,9 +93,9 @@ protected Map, Object>> nonDeterministicPlu @Override public Settings indexSettings() { return Settings.builder() - .put("index.number_of_shards", numberOfShards()) - .put("index.number_of_replicas", numberOfReplicas()) - .build(); + .put("index.number_of_shards", numberOfShards()) + .put("index.number_of_replicas", numberOfReplicas()) + .build(); } static long numDocs; @@ -105,40 +105,47 @@ public Settings indexSettings() { public void setupSuiteScopeCluster() throws Exception { prepareCreate("idx").setMapping( - jsonBuilder().startObject().startObject("_doc").startObject("properties") - .startObject("str_value") - .field("type", "keyword") - .endObject() - .startObject("str_values") - .field("type", "keyword") - .endObject() - .startObject("l_value") - .field("type", "long") - .endObject() - .startObject("l_values") - .field("type", "long") - .endObject() - .startObject("d_value") - .field("type", "double") - .endObject() - .startObject("d_values") - .field("type", "double") - .endObject() - .endObject().endObject().endObject()).get(); + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("str_value") + .field("type", "keyword") + .endObject() + .startObject("str_values") + .field("type", "keyword") + .endObject() + .startObject("l_value") + .field("type", "long") + .endObject() + .startObject("l_values") + .field("type", "long") + 
.endObject() + .startObject("d_value") + .field("type", "double") + .endObject() + .startObject("d_values") + .field("type", "double") + .endObject() + .endObject() + .endObject() + .endObject() + ).get(); numDocs = randomIntBetween(2, 100); precisionThreshold = randomIntBetween(0, 1 << randomInt(20)); IndexRequestBuilder[] builders = new IndexRequestBuilder[(int) numDocs]; for (int i = 0; i < numDocs; ++i) { - builders[i] = client().prepareIndex("idx").setSource(jsonBuilder() - .startObject() + builders[i] = client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() .field("str_value", "s" + i) - .array("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)}) + .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) .field("l_value", i) - .array("l_values", new int[] {i * 2, i * 2 + 1}) + .array("l_values", new int[] { i * 2, i * 2 + 1 }) .field("d_value", i) - .array("d_values", new double[]{i * 2, i * 2 + 1}) - .endObject()); + .array("d_values", new double[] { i * 2, i * 2 + 1 }) + .endObject() + ); } indexRandom(true, builders); createIndex("idx_unmapped"); @@ -161,7 +168,8 @@ private void assertCount(Cardinality count, long value) { assertThat(count.getValue(), greaterThan(0L)); } } - private static String singleNumericField() { + + private static String singleNumericField() { return randomBoolean() ? "l_value" : "d_value"; } @@ -171,8 +179,8 @@ private static String multiNumericField(boolean hash) { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); assertSearchResponse(response); @@ -184,8 +192,8 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); assertSearchResponse(response); @@ -197,8 +205,8 @@ public void testPartiallyUnmapped() throws Exception { public void testSingleValuedString() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value")) + .get(); assertSearchResponse(response); @@ -210,8 +218,8 @@ public void testSingleValuedString() throws Exception { public void testSingleValuedNumeric() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) + .get(); assertSearchResponse(response); @@ -222,11 +230,14 @@ public void testSingleValuedNumeric() throws Exception { } public void testSingleValuedNumericGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - 
cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()) + ) + ) + .get(); assertSearchResponse(searchResponse); @@ -242,15 +253,15 @@ public void testSingleValuedNumericGetProperty() throws Exception { assertThat(cardinality.getName(), equalTo("cardinality")); long expectedValue = numDocs; assertCount(cardinality, expectedValue); - assertThat(((InternalAggregation)global).getProperty("cardinality"), equalTo(cardinality)); - assertThat(((InternalAggregation)global).getProperty("cardinality.value"), equalTo((double) cardinality.getValue())); - assertThat((double) ((InternalAggregation)cardinality).getProperty("value"), equalTo((double) cardinality.getValue())); + assertThat(((InternalAggregation) global).getProperty("cardinality"), equalTo(cardinality)); + assertThat(((InternalAggregation) global).getProperty("cardinality.value"), equalTo((double) cardinality.getValue())); + assertThat((double) ((InternalAggregation) cardinality).getProperty("value"), equalTo((double) cardinality.getValue())); } public void testSingleValuedNumericHashed() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField())) + .get(); assertSearchResponse(response); @@ -262,8 +273,8 @@ public void testSingleValuedNumericHashed() throws Exception { public void testMultiValuedString() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) + .get(); assertSearchResponse(response); @@ -275,8 +286,8 @@ public void testMultiValuedString() throws Exception { public void testMultiValuedNumeric() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false))) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false))) + .get(); assertSearchResponse(response); @@ -288,8 +299,8 @@ public void testMultiValuedNumeric() throws Exception { public void testMultiValuedNumericHashed() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(true))) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(true))) + .get(); assertSearchResponse(response); @@ -301,11 +312,11 @@ public void testMultiValuedNumericHashed() throws Exception { public void testSingleValuedStringScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality") - .precisionThreshold(precisionThreshold) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", emptyMap()))) - .get(); + .addAggregation( + 
cardinality("cardinality").precisionThreshold(precisionThreshold) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_value'].value", emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -317,11 +328,11 @@ public void testSingleValuedStringScript() throws Exception { public void testMultiValuedStringScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality") - .precisionThreshold(precisionThreshold) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_values']", emptyMap()))) - .get(); + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['str_values']", emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -334,8 +345,8 @@ public void testMultiValuedStringScript() throws Exception { public void testSingleValuedNumericScript() throws Exception { Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + singleNumericField() + '].value", emptyMap()); SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) + .get(); assertSearchResponse(response); @@ -346,11 +357,15 @@ public void testSingleValuedNumericScript() throws Exception { } public void testMultiValuedNumericScript() throws Exception { - Script script = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc[' + multiNumericField(false) + ']", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "doc[' + multiNumericField(false) + ']", + Collections.emptyMap() + ); SearchResponse response = client().prepareSearch("idx") - .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) - .get(); + .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script(script)) + .get(); assertSearchResponse(response); @@ -362,12 +377,12 @@ public void testMultiValuedNumericScript() throws Exception { public void testSingleValuedStringValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality") - .precisionThreshold(precisionThreshold) - .field("str_value") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) - .get(); + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold) + .field("str_value") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -379,12 +394,12 @@ public void testSingleValuedStringValueScript() throws Exception { public void testMultiValuedStringValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality") - .precisionThreshold(precisionThreshold) - .field("str_values") - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) - .get(); + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold) + .field("str_values") + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -396,12 +411,12 @@ public void 
testMultiValuedStringValueScript() throws Exception { public void testSingleValuedNumericValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality") - .precisionThreshold(precisionThreshold) - .field(singleNumericField()) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) - .get(); + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold) + .field(singleNumericField()) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -413,12 +428,12 @@ public void testSingleValuedNumericValueScript() throws Exception { public void testMultiValuedNumericValueScript() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation( - cardinality("cardinality") - .precisionThreshold(precisionThreshold) - .field(multiNumericField(false)) - .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) - .get(); + .addAggregation( + cardinality("cardinality").precisionThreshold(precisionThreshold) + .field(multiNumericField(false)) + .script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) + ) + .get(); assertSearchResponse(response); @@ -430,10 +445,12 @@ public void testMultiValuedNumericValueScript() throws Exception { public void testAsSubAgg() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("str_value") - .collectMode(randomFrom(SubAggCollectionMode.values())) - .subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values"))) - .get(); + .addAggregation( + terms("terms").field("str_value") + .collectMode(randomFrom(SubAggCollectionMode.values())) + .subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")) + ) + .get(); assertSearchResponse(response); @@ -451,50 +468,130 @@ public void testAsSubAgg() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", - emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation( - cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + cardinality("foo").field("d").script(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + 
.indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(cardinality("foo").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index bdaa8c6dcd6a8..463c728c3e7a0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -33,23 +33,33 @@ public void testRequestBreaker() throws Exception { final String requestBreaker = randomIntBetween(1, 10000) + "kb"; logger.info("--> Using request breaker setting: {}", requestBreaker); - indexRandom(true, IntStream.range(0, randomIntBetween(10, 1000)) - .mapToObj(i -> - client().prepareIndex("test").setId("id_" + i) - .setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5))) - ).toArray(IndexRequestBuilder[]::new)); + indexRandom( + true, + IntStream.range(0, randomIntBetween(10, 1000)) + .mapToObj( + i -> client().prepareIndex("test") + .setId("id_" + i) + .setSource(Map.of("field0", randomAlphaOfLength(5), "field1", randomAlphaOfLength(5))) + ) + .toArray(IndexRequestBuilder[]::new) + ); - client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), - requestBreaker)) + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), requestBreaker) + ) .get(); try { client().prepareSearch("test") - .addAggregation(terms("terms").field("field0.keyword") - .collectMode(randomFrom(Aggregator.SubAggCollectionMode.values())) - .order(BucketOrder.aggregation("cardinality", randomBoolean())) - .subAggregation(cardinality("cardinality").precisionThreshold(randomLongBetween(1, 40000)).field("field1.keyword"))) + .addAggregation( + terms("terms").field("field0.keyword") + .collectMode(randomFrom(Aggregator.SubAggCollectionMode.values())) + .order(BucketOrder.aggregation("cardinality", randomBoolean())) + 
.subAggregation(cardinality("cardinality").precisionThreshold(randomLongBetween(1, 40000)).field("field1.keyword")) + ) .get(); } catch (ElasticsearchException e) { if (ExceptionsHelper.unwrap(e, CircuitBreakingException.class) == null) { @@ -57,7 +67,9 @@ public void testRequestBreaker() throws Exception { } } - client().admin().cluster().prepareUpdateSettings() + client().admin() + .cluster() + .prepareUpdateSettings() .setTransientSettings(Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey())) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 3b40cedff572a..c4c7cc5eb5268 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; @@ -20,7 +21,6 @@ import org.elasticsearch.search.aggregations.bucket.missing.Missing; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds; -import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -74,8 +74,8 @@ private static double variancePopulation(int... vals) { sum += val; sumOfSqrs += val * val; } - double variance = (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length; - return variance < 0 ? 0 : variance; + double variance = (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length; + return variance < 0 ? 0 : variance; } private static double varianceSampling(int... vals) { @@ -85,17 +85,18 @@ private static double varianceSampling(int... vals) { sum += val; sumOfSqrs += val * val; } - double variance = (sumOfSqrs - ((sum * sum) / vals.length)) / (vals.length - 1); - return variance < 0 ? 0 : variance; + double variance = (sumOfSqrs - ((sum * sum) / vals.length)) / (vals.length - 1); + return variance < 0 ? 
0 : variance; } @Override public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(extendedStats("stats").field("value"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(extendedStats("stats").field("value")) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -120,14 +121,15 @@ public void testEmptyAggregation() throws Exception { assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_POPULATION)), is(true)); assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_POPULATION)), is(true)); assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING)), is(true)); - assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true));} + assertThat(Double.isNaN(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING)), is(true)); + } @Override public void testUnmapped() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("value")) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); @@ -157,11 +159,15 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() { double sigma = randomDouble() * 5; ExtendedStats s1 = client().prepareSearch("idx") - .addAggregation(extendedStats("stats").field("value").sigma(sigma)).get() - .getAggregations().get("stats"); + .addAggregation(extendedStats("stats").field("value").sigma(sigma)) + .get() + .getAggregations() + .get("stats"); ExtendedStats s2 = client().prepareSearch("idx", "idx_unmapped") - .addAggregation(extendedStats("stats").field("value").sigma(sigma)).get() - .getAggregations().get("stats"); + .addAggregation(extendedStats("stats").field("value").sigma(sigma)) + .get() + .getAggregations() + .get("stats"); assertEquals(s1.getAvg(), s2.getAvg(), 1e-10); assertEquals(s1.getCount(), s2.getCount()); assertEquals(s1.getMin(), s2.getMin(), 0d); @@ -175,27 +181,28 @@ public void testPartiallyUnmapped() { assertEquals(s1.getStdDeviationBound(Bounds.LOWER_POPULATION), s2.getStdDeviationBound(Bounds.LOWER_POPULATION), 1e-10); assertEquals(s1.getStdDeviationBound(Bounds.UPPER_POPULATION), s2.getStdDeviationBound(Bounds.UPPER_POPULATION), 1e-10); assertEquals(s1.getStdDeviationBound(Bounds.LOWER_SAMPLING), s2.getStdDeviationBound(Bounds.LOWER_SAMPLING), 1e-10); - assertEquals(s1.getStdDeviationBound(Bounds.UPPER_SAMPLING), s2.getStdDeviationBound(Bounds.UPPER_SAMPLING), 1e-10);} + assertEquals(s1.getStdDeviationBound(Bounds.UPPER_SAMPLING), s2.getStdDeviationBound(Bounds.UPPER_SAMPLING), 1e-10); + } @Override public void testSingleValuedField() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("value").sigma(sigma)) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = 
searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); @@ -209,21 +216,21 @@ public void testSingleValuedFieldDefaultSigma() throws Exception { // Same as previous test, but uses a default value for sigma SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("value")) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); @@ -235,8 +242,10 @@ public void testSingleValuedFieldDefaultSigma() throws Exception { public void testSingleValuedField_WithFormatter() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)) + .get(); assertHitCount(searchResponse, 10); @@ -271,8 +280,10 @@ public void testSingleValuedField_WithFormatter() throws Exception { @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - 
.addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))) + .get(); assertHitCount(searchResponse, 10); @@ -286,69 +297,75 @@ public void testSingleValuedFieldGetProperty() throws Exception { ExtendedStats stats = global.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - ExtendedStats statsFromProperty = (ExtendedStats) ((InternalAggregation)global).getProperty("stats"); + ExtendedStats statsFromProperty = (ExtendedStats) ((InternalAggregation) global).getProperty("stats"); assertThat(statsFromProperty, notNullValue()); assertThat(statsFromProperty, sameInstance(stats)); double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; assertThat(stats.getAvg(), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.avg"), equalTo(expectedAvgValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); double expectedMinValue = 1.0; assertThat(stats.getMin(), equalTo(expectedMinValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.min"), equalTo(expectedMinValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); double expectedMaxValue = 10.0; assertThat(stats.getMax(), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.max"), equalTo(expectedMaxValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; assertThat(stats.getSum(), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.sum"), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); long expectedCountValue = 10; assertThat(stats.getCount(), equalTo(expectedCountValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); double expectedSumOfSquaresValue = (double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100; assertThat(stats.getSumOfSquares(), equalTo(expectedSumOfSquaresValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue)); double expectedVarianceValue = variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertThat(stats.getVariance(), equalTo(expectedVarianceValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.variance"), equalTo(expectedVarianceValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.variance"), equalTo(expectedVarianceValue)); double expectedVariancePopulationValue = variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertThat(stats.getVariancePopulation(), equalTo(expectedVariancePopulationValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.variance_population"), - 
equalTo(expectedVariancePopulationValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.variance_population"), + equalTo(expectedVariancePopulationValue) + ); double expectedVarianceSamplingValue = varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertThat(stats.getVarianceSampling(), equalTo(expectedVarianceSamplingValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.variance_sampling"), equalTo(expectedVarianceSamplingValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.variance_sampling"), equalTo(expectedVarianceSamplingValue)); double expectedStdDevValue = stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertThat(stats.getStdDeviation(), equalTo(expectedStdDevValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.std_deviation"), equalTo(expectedStdDevValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.std_deviation"), equalTo(expectedStdDevValue)); double expectedStdDevPopulationValue = stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertThat(stats.getStdDeviationPopulation(), equalTo(expectedStdDevValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.std_deviation_population"), - equalTo(expectedStdDevPopulationValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.std_deviation_population"), + equalTo(expectedStdDevPopulationValue) + ); double expectedStdDevSamplingValue = stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10); assertThat(stats.getStdDeviationSampling(), equalTo(expectedStdDevSamplingValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.std_deviation_sampling"), - equalTo(expectedStdDevSamplingValue)); + assertThat( + (double) ((InternalAggregation) global).getProperty("stats.std_deviation_sampling"), + equalTo(expectedStdDevSamplingValue) + ); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("value").sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("value").sigma(sigma)) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100)); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); @@ -362,26 +379,25 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception 
{ public void testSingleValuedFieldWithValueScript() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .field("value") - .script(new Script(ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + .sigma(sigma) + ) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10)); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); assertThat(stats.getMin(), equalTo(2.0)); assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); @@ -397,25 +413,25 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { params.put("inc", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + .sigma(sigma) + ) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10)); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); assertThat(stats.getMin(), equalTo(2.0)); assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121)); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); assertThat(stats.getVarianceSampling(), 
equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); @@ -429,31 +445,42 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { public void testMultiValuedField() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(extendedStats("stats").field("values").sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").field("values").sigma(sigma)) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20)); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); assertThat(stats.getMin(), equalTo(2.0)); assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); assertThat(stats.getCount(), equalTo(20L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144)); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) + ); assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getVariancePopulation(), - equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getVarianceSampling(), - equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); - assertThat(stats.getStdDeviationSampling(), - equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); checkUpperLowerBounds(stats, sigma); } @@ -461,34 +488,43 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .field("values") - .script(new Script(ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) - .sigma(sigma)) - .get(); + 
.setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", Collections.emptyMap())) + .sigma(sigma) + ) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20)); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + ); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); assertThat(stats.getCount(), equalTo(20L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); checkUpperLowerBounds(stats, sigma); } @@ -499,33 +535,43 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { params.put("dec", 1); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", 
params)) + .sigma(sigma) + ) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20)); + assertThat( + stats.getAvg(), + equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 20) + ); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); assertThat(stats.getCount(), equalTo(20L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) + ); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); checkUpperLowerBounds(stats, sigma); } @@ -533,31 +579,31 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { public void testScriptSingleValued() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .script(new Script(ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap())) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) + ).sigma(sigma) + ) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); 
assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10))); - assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10))); + assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100)); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); checkUpperLowerBounds(stats, sigma); } @@ -570,30 +616,27 @@ public void testScriptSingleValuedWithParams() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .script(script) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").script(script).sigma(sigma)) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10)); + assertThat(stats.getAvg(), equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11) / 10)); assertThat(stats.getMin(), equalTo(2.0)); assertThat(stats.getMax(), equalTo(11.0)); - assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11)); assertThat(stats.getCount(), equalTo(10L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviationPopulation(), 
equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11))); + assertThat(stats.getSumOfSquares(), equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121)); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVariancePopulation(), equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationPopulation(), equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); checkUpperLowerBounds(stats, sigma); } @@ -601,35 +644,46 @@ public void testScriptSingleValuedWithParams() throws Exception { public void testScriptMultiValued() throws Exception { double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .script(new Script(ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap())) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + extendedStats("stats").script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) + ).sigma(sigma) + ) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20)); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); assertThat(stats.getMin(), equalTo(2.0)); assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); assertThat(stats.getCount(), equalTo(20L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144)); - assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12))); - assertThat(stats.getVariancePopulation(), - equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12))); - assertThat(stats.getVarianceSampling(), - equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12))); - assertThat(stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12))); - assertThat(stats.getStdDeviationSampling(), - equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12))); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 121 + 144) + ); + assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getVariancePopulation(), + equalTo(variancePopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); + assertThat( + stats.getStdDeviationSampling(), + equalTo(stdDevSampling(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + ); checkUpperLowerBounds(stats, sigma); } @@ -638,46 +692,52 @@ public void testScriptMultiValuedWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); - Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "[ doc['value'].value, doc['value'].value - dec ]", - params); + Script script = new Script( + ScriptType.INLINE, + AggregationTestScriptsPlugin.NAME, + "[ doc['value'].value, doc['value'].value - dec ]", + params + ); double sigma = randomDouble() * randomIntBetween(1, 10); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - extendedStats("stats") - .script(script) - .sigma(sigma)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(extendedStats("stats").script(script).sigma(sigma)) + .get(); assertHitCount(searchResponse, 10); ExtendedStats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20)); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9) / 20)); assertThat(stats.getMin(), equalTo(0.0)); assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9)); assertThat(stats.getCount(), equalTo(20L)); - assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+0+1+4+9+16+25+36+49+64+81)); - assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9))); - assertThat(stats.getVariancePopulation(), - equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9))); - assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9))); - assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9))); - assertThat(stats.getStdDeviationPopulation(), - equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9))); - assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9))); + assertThat( + stats.getSumOfSquares(), + equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100 + 0 + 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81) + ); + assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat(stats.getVariancePopulation(), 
equalTo(variancePopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat(stats.getVarianceSampling(), equalTo(varianceSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); + assertThat( + stats.getStdDeviationPopulation(), + equalTo(stdDevPopulation(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) + ); + assertThat(stats.getStdDeviationSampling(), equalTo(stdDevSampling(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); checkUpperLowerBounds(stats, sigma); } public void testEmptySubAggregation() { SearchResponse searchResponse = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(terms("value").field("value") - .subAggregation(missing("values").field("values") - .subAggregation(extendedStats("stats").field("value")))) + .addAggregation( + terms("value").field("value") + .subAggregation(missing("values").field("values").subAggregation(extendedStats("stats").field("value"))) + ) .get(); assertHitCount(searchResponse, 10); @@ -716,12 +776,14 @@ public void testEmptySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value") + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") .order(BucketOrder.compound(BucketOrder.aggregation("filter>extendedStats.avg", true))) - .subAggregation( - filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value")))) - .get(); + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(extendedStats("extendedStats").field("value"))) + ) + .get(); assertHitCount(searchResponse, 10); @@ -765,10 +827,14 @@ public void testOrderByEmptyAggregation() throws Exception { private void checkUpperLowerBounds(ExtendedStats stats, double sigma) { assertThat(stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), equalTo(stats.getAvg() + (stats.getStdDeviation() * sigma))); assertThat(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), equalTo(stats.getAvg() - (stats.getStdDeviation() * sigma))); - assertThat(stats.getStdDeviationBound(Bounds.UPPER_POPULATION), equalTo(stats.getAvg() + - (stats.getStdDeviationPopulation() * sigma))); - assertThat(stats.getStdDeviationBound(Bounds.LOWER_POPULATION), equalTo(stats.getAvg() - - (stats.getStdDeviationPopulation() * sigma))); + assertThat( + stats.getStdDeviationBound(Bounds.UPPER_POPULATION), + equalTo(stats.getAvg() + (stats.getStdDeviationPopulation() * sigma)) + ); + assertThat( + stats.getStdDeviationBound(Bounds.LOWER_POPULATION), + equalTo(stats.getAvg() - (stats.getStdDeviationPopulation() * sigma)) + ); assertThat(stats.getStdDeviationBound(Bounds.UPPER_SAMPLING), equalTo(stats.getAvg() + (stats.getStdDeviationSampling() * sigma))); assertThat(stats.getStdDeviationBound(Bounds.LOWER_SAMPLING), equalTo(stats.getAvg() - (stats.getStdDeviationSampling() * sigma))); } @@ -778,50 +844,133 @@ private void checkUpperLowerBounds(ExtendedStats stats, double sigma) { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(extendedStats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(extendedStats("foo").field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + extendedStats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(extendedStats("foo").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java index b63cc43d60090..af498a17fe5b2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java @@ -38,9 +38,8 @@ public class GeoBoundsIT extends AbstractGeoTestCase { public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME) - .wrapLongitude(false)) - .get(); + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertSearchResponse(response); @@ -56,12 +55,10 @@ public void testSingleValuedField() throws Exception { } public void testSingleValuedField_getProperty() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch(IDX_NAME) - .setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false))) - .get(); + SearchResponse searchResponse = client().prepareSearch(IDX_NAME) + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false))) + .get(); assertSearchResponse(searchResponse); @@ -75,30 +72,32 @@ public void testSingleValuedField_getProperty() throws Exception { GeoBounds geobounds = global.getAggregations().get(aggName); assertThat(geobounds, notNullValue()); assertThat(geobounds.getName(), equalTo(aggName)); - assertThat((GeoBounds) ((InternalAggregation)global).getProperty(aggName), sameInstance(geobounds)); + assertThat((GeoBounds) ((InternalAggregation) global).getProperty(aggName), sameInstance(geobounds)); GeoPoint topLeft = geobounds.topLeft(); GeoPoint bottomRight = geobounds.bottomRight(); assertThat(topLeft.lat(), closeTo(singleTopLeft.lat(), 
GEOHASH_TOLERANCE)); assertThat(topLeft.lon(), closeTo(singleTopLeft.lon(), GEOHASH_TOLERANCE)); assertThat(bottomRight.lat(), closeTo(singleBottomRight.lat(), GEOHASH_TOLERANCE)); assertThat(bottomRight.lon(), closeTo(singleBottomRight.lon(), GEOHASH_TOLERANCE)); - assertThat((double) ((InternalAggregation)global).getProperty(aggName + ".top"), closeTo(singleTopLeft.lat(), GEOHASH_TOLERANCE)); - assertThat((double) ((InternalAggregation)global).getProperty(aggName + ".left"), closeTo(singleTopLeft.lon(), GEOHASH_TOLERANCE)); - assertThat((double) ((InternalAggregation)global).getProperty(aggName + ".bottom"), - closeTo(singleBottomRight.lat(), GEOHASH_TOLERANCE)); - assertThat((double) ((InternalAggregation)global).getProperty(aggName + ".right"), - closeTo(singleBottomRight.lon(), GEOHASH_TOLERANCE)); + assertThat((double) ((InternalAggregation) global).getProperty(aggName + ".top"), closeTo(singleTopLeft.lat(), GEOHASH_TOLERANCE)); + assertThat((double) ((InternalAggregation) global).getProperty(aggName + ".left"), closeTo(singleTopLeft.lon(), GEOHASH_TOLERANCE)); + assertThat( + (double) ((InternalAggregation) global).getProperty(aggName + ".bottom"), + closeTo(singleBottomRight.lat(), GEOHASH_TOLERANCE) + ); + assertThat( + (double) ((InternalAggregation) global).getProperty(aggName + ".right"), + closeTo(singleBottomRight.lon(), GEOHASH_TOLERANCE) + ); } public void testMultiValuedField() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME) - .addAggregation(geoBounds(aggName).field(MULTI_VALUED_FIELD_NAME) - .wrapLongitude(false)) - .get(); + .addAggregation(geoBounds(aggName).field(MULTI_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertSearchResponse(response); - GeoBounds geoBounds = response.getAggregations().get(aggName); assertThat(geoBounds, notNullValue()); assertThat(geoBounds.getName(), equalTo(aggName)); @@ -112,9 +111,8 @@ public void testMultiValuedField() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch(UNMAPPED_IDX_NAME) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME) - .wrapLongitude(false)) - .get(); + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertSearchResponse(response); @@ -129,9 +127,8 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME, UNMAPPED_IDX_NAME) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME) - .wrapLongitude(false)) - .get(); + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertSearchResponse(response); @@ -148,10 +145,9 @@ public void testPartiallyUnmapped() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch(EMPTY_IDX_NAME) - .setQuery(matchAllQuery()) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME) - .wrapLongitude(false)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); GeoBounds geoBounds = searchResponse.getAggregations().get(aggName); @@ -165,9 +161,8 @@ public void testEmptyAggregation() throws Exception { public void testSingleValuedFieldNearDateLine() throws Exception { SearchResponse response = 
client().prepareSearch(DATELINE_IDX_NAME) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME) - .wrapLongitude(false)) - .get(); + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertSearchResponse(response); @@ -190,8 +185,8 @@ public void testSingleValuedFieldNearDateLineWrapLongitude() throws Exception { GeoPoint geoValuesTopLeft = new GeoPoint(38, 170); GeoPoint geoValuesBottomRight = new GeoPoint(-24, -175); SearchResponse response = client().prepareSearch(DATELINE_IDX_NAME) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(true)) - .get(); + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(true)) + .get(); assertSearchResponse(response); @@ -211,9 +206,11 @@ public void testSingleValuedFieldNearDateLineWrapLongitude() throws Exception { */ public void testSingleValuedFieldAsSubAggToHighCardTermsAgg() { SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) - .addAggregation(terms("terms").field(NUMBER_FIELD_NAME).subAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME) - .wrapLongitude(false))) - .get(); + .addAggregation( + terms("terms").field(NUMBER_FIELD_NAME) + .subAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + ) + .get(); assertSearchResponse(response); @@ -238,7 +235,8 @@ public void testSingleValuedFieldAsSubAggToHighCardTermsAgg() { public void testSingleValuedFieldWithZeroLon() throws Exception { SearchResponse response = client().prepareSearch(IDX_ZERO_NAME) - .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)).get(); + .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java index ce222a9000651..f377a5be1c845 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidIT.java @@ -36,9 +36,9 @@ public class GeoCentroidIT extends AbstractGeoTestCase { public void testEmptyAggregation() throws Exception { SearchResponse response = client().prepareSearch(EMPTY_IDX_NAME) - .setQuery(matchAllQuery()) - .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + .get(); assertSearchResponse(response); GeoCentroid geoCentroid = response.getAggregations().get(aggName); @@ -52,8 +52,8 @@ public void testEmptyAggregation() throws Exception { public void testUnmapped() throws Exception { SearchResponse response = client().prepareSearch(UNMAPPED_IDX_NAME) - .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) - .get(); + .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + .get(); assertSearchResponse(response); GeoCentroid geoCentroid = response.getAggregations().get(aggName); @@ -66,8 +66,8 @@ public void testUnmapped() throws Exception { public void testPartiallyUnmapped() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME, UNMAPPED_IDX_NAME) - .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) - .get(); + 
.addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + .get(); assertSearchResponse(response); GeoCentroid geoCentroid = response.getAggregations().get(aggName); @@ -81,9 +81,9 @@ public void testPartiallyUnmapped() throws Exception { public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME) - .setQuery(matchAllQuery()) - .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + .get(); assertSearchResponse(response); GeoCentroid geoCentroid = response.getAggregations().get(aggName); @@ -97,9 +97,9 @@ public void testSingleValuedField() throws Exception { public void testSingleValueFieldGetProperty() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME) - .setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME))) + .get(); assertSearchResponse(response); Global global = response.getAggregations().get("global"); @@ -112,24 +112,28 @@ public void testSingleValueFieldGetProperty() throws Exception { GeoCentroid geoCentroid = global.getAggregations().get(aggName); assertThat(geoCentroid, notNullValue()); assertThat(geoCentroid.getName(), equalTo(aggName)); - assertThat((GeoCentroid) ((InternalAggregation)global).getProperty(aggName), sameInstance(geoCentroid)); + assertThat((GeoCentroid) ((InternalAggregation) global).getProperty(aggName), sameInstance(geoCentroid)); GeoPoint centroid = geoCentroid.centroid(); assertThat(centroid.lat(), closeTo(singleCentroid.lat(), GEOHASH_TOLERANCE)); assertThat(centroid.lon(), closeTo(singleCentroid.lon(), GEOHASH_TOLERANCE)); - assertThat(((GeoPoint) ((InternalAggregation)global).getProperty(aggName + ".value")).lat(), - closeTo(singleCentroid.lat(), GEOHASH_TOLERANCE)); - assertThat(((GeoPoint) ((InternalAggregation)global).getProperty(aggName + ".value")).lon(), - closeTo(singleCentroid.lon(), GEOHASH_TOLERANCE)); - assertThat((double) ((InternalAggregation)global).getProperty(aggName + ".lat"), closeTo(singleCentroid.lat(), GEOHASH_TOLERANCE)); - assertThat((double) ((InternalAggregation)global).getProperty(aggName + ".lon"), closeTo(singleCentroid.lon(), GEOHASH_TOLERANCE)); + assertThat( + ((GeoPoint) ((InternalAggregation) global).getProperty(aggName + ".value")).lat(), + closeTo(singleCentroid.lat(), GEOHASH_TOLERANCE) + ); + assertThat( + ((GeoPoint) ((InternalAggregation) global).getProperty(aggName + ".value")).lon(), + closeTo(singleCentroid.lon(), GEOHASH_TOLERANCE) + ); + assertThat((double) ((InternalAggregation) global).getProperty(aggName + ".lat"), closeTo(singleCentroid.lat(), GEOHASH_TOLERANCE)); + assertThat((double) ((InternalAggregation) global).getProperty(aggName + ".lon"), closeTo(singleCentroid.lon(), GEOHASH_TOLERANCE)); assertEquals(numDocs, (long) ((InternalAggregation) global).getProperty(aggName + ".count")); } public void testMultiValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch(IDX_NAME) - .setQuery(matchAllQuery()) - .addAggregation(geoCentroid(aggName).field(MULTI_VALUED_FIELD_NAME)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(geoCentroid(aggName).field(MULTI_VALUED_FIELD_NAME)) + .get(); assertSearchResponse(searchResponse); GeoCentroid 
geoCentroid = searchResponse.getAggregations().get(aggName); @@ -143,9 +147,10 @@ public void testMultiValuedField() throws Exception { public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) - .addAggregation(geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME) - .subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME))) - .get(); + .addAggregation( + geohashGrid("geoGrid").field(SINGLE_VALUED_FIELD_NAME).subAggregation(geoCentroid(aggName).field(SINGLE_VALUED_FIELD_NAME)) + ) + .get(); assertSearchResponse(response); GeoGrid grid = response.getAggregations().get("geoGrid"); @@ -156,10 +161,16 @@ public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { String geohash = cell.getKeyAsString(); GeoPoint expectedCentroid = expectedCentroidsForGeoHash.get(geohash); GeoCentroid centroidAgg = cell.getAggregations().get(aggName); - assertThat("Geohash " + geohash + " has wrong centroid latitude ", expectedCentroid.lat(), - closeTo(centroidAgg.centroid().lat(), GEOHASH_TOLERANCE)); - assertThat("Geohash " + geohash + " has wrong centroid longitude", expectedCentroid.lon(), - closeTo(centroidAgg.centroid().lon(), GEOHASH_TOLERANCE)); + assertThat( + "Geohash " + geohash + " has wrong centroid latitude ", + expectedCentroid.lat(), + closeTo(centroidAgg.centroid().lat(), GEOHASH_TOLERANCE) + ); + assertThat( + "Geohash " + geohash + " has wrong centroid longitude", + expectedCentroid.lon(), + closeTo(centroidAgg.centroid().lon(), GEOHASH_TOLERANCE) + ); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 2809361425fb4..46b6421086703 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -14,12 +14,12 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -58,15 +58,15 @@ private static double[] randomPercents(long minValue, long maxValue) { final double[] percents = new double[length]; for (int i = 0; i < percents.length; ++i) { switch (randomInt(20)) { - case 0: - percents[i] = minValue; - break; - case 1: - percents[i] = maxValue; - break; - default: - percents[i] = (randomDouble() * (maxValue - minValue)) + minValue; - break; + case 0: + percents[i] = minValue; + break; + case 1: + percents[i] = maxValue; + break; + default: + percents[i] = (randomDouble() * (maxValue - minValue)) + minValue; + break; } } Arrays.sort(percents); @@ -105,19 +105,19 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal @Override public void testEmptyAggregation() throws Exception { int sigDigits 
= randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo") - .field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - percentileRanks("percentile_ranks", new double[]{10, 15}) - .field("value").method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits))) - .get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + ) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -135,15 +135,14 @@ public void testEmptyAggregation() throws Exception { @Override public void testUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", new double[]{0, 10, 15, 100}) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value")) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx_unmapped") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 }).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); @@ -160,13 +159,14 @@ public void testUnmapped() throws Exception { public void testSingleValuedField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR).numberOfSignificantValueDigits(sigDigits) - .field("value")) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + .get(); assertHitCount(searchResponse, 10); @@ -177,30 +177,34 @@ public void testSingleValuedField() throws Exception { public void testNullValuesField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = null; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value")) - .get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + .get() + ); assertThat(e.getMessage(), equalTo("[values] must not be null: [percentile_ranks]")); } public void testEmptyValuesField() throws Exception { int 
sigDigits = randomSignificantDigits(); final double[] pcts = new double[0]; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value")) - .get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + .get() + ); assertThat(e.getMessage(), equalTo("[values] must not be an empty array: [percentile_ranks]")); } @@ -208,16 +212,16 @@ public void testEmptyValuesField() throws Exception { public void testSingleValuedFieldGetProperty() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value"))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -231,20 +235,21 @@ public void testSingleValuedFieldGetProperty() throws Exception { PercentileRanks values = global.getAggregations().get("percentile_ranks"); assertThat(values, notNullValue()); assertThat(values.getName(), equalTo("percentile_ranks")); - assertThat(((InternalAggregation)global).getProperty("percentile_ranks"), sameInstance(values)); + assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); } public void testSingleValuedFieldOutsideRange() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = new double[] { minValue - 1, maxValue + 1 }; - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR).numberOfSignificantValueDigits(sigDigits) - .field("value")) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + .get(); assertHitCount(searchResponse, 10); @@ -256,13 +261,14 @@ public void testSingleValuedFieldOutsideRange() throws Exception { public void testSingleValuedFieldPartiallyUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = client() - .prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR).numberOfSignificantValueDigits(sigDigits) - .field("value")) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") + .setQuery(matchAllQuery()) + .addAggregation( + 
percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + ) + .get(); assertHitCount(searchResponse, 10); @@ -274,16 +280,15 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { public void testSingleValuedFieldWithValueScript() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertHitCount(searchResponse, 10); @@ -297,16 +302,15 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -318,13 +322,14 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { public void testMultiValuedField() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues, maxValues); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR).numberOfSignificantValueDigits(sigDigits) - .field("values")) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + ) + .get(); assertHitCount(searchResponse, 10); @@ -336,16 +341,15 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - 
.script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertHitCount(searchResponse, 10); @@ -356,16 +360,15 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptReverse() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(20 - maxValues, 20 - minValues); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap()))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) + ) + .get(); assertHitCount(searchResponse, 10); @@ -379,16 +382,15 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { Map params = new HashMap<>(); params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -400,15 +402,14 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { public void testScriptSingleValued() throws Exception { int sigDigits = randomSignificantDigits(); final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) + ) + .get(); 
assertHitCount(searchResponse, 10); @@ -425,15 +426,14 @@ public void testScriptSingleValuedWithParams() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value - dec", params); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ) + .get(); assertHitCount(searchResponse, 10); @@ -448,15 +448,14 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ) + .get(); assertHitCount(searchResponse, 10); @@ -470,15 +469,14 @@ public void testScriptMultiValuedWithParams() throws Exception { Script script = AggregationTestScriptsPlugin.DECREMENT_ALL_VALUES; final double[] pcts = randomPercents(minValues - 1, maxValues - 1); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts) - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .script(script)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .script(script) + ) + .get(); assertHitCount(searchResponse, 10); @@ -489,15 +487,19 @@ public void testScriptMultiValuedWithParams() throws Exception { public void testOrderBySubAggregation() { int sigDigits = randomSignificantDigits(); boolean asc = randomBoolean(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(2L) - .subAggregation( - percentileRanks("percentile_ranks", new double[]{99}).field("value").method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits)) - .order(BucketOrder.aggregation("percentile_ranks", "99", asc))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation( + percentileRanks("percentile_ranks", new double[] { 99 }).field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + ) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -517,11 +519,18 @@ public void testOrderBySubAggregation() { @Override public 
void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) - .subAggregation(filter("filter", termQuery("value", 100)) - .subAggregation(percentileRanks("ranks", new double[]{99}).method(PercentilesMethod.HDR).field("value")))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.HDR).field("value") + ) + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -551,55 +560,138 @@ public void testOrderByEmptyAggregation() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client() - .prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[]{50.0}) - .method(PercentilesMethod.HDR).field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) + .field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + 
.indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client() - .prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[]{50.0}) - .method(PercentilesMethod.HDR).field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR) + .field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[]{50.0}).method(PercentilesMethod.HDR).field("d")).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(percentileRanks("foo", new double[] { 50.0 }).method(PercentilesMethod.HDR).field("d")) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index 9b46cee19d929..0a5d19b7af884 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -61,18 +61,18 @@ private static double[] randomPercentiles() { final Set uniquedPercentiles = new HashSet<>(); while (uniquedPercentiles.size() < length) { switch (randomInt(20)) { - case 0: - uniquedPercentiles.add(0.0); - break; - case 1: - 
uniquedPercentiles.add(100.0); - break; - default: - uniquedPercentiles.add(randomDouble() * 100); - break; + case 0: + uniquedPercentiles.add(0.0); + break; + case 1: + uniquedPercentiles.add(100.0); + break; + default: + uniquedPercentiles.add(randomDouble() * 100); + break; } } - double[] percentiles= uniquedPercentiles.stream().mapToDouble(Double::doubleValue).sorted().toArray(); + double[] percentiles = uniquedPercentiles.stream().mapToDouble(Double::doubleValue).sorted().toArray(); LogManager.getLogger(HDRPercentilesIT.class).info("Using percentiles={}", Arrays.toString(percentiles)); return percentiles; } @@ -108,20 +108,20 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa @Override public void testEmptyAggregation() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo") - .field("value") - .interval(1L) - .minDocCount(0) - .subAggregation( - percentiles("percentiles").field("value") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .percentiles(10, 15))) - .get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + percentiles("percentiles").field("value") + .numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .percentiles(10, 15) + ) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -139,12 +139,15 @@ public void testEmptyAggregation() throws Exception { @Override public void testUnmapped() throws Exception { int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits).method(PercentilesMethod.HDR).field("value") - .percentiles(0, 10, 15, 100)).get(); + SearchResponse searchResponse = client().prepareSearch("idx_unmapped") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(0, 10, 15, 100) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); @@ -161,13 +164,15 @@ public void testUnmapped() throws Exception { public void testSingleValuedField() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomIntBetween(1, 5); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits).method(PercentilesMethod.HDR).field("value") - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -179,14 +184,17 @@ public void testSingleValuedField() throws Exception { public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse 
searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits).method(PercentilesMethod.HDR) - .field("value") - .percentiles(pcts))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -200,7 +208,7 @@ public void testSingleValuedFieldGetProperty() throws Exception { Percentiles percentiles = global.getAggregations().get("percentiles"); assertThat(percentiles, notNullValue()); assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(((InternalAggregation)global).getProperty("percentiles"), sameInstance(percentiles)); + assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); } @@ -208,13 +216,15 @@ public void testSingleValuedFieldGetProperty() throws Exception { public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits).method(PercentilesMethod.HDR).field("value") - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -226,17 +236,16 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -251,17 +260,16 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = 
client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -273,13 +281,15 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { public void testMultiValuedField() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles").numberOfSignificantValueDigits(sigDigits).method(PercentilesMethod.HDR).field("values") - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -291,17 +301,16 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -312,17 +321,16 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "20 - _value", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -337,17 +345,16 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - 
percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -359,16 +366,15 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { public void testScriptSingleValued() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -385,16 +391,15 @@ public void testScriptSingleValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -409,16 +414,15 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - .method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -432,16 +436,15 @@ public void testScriptMultiValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); int sigDigits = randomSignificantDigits(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentiles("percentiles") - .numberOfSignificantValueDigits(sigDigits) - 
.method(PercentilesMethod.HDR) - .script(script) - .percentiles(pcts)) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + percentiles("percentiles").numberOfSignificantValueDigits(sigDigits) + .method(PercentilesMethod.HDR) + .script(script) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -452,17 +455,20 @@ public void testScriptMultiValuedWithParams() throws Exception { public void testOrderBySubAggregation() { int sigDigits = randomSignificantDigits(); boolean asc = randomBoolean(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(2L) - .subAggregation( - percentiles("percentiles").field("value") - .method(PercentilesMethod.HDR) - .numberOfSignificantValueDigits(sigDigits) - .percentiles(99)) - .order(BucketOrder.aggregation("percentiles", "99", asc))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation( + percentiles("percentiles").field("value") + .method(PercentilesMethod.HDR) + .numberOfSignificantValueDigits(sigDigits) + .percentiles(99) + ) + .order(BucketOrder.aggregation("percentiles", "99", asc)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -483,12 +489,17 @@ public void testOrderBySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) - .subAggregation(filter("filter", termQuery("value", 100)) - .subAggregation(percentiles("percentiles").method(PercentilesMethod.HDR).field("value")))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentiles("percentiles").method(PercentilesMethod.HDR).field("value") + ) + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -518,51 +529,140 @@ public void testOrderByEmptyAggregation() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentiles("foo").method(PercentilesMethod.HDR) + .field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentiles("foo").method(PercentilesMethod.HDR) + .field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertSearchResponse(r); - 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(percentiles("foo").method(PercentilesMethod.HDR).field("d").percentiles(50.0)) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index f85b5fd9c4de0..ba66cd18d53ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -44,8 +44,8 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; -import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.IsCloseToRelative.closeToRelative; import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.ExactMedianAbsoluteDeviation.calculateMAD; +import static org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregatorTests.IsCloseToRelative.closeToRelative; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; @@ -69,10 +69,7 @@ public class MedianAbsoluteDeviationIT extends AbstractNumericTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - final Settings settings = Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); + final 
Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build(); createIndex("idx", settings); createIndex("idx_unmapped", settings); @@ -96,15 +93,17 @@ public void setupSuiteScopeCluster() throws Exception { multiValueSample[i * 2] = firstMultiValueDatapoint; multiValueSample[(i * 2) + 1] = secondMultiValueDatapoint; - IndexRequestBuilder builder = client().prepareIndex("idx").setId(String.valueOf(i)) - .setSource(jsonBuilder() - .startObject() + IndexRequestBuilder builder = client().prepareIndex("idx") + .setId(String.valueOf(i)) + .setSource( + jsonBuilder().startObject() .field("value", singleValueDatapoint) .startArray("values") - .value(firstMultiValueDatapoint) - .value(secondMultiValueDatapoint) + .value(firstMultiValueDatapoint) + .value(secondMultiValueDatapoint) .endArray() - .endObject()); + .endObject() + ); builders.add(builder); } @@ -114,16 +113,15 @@ public void setupSuiteScopeCluster() throws Exception { indexRandom(true, builders); - prepareCreate("empty_bucket_idx") - .setMapping("value", "type=integer") - .get(); + prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId(String.valueOf(i)).setSource(jsonBuilder() - .startObject() - .field("value", i*2) - .endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -144,16 +142,8 @@ private static MedianAbsoluteDeviationAggregationBuilder randomBuilder() { @Override public void testEmptyAggregation() throws Exception { - final SearchResponse response = client() - .prepareSearch("empty_bucket_idx") - .addAggregation( - histogram("histogram") - .field("value") - .interval(1) - .minDocCount(0) - .subAggregation( - randomBuilder() - .field("value"))) + final SearchResponse response = client().prepareSearch("empty_bucket_idx") + .addAggregation(histogram("histogram").field("value").interval(1).minDocCount(0).subAggregation(randomBuilder().field("value"))) .get(); assertHitCount(response, 2); @@ -176,12 +166,9 @@ public void testUnmapped() throws Exception { @Override public void testSingleValuedField() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation( - randomBuilder() - .field("value")) + .addAggregation(randomBuilder().field("value")) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -194,14 +181,9 @@ public void testSingleValuedField() throws Exception { @Override public void testSingleValuedFieldGetProperty() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation( - global("global") - .subAggregation( - randomBuilder() - .field("value"))) + .addAggregation(global("global").subAggregation(randomBuilder().field("value"))) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -221,12 +203,9 @@ public void testSingleValuedFieldGetProperty() throws Exception { @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") + final SearchResponse response = 
client().prepareSearch("idx", "idx_unmapped") .setQuery(matchAllQuery()) - .addAggregation( - randomBuilder() - .field("value")) + .addAggregation(randomBuilder().field("value")) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -239,13 +218,12 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { @Override public void testSingleValuedFieldWithValueScript() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))) + randomBuilder().field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -254,9 +232,7 @@ public void testSingleValuedFieldWithValueScript() throws Exception { assertThat(mad, notNullValue()); assertThat(mad.getName(), is("mad")); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) - .map(point -> point + 1) - .toArray()); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } @@ -265,13 +241,12 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) + randomBuilder().field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -280,20 +255,15 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { assertThat(mad, notNullValue()); assertThat(mad.getName(), is("mad")); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) - .map(point -> point + 1) - .toArray()); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } @Override public void testMultiValuedField() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation( - randomBuilder() - .field("values")) + .addAggregation(randomBuilder().field("values")) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -306,13 +276,12 @@ public void testMultiValuedField() throws Exception { @Override public void testMultiValuedFieldWithValueScript() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))) + randomBuilder().field("values") + .script(new Script(ScriptType.INLINE, 
AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -320,9 +289,7 @@ public void testMultiValuedFieldWithValueScript() throws Exception { final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); assertThat(mad, notNullValue()); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample) - .map(point -> point + 1) - .toArray()); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } @@ -331,13 +298,12 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params))) + randomBuilder().field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + inc", params)) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -345,20 +311,19 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { final MedianAbsoluteDeviation mad = response.getAggregations().get("mad"); assertThat(mad, notNullValue()); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample) - .map(point -> point + 1) - .toArray()); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(multiValueSample).map(point -> point + 1).toArray()); assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } @Override public void testScriptSingleValued() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()))) + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", Collections.emptyMap()) + ) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -374,12 +339,11 @@ public void testScriptSingleValuedWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params))) + randomBuilder().script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value + inc", params)) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -388,24 +352,19 @@ public void testScriptSingleValuedWithParams() throws Exception { assertThat(mad, notNullValue()); assertThat(mad.getName(), is("mad")); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) - .map(point -> point + 1) - .toArray()); + final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample).map(point -> point + 1).toArray()); assertThat(mad.getMedianAbsoluteDeviation(), 
closeToRelative(fromIncrementedSampleMAD)); } @Override public void testScriptMultiValued() throws Exception { - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .script(new Script( - ScriptType.INLINE, - AggregationTestScriptsPlugin.NAME, - "doc['values']", - Collections.emptyMap()))) + randomBuilder().script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", Collections.emptyMap()) + ) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -421,16 +380,18 @@ public void testScriptMultiValuedWithParams() throws Exception { final Map params = new HashMap<>(); params.put("inc", 1); - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - randomBuilder() - .script(new Script( + randomBuilder().script( + new Script( ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "[ doc['value'].value, doc['value'].value + inc ]", - params))) + params + ) + ) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -439,25 +400,22 @@ public void testScriptMultiValuedWithParams() throws Exception { assertThat(mad, notNullValue()); assertThat(mad.getName(), is("mad")); - final double fromIncrementedSampleMAD = calculateMAD(Arrays.stream(singleValueSample) - .flatMap(point -> LongStream.of(point, point + 1)) - .toArray()); + final double fromIncrementedSampleMAD = calculateMAD( + Arrays.stream(singleValueSample).flatMap(point -> LongStream.of(point, point + 1)).toArray() + ); assertThat(mad.getMedianAbsoluteDeviation(), closeToRelative(fromIncrementedSampleMAD)); } public void testAsSubAggregation() throws Exception { final int rangeBoundary = (MAX_SAMPLE_VALUE + MIN_SAMPLE_VALUE) / 2; - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - range("range") - .field("value") + range("range").field("value") .addRange(MIN_SAMPLE_VALUE, rangeBoundary) .addRange(rangeBoundary, MAX_SAMPLE_VALUE) - .subAggregation( - randomBuilder() - .field("value"))) + .subAggregation(randomBuilder().field("value")) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -494,19 +452,16 @@ public void testAsSubAggregation() throws Exception { @Override public void testOrderByEmptyAggregation() throws Exception { final int numberOfBuckets = 10; - final SearchResponse response = client() - .prepareSearch("idx") + final SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) .addAggregation( - terms("terms") - .field("value") + terms("terms").field("value") .size(numberOfBuckets) .order(BucketOrder.compound(BucketOrder.aggregation("filter>mad", true))) .subAggregation( - filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)) - .subAggregation( - randomBuilder() - .field("value")))) + filter("filter", termQuery("value", MAX_SAMPLE_VALUE + 1)).subAggregation(randomBuilder().field("value")) + ) + ) .get(); assertHitCount(response, NUMBER_OF_DOCS); @@ -537,55 +492,132 @@ public void testOrderByEmptyAggregation() throws Exception { */ public void testScriptCaching() throws Exception { assertAcked( - prepareCreate("cache_test_idx") - .setMapping("d", "type=long") - .setSettings(Settings.builder() - .put("requests.cache.enable", true) - .put("number_of_shards", 1) - .put("number_of_replicas", 
1)) - .get()); - - indexRandom(true, + prepareCreate("cache_test_idx").setMapping("d", "type=long") + .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) + .get() + ); + + indexRandom( + true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(randomBuilder() - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + randomBuilder().field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(randomBuilder() - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + randomBuilder().field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + 
client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(randomBuilder().field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index d3c8795b0d884..b4ccec896eca4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -79,22 +79,27 @@ public static class CustomScriptPlugin extends MockScriptPlugin { protected Map, Object>> pluginScripts() { Map, Object>> scripts = new HashMap<>(); - scripts.put("state['count'] = 1", vars -> - aggScript(vars, state -> state.put("count", 1))); - - scripts.put("state.list.add(1)", vars -> - aggScript(vars, state -> { - // Lazily populate state.list for tests without an init script - if (state.containsKey("list") == false) { - state.put("list", new ArrayList<>()); - } + scripts.put("state['count'] = 1", vars -> aggScript(vars, state -> state.put("count", 1))); + + scripts.put("state.list.add(1)", vars -> aggScript(vars, state -> { + // Lazily populate state.list for tests without an init script + if (state.containsKey("list") == false) { + state.put("list", new ArrayList<>()); + } - ((List) state.get("list")).add(1); - })); + ((List) state.get("list")).add(1); + })); - scripts.put("state[param1] = param2", vars -> - aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars), - XContentMapValues.extractValue("params.param2", vars)))); + scripts.put( + "state[param1] = param2", + vars -> aggScript( + vars, + state -> state.put( + (String) XContentMapValues.extractValue("params.param1", vars), + XContentMapValues.extractValue("params.param2", vars) + ) + ) + ); scripts.put("vars.multiplier = 3", vars -> { ((Map) vars.get("vars")).put("multiplier", 3); @@ -105,15 +110,14 @@ protected Map, Object>> pluginScripts() { return state; }); - scripts.put("state.list.add(vars.multiplier)", vars -> - aggScript(vars, state -> { - // Lazily populate state.list for tests without an init script - if (state.containsKey("list") == false) { - state.put("list", new ArrayList<>()); - } + scripts.put("state.list.add(vars.multiplier)", vars -> aggScript(vars, state -> { + // Lazily populate state.list for tests without an init script + if (state.containsKey("list") == false) { + state.put("list", new ArrayList<>()); + } - ((List) 
state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars)); - })); + ((List) state.get("list")).add(XContentMapValues.extractValue("vars.multiplier", vars)); + })); /* * Equivalent to: @@ -184,7 +188,7 @@ protected Map, Object>> pluginScripts() { List> states = (List>) vars.get("states"); - if(states == null) { + if (states == null) { return newAggregation; } @@ -240,13 +244,12 @@ protected Map, Object>> pluginScripts() { protected Map, Object>> nonDeterministicPluginScripts() { Map, Object>> scripts = new HashMap<>(); - scripts.put("state.data = Math.random()", vars -> - aggScript(vars, state -> state.put("data", ScriptedMetricIT.randomDouble()))); - - - scripts.put("state['count'] = Math.random() >= 0.5 ? 1 : 0", vars -> - aggScript(vars, state -> state.put("count", ScriptedMetricIT.randomDouble() >= 0.5 ? 1 : 0))); + scripts.put("state.data = Math.random()", vars -> aggScript(vars, state -> state.put("data", ScriptedMetricIT.randomDouble()))); + scripts.put( + "state['count'] = Math.random() >= 0.5 ? 1 : 0", + vars -> aggScript(vars, state -> state.put("count", ScriptedMetricIT.randomDouble() >= 0.5 ? 1 : 0)) + ); scripts.put("return Math.random()", vars -> ScriptedMetricIT.randomDouble()); @@ -269,9 +272,13 @@ public void setupSuiteScopeCluster() throws Exception { numDocs = randomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { - builders.add(client().prepareIndex("idx").setId("" + i).setSource( - jsonBuilder().startObject().field("value", randomAlphaOfLengthBetween(5, 15)) - .field("l_value", i).endObject())); + builders.add( + client().prepareIndex("idx") + .setId("" + i) + .setSource( + jsonBuilder().startObject().field("value", randomAlphaOfLengthBetween(5, 15)).field("l_value", i).endObject() + ) + ); } indexRandom(true, builders); @@ -286,32 +293,73 @@ public void setupSuiteScopeCluster() throws Exception { prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field("value", i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } // When using the MockScriptPlugin we can map Stored scripts to inline scripts: // the id of the stored script is used in test method while the source of the stored script // must match a predefined script from CustomScriptPlugin.pluginScripts() method - assertAcked(client().admin().cluster().preparePutStoredScript() + assertAcked( + client().admin() + .cluster() + .preparePutStoredScript() .setId("initScript_stored") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"vars.multiplier = 3\"} }"), XContentType.JSON)); - - assertAcked(client().admin().cluster().preparePutStoredScript() + .setContent( + new BytesArray( + "{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + " \"source\": \"vars.multiplier = 3\"} }" + ), + XContentType.JSON + ) + ); + + assertAcked( + client().admin() + .cluster() + .preparePutStoredScript() .setId("mapScript_stored") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"state.list.add(vars.multiplier)\"} }"), XContentType.JSON)); - - assertAcked(client().admin().cluster().preparePutStoredScript() + .setContent( + new BytesArray( + "{\"script\": {\"lang\": \"" 
+ MockScriptPlugin.NAME + "\"," + " \"source\": \"state.list.add(vars.multiplier)\"} }" + ), + XContentType.JSON + ) + ); + + assertAcked( + client().admin() + .cluster() + .preparePutStoredScript() .setId("combineScript_stored") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum state values as a new aggregation\"} }"), XContentType.JSON)); - - assertAcked(client().admin().cluster().preparePutStoredScript() + .setContent( + new BytesArray( + "{\"script\": {\"lang\": \"" + + MockScriptPlugin.NAME + + "\"," + + " \"source\": \"sum state values as a new aggregation\"} }" + ), + XContentType.JSON + ) + ); + + assertAcked( + client().admin() + .cluster() + .preparePutStoredScript() .setId("reduceScript_stored") - .setContent(new BytesArray("{\"script\": {\"lang\": \"" + MockScriptPlugin.NAME + "\"," + - " \"source\": \"sum all states (lists) values as a new aggregation\"} }"), XContentType.JSON)); + .setContent( + new BytesArray( + "{\"script\": {\"lang\": \"" + + MockScriptPlugin.NAME + + "\"," + + " \"source\": \"sum all states (lists) values as a new aggregation\"} }" + ), + XContentType.JSON + ) + ); indexRandom(true, builders); ensureSearchable(); @@ -333,8 +381,10 @@ public void setUp() throws Exception { Files.write(scripts.resolve("init_script.mockscript"), "vars.multiplier = 3".getBytes("UTF-8")); Files.write(scripts.resolve("map_script.mockscript"), "state.list.add(vars.multiplier)".getBytes("UTF-8")); Files.write(scripts.resolve("combine_script.mockscript"), "sum state values as a new aggregation".getBytes("UTF-8")); - Files.write(scripts.resolve("reduce_script.mockscript"), - "sum all states (lists) values as a new aggregation".getBytes("UTF-8")); + Files.write( + scripts.resolve("reduce_script.mockscript"), + "sum all states (lists) values as a new aggregation".getBytes("UTF-8") + ); } catch (IOException e) { throw new RuntimeException("failed to create scripts"); } @@ -347,15 +397,13 @@ protected Path nodeConfigPath(int nodeOrdinal) { public void testMap() { Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()); + Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -392,18 +440,17 @@ public void testMapWithParams() { Map aggregationParams = Collections.singletonMap("param2", 1); Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state[param1] = param2", scriptParams); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); - 
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()); + Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted") - .params(aggregationParams) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) + .addAggregation( + scriptedMetric("scripted").params(aggregationParams) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -421,8 +468,8 @@ public void testMapWithParams() { for (Object object : aggregationList) { assertThat(object, notNullValue()); assertThat(object, instanceOf(Map.class)); - Map map = (Map) object; - for (Map.Entry entry : map.entrySet()) { + Map map = (Map) object; + for (Map.Entry entry : map.entrySet()) { assertThat(entry, notNullValue()); assertThat(entry.getKey(), notNullValue()); assertThat(entry.getKey(), instanceOf(String.class)); @@ -445,21 +492,18 @@ public void testInitMutatesParams() { Map params = new HashMap<>(); params.put("vars", varsMap); - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .initScript( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) - .mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "state.list.add(vars.multiplier)", Collections.emptyMap())) - .combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op aggregation", Collections.emptyMap())) - .reduceScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap())) + .mapScript( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", Collections.emptyMap()) + ) + .combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap())) + .reduceScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -500,21 +544,20 @@ public void testMapCombineWithParams() { params.put("vars", varsMap); Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - 
.mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -554,24 +597,30 @@ public void testInitMapCombineWithParams() { params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -611,24 +660,35 @@ public void testInitMapCombineReduceWithParams() { params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states (lists) values as a new aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + 
Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -656,26 +716,37 @@ public void testInitMapCombineReduceGetProperty() throws Exception { params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states (lists) values as a new aggregation", Collections.emptyMap()); - - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - global("global") - .subAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript))) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", + Collections.emptyMap() + ); + + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + global("global").subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ) + .get(); assertSearchResponse(searchResponse); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocs)); @@ -698,9 +769,9 @@ public void testInitMapCombineReduceGetProperty() throws Exception { assertThat(object, notNullValue()); assertThat(object, instanceOf(Number.class)); assertThat(((Number) object).longValue(), equalTo(numDocs * 3)); - assertThat(((InternalAggregation)global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); - assertThat((List) ((InternalAggregation)global).getProperty("scripted.value"), sameInstance(aggregationList)); - assertThat((List) ((InternalAggregation)scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); + assertThat(((InternalAggregation) global).getProperty("scripted"), sameInstance(scriptedMetricAggregation)); + assertThat((List) ((InternalAggregation) global).getProperty("scripted.value"), sameInstance(aggregationList)); + assertThat((List) ((InternalAggregation) 
scriptedMetricAggregation).getProperty("value"), sameInstance(aggregationList)); } public void testMapCombineReduceWithParams() { @@ -710,23 +781,31 @@ public void testMapCombineReduceWithParams() { Map params = new HashMap<>(); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states (lists) values as a new aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -753,24 +832,30 @@ public void testInitMapReduceWithParams() { params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states' state.list values as a new aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -795,23 
+880,26 @@ public void testMapReduceWithParams() { Map params = new HashMap<>(); params.put("vars", varsMap); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states' state.list values as a new aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states' state.list values as a new aggregation", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -841,24 +929,35 @@ public void testInitMapCombineReduceWithParamsAndReduceParams() { reduceParams.put("multiplier", 4); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "multiplied sum all states (lists) values as a new aggregation", reduceParams); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "multiplied sum all states (lists) values as a new aggregation", + reduceParams + ); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -884,21 +983,16 @@ public void testInitMapCombineReduceWithParamsStored() { Map params = new HashMap<>(); params.put("vars", 
varsMap); - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - scriptedMetric("scripted") - .params(params) - .initScript( - new Script(ScriptType.STORED, null, "initScript_stored", Collections.emptyMap())) - .mapScript( - new Script(ScriptType.STORED, null, "mapScript_stored", Collections.emptyMap())) - .combineScript( - new Script(ScriptType.STORED, null, "combineScript_stored", Collections.emptyMap())) - .reduceScript( - new Script(ScriptType.STORED, null, "reduceScript_stored", Collections.emptyMap()))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + scriptedMetric("scripted").params(params) + .initScript(new Script(ScriptType.STORED, null, "initScript_stored", Collections.emptyMap())) + .mapScript(new Script(ScriptType.STORED, null, "mapScript_stored", Collections.emptyMap())) + .combineScript(new Script(ScriptType.STORED, null, "combineScript_stored", Collections.emptyMap())) + .reduceScript(new Script(ScriptType.STORED, null, "reduceScript_stored", Collections.emptyMap())) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); @@ -925,28 +1019,40 @@ public void testInitMapCombineReduceWithParamsAsSubAgg() { params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states (lists) values as a new aggregation", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()).setSize(1000) - .addAggregation( - histogram("histo") - .field("l_value") - .interval(1) - .subAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript))) - .get(); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .setSize(1000) + .addAggregation( + histogram("histo").field("l_value") + .interval(1) + .subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ) + .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits().value, equalTo(numDocs)); Aggregation aggregation = response.getAggregations().get("histo"); @@ -986,24 +1092,40 @@ public void testEmptyAggregation() throws Exception { params.put("vars", varsMap); Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()); - Script 
mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)", - Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "sum all states (lists) values as a new aggregation", Collections.emptyMap()); + Script mapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state.list.add(vars.multiplier)", + Collections.emptyMap() + ); + Script combineScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum state values as a new aggregation", + Collections.emptyMap() + ); + Script reduceScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "sum all states (lists) values as a new aggregation", + Collections.emptyMap() + ); SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation( - scriptedMetric("scripted") - .params(params) - .initScript(initScript) - .mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation( + scriptedMetric("scripted").params(params) + .initScript(initScript) + .mapScript(mapScript) + .combineScript(combineScript) + .reduceScript(reduceScript) + ) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -1022,106 +1144,224 @@ public void testEmptyAggregation() throws Exception { assertThat(aggregationResult.get(0), equalTo(0)); } - /** * Make sure that a request using a deterministic script gets cached and nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap()); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()); + Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); - Script ndInitScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.data = Math.random()", - Collections.emptyMap()); + Script ndInitScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.data = Math.random()", Collections.emptyMap()); - Script ndMapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = Math.random() >= 0.5 ? 1 : 0", - Collections.emptyMap()); + Script ndMapScript = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "state['count'] = Math.random() >= 0.5 ? 
1 : 0", + Collections.emptyMap() + ); - Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", - Collections.emptyMap()); + Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", Collections.emptyMap()); - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a non-deterministic init script causes the result to not be cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").initScript(ndInitScript).mapScript(mapScript).combineScript(combineScript) - .reduceScript(reduceScript)).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + scriptedMetric("foo").initScript(ndInitScript).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a non-deterministic map script causes the result to not be cached - r = client().prepareSearch("cache_test_idx").setSize(0) + r = client().prepareSearch("cache_test_idx") + .setSize(0) .addAggregation(scriptedMetric("foo").mapScript(ndMapScript).combineScript(combineScript).reduceScript(reduceScript)) .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a non-deterministic combine script causes the result to not be cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(ndRandom).reduceScript(reduceScript)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(ndRandom).reduceScript(reduceScript)) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // NOTE: random reduce scripts don't hit the query shard context (they are done on the coordinator) and so can be cached. 
- r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(ndRandom)).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(ndRandom)) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Test that all deterministic scripts cause the request to be cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testConflictingAggAndScriptParams() { Map params = Collections.singletonMap("param1", "12"); Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", params); - Script combineScript = - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); - Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "no-op list aggregation", Collections.emptyMap()); + Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap()); + Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap()); SearchRequestBuilder builder = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted") - .params(params).mapScript(mapScript) - .combineScript(combineScript) - .reduceScript(reduceScript)); + .addAggregation( + scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript) + ); SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both 
aggregation and script parameters")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index f3512e9a7edfa..bbbdc41cadfa2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -15,12 +15,12 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Collection; import java.util.Collections; @@ -50,9 +50,9 @@ protected Collection> nodePlugins() { @Override public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(stats("stats").field("value"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(stats("stats").field("value"))) + .get(); assertShardExecutionState(searchResponse, 0); @@ -75,9 +75,9 @@ public void testEmptyAggregation() throws Exception { @Override public void testSingleValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(stats("stats").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(stats("stats").field("value")) + .get(); assertShardExecutionState(searchResponse, 0); @@ -86,17 +86,19 @@ public void testSingleValuedField() throws Exception { Stats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10)); + assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10)); assertThat(stats.getMin(), equalTo(1.0)); assertThat(stats.getMax(), equalTo(10.0)); - assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10)); + assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); assertThat(stats.getCount(), equalTo(10L)); } public void testSingleValuedField_WithFormatter() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(stats("stats").format("0000.0").field("value")).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(stats("stats").format("0000.0").field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -116,8 +118,10 @@ public void testSingleValuedField_WithFormatter() throws Exception { @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - 
.addAggregation(global("global").subAggregation(stats("stats").field("value"))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(stats("stats").field("value"))) + .get(); assertHitCount(searchResponse, 10); @@ -131,32 +135,32 @@ public void testSingleValuedFieldGetProperty() throws Exception { Stats stats = global.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - Stats statsFromProperty = (Stats) ((InternalAggregation)global).getProperty("stats"); + Stats statsFromProperty = (Stats) ((InternalAggregation) global).getProperty("stats"); assertThat(statsFromProperty, notNullValue()); assertThat(statsFromProperty, sameInstance(stats)); double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10; assertThat(stats.getAvg(), equalTo(expectedAvgValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.avg"), equalTo(expectedAvgValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.avg"), equalTo(expectedAvgValue)); double expectedMinValue = 1.0; assertThat(stats.getMin(), equalTo(expectedMinValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.min"), equalTo(expectedMinValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.min"), equalTo(expectedMinValue)); double expectedMaxValue = 10.0; assertThat(stats.getMax(), equalTo(expectedMaxValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.max"), equalTo(expectedMaxValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.max"), equalTo(expectedMaxValue)); double expectedSumValue = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; assertThat(stats.getSum(), equalTo(expectedSumValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.sum"), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.sum"), equalTo(expectedSumValue)); long expectedCountValue = 10; assertThat(stats.getCount(), equalTo(expectedCountValue)); - assertThat((double) ((InternalAggregation)global).getProperty("stats.count"), equalTo((double) expectedCountValue)); + assertThat((double) ((InternalAggregation) global).getProperty("stats.count"), equalTo((double) expectedCountValue)); } @Override public void testMultiValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(stats("stats").field("values")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(stats("stats").field("values")) + .get(); assertShardExecutionState(searchResponse, 0); @@ -165,19 +169,26 @@ public void testMultiValuedField() throws Exception { Stats stats = searchResponse.getAggregations().get("stats"); assertThat(stats, notNullValue()); assertThat(stats.getName(), equalTo("stats")); - assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20)); + assertThat( + stats.getAvg(), + equalTo((double) (2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12) / 20) + ); assertThat(stats.getMin(), equalTo(2.0)); assertThat(stats.getMax(), equalTo(12.0)); - assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12)); + assertThat(stats.getSum(), equalTo((double) 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12)); 
assertThat(stats.getCount(), equalTo(20L)); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value")))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>stats.avg", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(stats("stats").field("value"))) + ) + .get(); assertHitCount(searchResponse, 10); @@ -222,47 +233,132 @@ private void assertShardExecutionState(SearchResponse response, int expectedFail * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - stats("foo").field("d").script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + stats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + 
client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation( - stats("foo").field("d").script( - new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + stats("foo").field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value + 1", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(stats("foo").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index 097bd20deb1c4..efca97589e8e1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -55,17 +55,15 @@ public void setupSuiteScopeCluster() throws Exception { // Create two indices and add the field 'route_length_miles' as an alias in // one, and a concrete field in the other. 
- prepareCreate("old_index") - .setMapping( - "transit_mode", "type=keyword", - "distance", "type=double", - "route_length_miles", "type=alias,path=distance") - .get(); - prepareCreate("new_index") - .setMapping( - "transit_mode", "type=keyword", - "route_length_miles", "type=double") - .get(); + prepareCreate("old_index").setMapping( + "transit_mode", + "type=keyword", + "distance", + "type=double", + "route_length_miles", + "type=alias,path=distance" + ).get(); + prepareCreate("new_index").setMapping("transit_mode", "type=keyword", "route_length_miles", "type=double").get(); List builders = new ArrayList<>(); builders.add(client().prepareIndex("old_index").setSource("transit_mode", "train", "distance", 42.0)); @@ -80,9 +78,9 @@ public void setupSuiteScopeCluster() throws Exception { public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(sum("sum").field("value"))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0).subAggregation(sum("sum").field("value"))) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -103,21 +101,23 @@ public void testUnmapped() throws Exception {} @Override public void testSingleValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(sum("sum").field("value")) + .get(); assertHitCount(searchResponse, 10); Sum sum = searchResponse.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10)); + assertThat(sum.getValue(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10)); } public void testSingleValuedFieldWithFormatter() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(sum("sum").format("0000.0").field("value")).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(sum("sum").format("0000.0").field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -131,8 +131,10 @@ public void testSingleValuedFieldWithFormatter() throws Exception { @Override public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(sum("sum").field("value"))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(sum("sum").field("value"))) + .get(); assertHitCount(searchResponse, 10); @@ -148,33 +150,37 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(sum.getName(), equalTo("sum")); double expectedSumValue = (double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10; assertThat(sum.getValue(), equalTo(expectedSumValue)); - assertThat((Sum) ((InternalAggregation)global).getProperty("sum"), equalTo(sum)); - assertThat((double) ((InternalAggregation)global).getProperty("sum.value"), equalTo(expectedSumValue)); - assertThat((double) 
((InternalAggregation)sum).getProperty("value"), equalTo(expectedSumValue)); + assertThat((Sum) ((InternalAggregation) global).getProperty("sum"), equalTo(sum)); + assertThat((double) ((InternalAggregation) global).getProperty("sum.value"), equalTo(expectedSumValue)); + assertThat((double) ((InternalAggregation) sum).getProperty("value"), equalTo(expectedSumValue)); } @Override public void testMultiValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(sum("sum").field("values")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(sum("sum").field("values")) + .get(); assertHitCount(searchResponse, 10); Sum sum = searchResponse.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getName(), equalTo("sum")); - assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12)); + assertThat(sum.getValue(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6 + 6 + 7 + 7 + 8 + 8 + 9 + 9 + 10 + 10 + 11 + 11 + 12)); } @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value")))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>sum", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(sum("sum").field("value"))) + ) + .get(); assertHitCount(searchResponse, 10); @@ -204,54 +210,136 @@ public void testOrderByEmptyAggregation() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(sum("foo").field("d").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(sum("foo").field("d").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + sum("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_SCRIPT, Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + 
.indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(sum("foo").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testFieldAlias() { SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(sum("sum") - .field("route_length_miles")) + .addAggregation(sum("sum").field("route_length_miles")) .get(); assertSearchResponse(response); @@ -262,12 +350,9 @@ public void testFieldAlias() { assertThat(sum.getValue(), equalTo(192.7)); } - public void testFieldAliasInSubAggregation() { + public void testFieldAliasInSubAggregation() { SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(terms("terms") - .field("transit_mode") - .subAggregation(sum("sum") - .field("route_length_miles"))) + .addAggregation(terms("terms").field("transit_mode").subAggregation(sum("sum").field("route_length_miles"))) .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 21db90ca9ebfb..7a8cb1fc0d1e4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -15,12 +15,12 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.aggregations.AggregationTestScriptsPlugin; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.filter.Filter; import org.elasticsearch.search.aggregations.bucket.global.Global; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.BucketOrder; import java.util.Arrays; import java.util.Collection; @@ -58,15 +58,15 @@ private static double[] randomPercents(long minValue, long maxValue) { final double[] percents = new double[length]; for (int i = 0; i < percents.length; ++i) { switch (randomInt(20)) { - case 0: - percents[i] = minValue; - break; - case 1: - percents[i] = maxValue; - break; - default: - percents[i] = 
(randomDouble() * (maxValue - minValue)) + minValue; - break; + case 0: + percents[i] = minValue; + break; + case 1: + percents[i] = maxValue; + break; + default: + percents[i] = (randomDouble() * (maxValue - minValue)) + minValue; + break; } } Arrays.sort(percents); @@ -103,10 +103,14 @@ private void assertConsistent(double[] pcts, PercentileRanks values, long minVal @Override public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[]{10,15}).field("value")))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 10, 15 }).field("value"))) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -123,33 +127,34 @@ public void testEmptyAggregation() throws Exception { public void testNullValuesField() throws Exception { final double[] pcts = null; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value")) - .get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value")) + .get() + ); assertThat(e.getMessage(), equalTo("[values] must not be null: [percentile_ranks]")); } public void testEmptyValuesField() throws Exception { final double[] pcts = new double[0]; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value")) - .get()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value")) + .get() + ); assertThat(e.getMessage(), equalTo("[values] must not be an empty array: [percentile_ranks]")); } @Override public void testUnmapped() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", new double[]{0, 10, 15, 100})) - .field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 0, 10, 15, 100 })).field("value")) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); @@ -166,10 +171,9 @@ public void testUnmapped() throws Exception { public void testSingleValuedField() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)) - .field("value")) - .get(); + 
.setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -180,12 +184,10 @@ public void testSingleValuedField() throws Exception { @Override public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation( - randomCompression(percentileRanks("percentile_ranks", pcts)).field("value"))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value"))) + .get(); assertHitCount(searchResponse, 10); @@ -199,16 +201,15 @@ public void testSingleValuedFieldGetProperty() throws Exception { PercentileRanks values = global.getAggregations().get("percentile_ranks"); assertThat(values, notNullValue()); assertThat(values.getName(), equalTo("percentile_ranks")); - assertThat(((InternalAggregation)global).getProperty("percentile_ranks"), sameInstance(values)); + assertThat(((InternalAggregation) global).getProperty("percentile_ranks"), sameInstance(values)); } public void testSingleValuedFieldOutsideRange() throws Exception { - final double[] pcts = new double[] {minValue - 1, maxValue + 1}; + final double[] pcts = new double[] { minValue - 1, maxValue + 1 }; SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)) - .field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -220,10 +221,9 @@ public void testSingleValuedFieldOutsideRange() throws Exception { public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)) - .field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -235,13 +235,12 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertHitCount(searchResponse, 10); @@ -255,13 +254,12 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { params.put("dec", 1); final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = 
client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -273,10 +271,9 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { public void testMultiValuedField() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)) - .field("values")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).field("values")) + .get(); assertHitCount(searchResponse, 10); @@ -288,13 +285,12 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercents(minValues - 1, maxValues - 1); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertHitCount(searchResponse, 10); @@ -305,13 +301,12 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercents(-maxValues, -minValues); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap()))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) + ) + .get(); assertHitCount(searchResponse, 10); @@ -325,13 +320,12 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { params.put("dec", 1); final double[] pcts = randomPercents(minValues - 1, maxValues - 1); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -343,12 +337,13 @@ 
public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { public void testScriptSingleValued() throws Exception { final double[] pcts = randomPercents(minValue, maxValue); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentileRanks("percentile_ranks", pcts)).script( + new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()) + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -365,11 +360,9 @@ public void testScriptSingleValuedWithParams() throws Exception { final double[] pcts = randomPercents(minValue - 1, maxValue - 1); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)).script(script)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) + .get(); assertHitCount(searchResponse, 10); @@ -382,12 +375,9 @@ public void testScriptMultiValued() throws Exception { final double[] pcts = randomPercents(minValues, maxValues); Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .script(script)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) + .get(); assertHitCount(searchResponse, 10); @@ -401,12 +391,9 @@ public void testScriptMultiValuedWithParams() throws Exception { final double[] pcts = randomPercents(minValues - 1, maxValues - 1); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentileRanks("percentile_ranks", pcts)) - .script(script)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentileRanks("percentile_ranks", pcts)).script(script)) + .get(); assertHitCount(searchResponse, 10); @@ -417,12 +404,14 @@ public void testScriptMultiValuedWithParams() throws Exception { public void testOrderBySubAggregation() { boolean asc = randomBoolean(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(2L) - .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[]{99}).field("value"))) - .order(BucketOrder.aggregation("percentile_ranks", "99", asc))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation(randomCompression(percentileRanks("percentile_ranks", new double[] { 99 }).field("value"))) + .order(BucketOrder.aggregation("percentile_ranks", "99", asc)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -442,12 +431,18 @@ public void testOrderBySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - 
.addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) - .subAggregation(filter("filter", termQuery("value", 100)) - .subAggregation(percentileRanks("ranks", new double[]{99}) - .method(PercentilesMethod.TDIGEST).field("value")))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>ranks.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentileRanks("ranks", new double[] { 99 }).method(PercentilesMethod.TDIGEST).field("value") + ) + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -477,50 +472,136 @@ public void testOrderByEmptyAggregation() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. */ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[]{50.0}) - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + 
.prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(percentileRanks("foo", new double[]{50.0}) - .field("d") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentileRanks("foo", new double[] { 50.0 }).field("d") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentileRanks("foo", new double[]{50.0}).field("d")).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation(percentileRanks("foo", new double[] { 50.0 }).field("d")) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index d397243ecb8ba..d1d89d7c02740 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -60,18 +60,18 @@ private static double[] randomPercentiles() { final Set uniquedPercentiles = new HashSet<>(); for (int i = 0; i < length; ++i) { switch (randomInt(20)) { - case 0: - uniquedPercentiles.add(0.0); - break; - case 1: - uniquedPercentiles.add(100.0); - break; - default: - uniquedPercentiles.add(randomDouble() * 100); - break; + case 0: + uniquedPercentiles.add(0.0); + break; + case 1: + uniquedPercentiles.add(100.0); + break; + default: + uniquedPercentiles.add(randomDouble() * 100); + break; } } - double[] percentiles= 
uniquedPercentiles.stream().mapToDouble(Double::doubleValue).sorted().toArray(); + double[] percentiles = uniquedPercentiles.stream().mapToDouble(Double::doubleValue).sorted().toArray(); LogManager.getLogger(TDigestPercentilesIT.class).info("Using percentiles={}", Arrays.toString(percentiles)); return percentiles; } @@ -109,11 +109,14 @@ private void assertConsistent(double[] pcts, Percentiles percentiles, long minVa @Override public void testEmptyAggregation() throws Exception { SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation(histogram("histo").field("value").interval(1L).minDocCount(0) - .subAggregation(randomCompression(percentiles("percentiles").field("value")) - .percentiles(10, 15))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(1L) + .minDocCount(0) + .subAggregation(randomCompression(percentiles("percentiles").field("value")).percentiles(10, 15)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(2L)); Histogram histo = searchResponse.getAggregations().get("histo"); @@ -131,11 +134,9 @@ public void testEmptyAggregation() throws Exception { @Override public void testUnmapped() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")) - .field("value") - .percentiles(0, 10, 15, 100)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(0, 10, 15, 100)) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); @@ -152,11 +153,9 @@ public void testUnmapped() throws Exception { public void testSingleValuedField() throws Exception { final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")) - .field("value") - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) + .get(); assertHitCount(searchResponse, 10); @@ -167,12 +166,10 @@ public void testSingleValuedField() throws Exception { @Override public void testSingleValuedFieldGetProperty() throws Exception { final double[] pcts = randomPercentiles(); - SearchResponse searchResponse = client() - .prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts))) + .get(); assertHitCount(searchResponse, 10); @@ -186,18 +183,16 @@ public void testSingleValuedFieldGetProperty() throws Exception { Percentiles percentiles = global.getAggregations().get("percentiles"); assertThat(percentiles, notNullValue()); assertThat(percentiles.getName(), equalTo("percentiles")); - assertThat(((InternalAggregation)global).getProperty("percentiles"), sameInstance(percentiles)); + assertThat(((InternalAggregation) global).getProperty("percentiles"), sameInstance(percentiles)); } @Override public void testSingleValuedFieldPartiallyUnmapped() throws Exception { final double[] pcts = 
randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")) - .field("value") - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)) + .get(); assertHitCount(searchResponse, 10); @@ -209,14 +204,13 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { public void testSingleValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -230,14 +224,13 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .field("value") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("value") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -249,9 +242,9 @@ public void testSingleValuedFieldWithValueScriptWithParams() throws Exception { public void testMultiValuedField() throws Exception { final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(randomCompression(percentiles("percentiles")).field("values").percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).field("values").percentiles(pcts)) + .get(); assertHitCount(searchResponse, 10); @@ -263,14 +256,13 @@ public void testMultiValuedField() throws Exception { public void testMultiValuedFieldWithValueScript() throws Exception { final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -281,14 +273,13 @@ public void testMultiValuedFieldWithValueScript() throws Exception { public void testMultiValuedFieldWithValueScriptReverse() throws Exception { final double[] pcts = randomPercentiles(); 
SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value * -1", emptyMap())) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -302,14 +293,13 @@ public void testMultiValuedFieldWithValueScriptWithParams() throws Exception { params.put("dec", 1); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .field("values") - .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + randomCompression(percentiles("percentiles")).field("values") + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - dec", params)) + .percentiles(pcts) + ) + .get(); assertHitCount(searchResponse, 10); @@ -322,13 +312,9 @@ public void testScriptSingleValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['value'].value", emptyMap()); final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .script(script) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) + .get(); assertHitCount(searchResponse, 10); @@ -345,13 +331,9 @@ public void testScriptSingleValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .script(script) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) + .get(); assertHitCount(searchResponse, 10); @@ -365,13 +347,9 @@ public void testScriptMultiValued() throws Exception { Script script = new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "doc['values']", emptyMap()); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .script(script) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) + .get(); assertHitCount(searchResponse, 10); @@ -385,13 +363,9 @@ public void testScriptMultiValuedWithParams() throws Exception { final double[] pcts = randomPercentiles(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - randomCompression( - percentiles("percentiles")) - .script(script) - .percentiles(pcts)) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(randomCompression(percentiles("percentiles")).script(script).percentiles(pcts)) + .get(); 
assertHitCount(searchResponse, 10); @@ -402,12 +376,14 @@ public void testScriptMultiValuedWithParams() throws Exception { public void testOrderBySubAggregation() { boolean asc = randomBoolean(); SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field("value").interval(2L) - .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) - .order(BucketOrder.aggregation("percentiles", "99", asc))) - .get(); + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field("value") + .interval(2L) + .subAggregation(randomCompression(percentiles("percentiles").field("value").percentiles(99))) + .order(BucketOrder.aggregation("percentiles", "99", asc)) + ) + .get(); assertHitCount(searchResponse, 10); @@ -427,12 +403,18 @@ public void testOrderBySubAggregation() { @Override public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation( - terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) - .subAggregation(filter("filter", termQuery("value", 100)) - .subAggregation(percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value")))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>percentiles.99", true))) + .subAggregation( + filter("filter", termQuery("value", 100)).subAggregation( + percentiles("percentiles").method(PercentilesMethod.TDIGEST).field("value") + ) + ) + ) + .get(); assertHitCount(searchResponse, 10); @@ -462,47 +444,134 @@ public void testOrderByEmptyAggregation() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d") - .percentiles(50.0).script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentiles("foo").field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "Math.random()", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d") - .percentiles(50.0).script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + percentiles("foo").field("d") + .percentiles(50.0) + .script(new Script(ScriptType.INLINE, AggregationTestScriptsPlugin.NAME, "_value - 1", emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - 
assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(percentiles("foo").field("d").percentiles(50.0)).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index 86431a7711d08..ef7d24495abd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -114,99 +114,108 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked(prepareCreate("idx").setMapping(TERMS_AGGS_FIELD, "type=keyword")); assertAcked(prepareCreate("field-collapsing").setMapping("group", "type=keyword")); createIndex("empty"); - assertAcked(prepareCreate("articles").setMapping( - jsonBuilder().startObject().startObject("_doc").startObject("properties") - .startObject(TERMS_AGGS_FIELD) + assertAcked( + prepareCreate("articles").setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject(TERMS_AGGS_FIELD) .field("type", "keyword") - .endObject() - .startObject("comments") + .endObject() + .startObject("comments") .field("type", "nested") .startObject("properties") - .startObject("user") - .field("type", "keyword") - .endObject() - .startObject("date") - .field("type", "long") - .endObject() - .startObject("message") - .field("type", "text") - .field("store", true) - .field("term_vector", "with_positions_offsets") - .field("index_options", "offsets") - .endObject() - .startObject("reviewers") - .field("type", "nested") - .startObject("properties") - .startObject("name") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() + .startObject("user") + .field("type", "keyword") + .endObject() + .startObject("date") + .field("type", "long") + .endObject() + .startObject("message") + .field("type", "text") + .field("store", true) + .field("term_vector", "with_positions_offsets") + .field("index_options", "offsets") + .endObject() + .startObject("reviewers") + .field("type", "nested") + .startObject("properties") + .startObject("name") 
+ .field("type", "keyword") + .endObject() .endObject() - .endObject() - .endObject().endObject().endObject())); + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + ) + ); ensureGreen("idx", "empty", "articles"); List builders = new ArrayList<>(); for (int i = 0; i < 50; i++) { - builders.add(client().prepareIndex("idx").setId(Integer.toString(i)).setSource(jsonBuilder() - .startObject() - .field(TERMS_AGGS_FIELD, "val" + (i / 10)) - .field(SORT_FIELD, i + 1) - .field("text", "some text to entertain") - .field("field1", 5) - .field("field2", 2.71) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field(TERMS_AGGS_FIELD, "val" + (i / 10)) + .field(SORT_FIELD, i + 1) + .field("text", "some text to entertain") + .field("field1", 5) + .field("field2", 2.71) + .endObject() + ) + ); } - builders.add(client().prepareIndex("field-collapsing").setId("1").setSource(jsonBuilder() - .startObject() - .field("group", "a") - .field("text", "term x y z b") - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("2").setSource(jsonBuilder() - .startObject() - .field("group", "a") - .field("text", "term x y z n rare") - .field("value", 1) - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("3").setSource(jsonBuilder() - .startObject() - .field("group", "b") - .field("text", "x y z term") - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("4").setSource(jsonBuilder() - .startObject() - .field("group", "b") - .field("text", "x y term") - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("5").setSource(jsonBuilder() - .startObject() - .field("group", "b") - .field("text", "x term") - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("6").setSource(jsonBuilder() - .startObject() - .field("group", "b") - .field("text", "term rare") - .field("value", 3) - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("7").setSource(jsonBuilder() - .startObject() - .field("group", "c") - .field("text", "x y z term") - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("8").setSource(jsonBuilder() - .startObject() - .field("group", "c") - .field("text", "x y term b") - .endObject())); - builders.add(client().prepareIndex("field-collapsing").setId("9").setSource(jsonBuilder() - .startObject() - .field("group", "c") - .field("text", "rare x term") - .field("value", 2) - .endObject())); + builders.add( + client().prepareIndex("field-collapsing") + .setId("1") + .setSource(jsonBuilder().startObject().field("group", "a").field("text", "term x y z b").endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("2") + .setSource(jsonBuilder().startObject().field("group", "a").field("text", "term x y z n rare").field("value", 1).endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("3") + .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x y z term").endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("4") + .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x y term").endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("5") + .setSource(jsonBuilder().startObject().field("group", "b").field("text", "x term").endObject()) + ); + 
builders.add( + client().prepareIndex("field-collapsing") + .setId("6") + .setSource(jsonBuilder().startObject().field("group", "b").field("text", "term rare").field("value", 3).endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("7") + .setSource(jsonBuilder().startObject().field("group", "c").field("text", "x y z term").endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("8") + .setSource(jsonBuilder().startObject().field("group", "c").field("text", "x y term b").endObject()) + ); + builders.add( + client().prepareIndex("field-collapsing") + .setId("9") + .setSource(jsonBuilder().startObject().field("group", "c").field("text", "rare x term").field("value", 2).endObject()) + ); numArticles = scaledRandomIntBetween(10, 100); numArticles -= (numArticles % 5); @@ -219,48 +228,79 @@ public void setupSuiteScopeCluster() throws Exception { } builder.endArray().endObject(); - builders.add( - client().prepareIndex("articles").setSource(builder) - ); + builders.add(client().prepareIndex("articles").setSource(builder)); } builders.add( - client().prepareIndex("articles").setId("1") - .setSource(jsonBuilder().startObject().field("title", "title 1").field("body", "some text").startArray("comments") - .startObject() - .field("user", "a").field("date", 1L).field("message", "some comment") - .startArray("reviewers") - .startObject().field("name", "user a").endObject() - .startObject().field("name", "user b").endObject() - .startObject().field("name", "user c").endObject() - .endArray() - .endObject() - .startObject() - .field("user", "b").field("date", 2L).field("message", "some other comment") - .startArray("reviewers") - .startObject().field("name", "user c").endObject() - .startObject().field("name", "user d").endObject() - .startObject().field("name", "user e").endObject() - .endArray() - .endObject() - .endArray().endObject()) + client().prepareIndex("articles") + .setId("1") + .setSource( + jsonBuilder().startObject() + .field("title", "title 1") + .field("body", "some text") + .startArray("comments") + .startObject() + .field("user", "a") + .field("date", 1L) + .field("message", "some comment") + .startArray("reviewers") + .startObject() + .field("name", "user a") + .endObject() + .startObject() + .field("name", "user b") + .endObject() + .startObject() + .field("name", "user c") + .endObject() + .endArray() + .endObject() + .startObject() + .field("user", "b") + .field("date", 2L) + .field("message", "some other comment") + .startArray("reviewers") + .startObject() + .field("name", "user c") + .endObject() + .startObject() + .field("name", "user d") + .endObject() + .startObject() + .field("name", "user e") + .endObject() + .endArray() + .endObject() + .endArray() + .endObject() + ) ); builders.add( - client().prepareIndex("articles").setId("2") - .setSource(jsonBuilder().startObject().field("title", "title 2").field("body", "some different text") - .startArray("comments") - .startObject() - .field("user", "b").field("date", 3L).field("message", "some comment") - .startArray("reviewers") - .startObject().field("name", "user f").endObject() - .endArray() - .endObject() - .startObject() - .field("user", "c") - .field("date", 4L) - .field("message", "some other comment") - .endObject() - .endArray().endObject()) + client().prepareIndex("articles") + .setId("2") + .setSource( + jsonBuilder().startObject() + .field("title", "title 2") + .field("body", "some different text") + .startArray("comments") + .startObject() + 
.field("user", "b") + .field("date", 3L) + .field("message", "some comment") + .startArray("reviewers") + .startObject() + .field("name", "user f") + .endObject() + .endArray() + .endObject() + .startObject() + .field("user", "c") + .field("date", 4L) + .field("message", "some other comment") + .endObject() + .endArray() + .endObject() + ) ); indexRandom(true, builders); @@ -272,16 +312,13 @@ private String key(Terms.Bucket bucket) { } public void testBasics() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) - ) - ) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) + ) + .get(); assertSearchResponse(response); @@ -311,13 +348,11 @@ public void testBasics() throws Exception { public void testIssue11119() throws Exception { // Test that top_hits aggregation is fed scores if query results size=0 - SearchResponse response = client() - .prepareSearch("field-collapsing") - .setSize(0) - .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) - .get(); + SearchResponse response = client().prepareSearch("field-collapsing") + .setSize(0) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits"))) + .get(); assertSearchResponse(response); @@ -335,7 +370,7 @@ public void testIssue11119() throws Exception { SearchHits hits = topHits.getHits(); float bestScore = Float.MAX_VALUE; for (int h = 0; h < hits.getHits().length; h++) { - float score=hits.getAt(h).getScore(); + float score = hits.getAt(h).getScore(); assertThat(score, lessThanOrEqualTo(bestScore)); assertThat(score, greaterThan(0f)); bestScore = hits.getAt(h).getScore(); @@ -346,13 +381,12 @@ public void testIssue11119() throws Exception { // (technically not a test of top_hits but implementation details are // tied up with the need to feed scores into the agg tree even when // users don't want ranked set of query results.) 
- response = client() - .prepareSearch("field-collapsing") - .setSize(0) - .setMinScore(0.0001f) - .setQuery(matchQuery("text", "x y z")) - .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")) - .get(); + response = client().prepareSearch("field-collapsing") + .setSize(0) + .setMinScore(0.0001f) + .setQuery(matchQuery("text", "x y z")) + .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group")) + .get(); assertSearchResponse(response); @@ -365,15 +399,15 @@ public void testIssue11119() throws Exception { assertThat(terms.getBuckets().size(), equalTo(3)); } - public void testBreadthFirstWithScoreNeeded() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TERMS_AGGS_FIELD) - .subAggregation(topHits("hits").size(3)) - ).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").size(3)) + ) + .get(); assertSearchResponse(response); @@ -398,14 +432,15 @@ public void testBreadthFirstWithScoreNeeded() throws Exception { public void testBreadthFirstWithAggOrderAndScoreNeeded() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .collectMode(SubAggCollectionMode.BREADTH_FIRST) - .field(TERMS_AGGS_FIELD) - .order(BucketOrder.aggregation("max", false)) - .subAggregation(max("max").field(SORT_FIELD)) - .subAggregation(topHits("hits").size(3)) - ).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .collectMode(SubAggCollectionMode.BREADTH_FIRST) + .field(TERMS_AGGS_FIELD) + .order(BucketOrder.aggregation("max", false)) + .subAggregation(max("max").field(SORT_FIELD)) + .subAggregation(topHits("hits").size(3)) + ) + .get(); assertSearchResponse(response); @@ -429,8 +464,10 @@ public void testBreadthFirstWithAggOrderAndScoreNeeded() throws Exception { } public void testBasicsGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(topHits("hits"))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(topHits("hits"))) + .get(); assertSearchResponse(searchResponse); @@ -443,7 +480,7 @@ public void testBasicsGetProperty() throws Exception { TopHits topHits = global.getAggregations().get("hits"); assertThat(topHits, notNullValue()); assertThat(topHits.getName(), equalTo("hits")); - assertThat((TopHits) ((InternalAggregation)global).getProperty("hits"), sameInstance(topHits)); + assertThat((TopHits) ((InternalAggregation) global).getProperty("hits"), sameInstance(topHits)); } @@ -451,25 +488,20 @@ public void testPagination() throws Exception { int size = randomIntBetween(1, 10); int from = randomIntBetween(0, 10); SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits") - .sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) - .from(from) - .size(size) - ) - ) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + 
.subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).from(from).size(size)) + ) + .get(); assertSearchResponse(response); SearchResponse control = client().prepareSearch("idx") - .setFrom(from) - .setSize(size) - .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0")) - .addSort(SORT_FIELD, SortOrder.DESC) - .get(); + .setFrom(from) + .setSize(size) + .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0")) + .addSort(SORT_FIELD, SortOrder.DESC) + .get(); assertSearchResponse(control); SearchHits controlHits = control.getHits(); @@ -486,8 +518,14 @@ public void testPagination() throws Exception { assertThat(hits.getTotalHits().value, equalTo(controlHits.getTotalHits().value)); assertThat(hits.getHits().length, equalTo(controlHits.getHits().length)); for (int i = 0; i < hits.getHits().length; i++) { - logger.info("{}: top_hits: [{}][{}] control: [{}][{}]", i, hits.getAt(i).getId(), hits.getAt(i).getSortValues()[0], - controlHits.getAt(i).getId(), controlHits.getAt(i).getSortValues()[0]); + logger.info( + "{}: top_hits: [{}][{}] control: [{}][{}]", + i, + hits.getAt(i).getId(), + hits.getAt(i).getSortValues()[0], + controlHits.getAt(i).getId(), + controlHits.getAt(i).getSortValues()[0] + ); assertThat(hits.getAt(i).getId(), equalTo(controlHits.getAt(i).getId())); assertThat(hits.getAt(i).getSortValues()[0], equalTo(controlHits.getAt(i).getSortValues()[0])); } @@ -495,18 +533,14 @@ public void testPagination() throws Exception { public void testSortByBucket() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .order(BucketOrder.aggregation("max_sort", false)) - .subAggregation( - topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true) - ) - .subAggregation( - max("max_sort").field(SORT_FIELD) - ) - ) - .get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .order(BucketOrder.aggregation("max_sort", false)) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).trackScores(true)) + .subAggregation(max("max_sort").field(SORT_FIELD)) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -533,14 +567,17 @@ public void testSortByBucket() throws Exception { } public void testFieldCollapsing() throws Exception { - SearchResponse response = client() - .prepareSearch("field-collapsing") - .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) - .setQuery(matchQuery("text", "term rare")) - .addAggregation( - terms("terms").executionHint(randomExecutionHint()).field("group") - .order(BucketOrder.aggregation("max_score", false)).subAggregation(topHits("hits").size(1)) - .subAggregation(max("max_score").field("value"))).get(); + SearchResponse response = client().prepareSearch("field-collapsing") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setQuery(matchQuery("text", "term rare")) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field("group") + .order(BucketOrder.aggregation("max_score", false)) + .subAggregation(topHits("hits").size(1)) + .subAggregation(max("max_score").field("value")) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -577,25 +614,24 @@ public void testFieldCollapsing() throws Exception { public void testFetchFeatures() { final boolean 
seqNoAndTerm = randomBoolean(); SearchResponse response = client().prepareSearch("idx") - .setQuery(matchQuery("text", "text").queryName("test")) - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").size(1) - .highlighter(new HighlightBuilder().field("text")) - .explain(true) - .storedField("text") - .docValueField("field1") - .fetchField("field2") - .scriptField("script", - new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .fetchSource("text", null) - .version(true) - .seqNoAndPrimaryTerm(seqNoAndTerm) - ) - ) - .get(); + .setQuery(matchQuery("text", "text").queryName("test")) + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").size(1) + .highlighter(new HighlightBuilder().field("text")) + .explain(true) + .storedField("text") + .docValueField("field1") + .fetchField("field2") + .scriptField("script", new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .fetchSource("text", null) + .version(true) + .seqNoAndPrimaryTerm(seqNoAndTerm) + ) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -649,13 +685,12 @@ public void testFetchFeatures() { public void testInvalidSortField() throws Exception { try { client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").sort(SortBuilders.fieldSort("xyz").order(SortOrder.DESC)) - ) - ).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.fieldSort("xyz").order(SortOrder.DESC))) + ) + .get(); fail(); } catch (SearchPhaseExecutionException e) { assertThat(e.toString(), containsString("No mapping found for [xyz] in order to sort on")); @@ -663,9 +698,7 @@ public void testInvalidSortField() throws Exception { } public void testEmptyIndex() throws Exception { - SearchResponse response = client().prepareSearch("empty") - .addAggregation(topHits("hits")) - .get(); + SearchResponse response = client().prepareSearch("empty").addAggregation(topHits("hits")).get(); assertSearchResponse(response); TopHits hits = response.getAggregations().get("hits"); @@ -675,21 +708,16 @@ public void testEmptyIndex() throws Exception { } public void testTrackScores() throws Exception { - boolean[] trackScores = new boolean[]{true, false}; + boolean[] trackScores = new boolean[] { true, false }; for (boolean trackScore : trackScores) { logger.info("Track score={}", trackScore); SearchResponse response = client().prepareSearch("field-collapsing") - .setQuery(matchQuery("text", "term rare")) - .addAggregation(terms("terms") - .field("group") - .subAggregation( - topHits("hits") - .trackScores(trackScore) - .size(1) - .sort("_index", SortOrder.DESC) - ) - ) - .get(); + .setQuery(matchQuery("text", "term rare")) + .addAggregation( + terms("terms").field("group") + .subAggregation(topHits("hits").trackScores(trackScore).size(1).sort("_index", SortOrder.DESC)) + ) + .get(); assertSearchResponse(response); Terms terms = response.getAggregations().get("terms"); @@ -722,18 +750,13 @@ public void testTrackScores() throws Exception { public void testTopHitsInNestedSimple() throws Exception { SearchResponse searchResponse = client().prepareSearch("articles") - .setQuery(matchQuery("title", "title")) - 
.addAggregation( - nested("to-comments", "comments") - .subAggregation( - terms("users") - .field("comments.user") - .subAggregation( - topHits("top-comments").sort("comments.date", SortOrder.ASC) - ) - ) + .setQuery(matchQuery("title", "title")) + .addAggregation( + nested("to-comments", "comments").subAggregation( + terms("users").field("comments.user").subAggregation(topHits("top-comments").sort("comments.date", SortOrder.ASC)) ) - .get(); + ) + .get(); Nested nested = searchResponse.getAggregations().get("to-comments"); assertThat(nested.getDocCount(), equalTo(4L)); @@ -772,18 +795,16 @@ public void testTopHitsInNestedSimple() throws Exception { public void testTopHitsInSecondLayerNested() throws Exception { SearchResponse searchResponse = client().prepareSearch("articles") - .setQuery(matchQuery("title", "title")) - .addAggregation( - nested("to-comments", "comments") - .subAggregation( - nested("to-reviewers", "comments.reviewers").subAggregation( - // Also need to sort on _doc because there are two reviewers with the same name - topHits("top-reviewers") - .sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) - ) - ) - .subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) - ).get(); + .setQuery(matchQuery("title", "title")) + .addAggregation( + nested("to-comments", "comments").subAggregation( + nested("to-reviewers", "comments.reviewers").subAggregation( + // Also need to sort on _doc because there are two reviewers with the same name + topHits("top-reviewers").sort("comments.reviewers.name", SortOrder.ASC).sort("_doc", SortOrder.DESC).size(7) + ) + ).subAggregation(topHits("top-comments").sort("comments.date", SortOrder.DESC).size(4)) + ) + .get(); assertNoFailures(searchResponse); Nested toComments = searchResponse.getAggregations().get("to-comments"); @@ -872,22 +893,27 @@ public void testTopHitsInSecondLayerNested() throws Exception { public void testNestedFetchFeatures() { String hlType = randomFrom("plain", "fvh", "unified"); - HighlightBuilder.Field hlField = new HighlightBuilder.Field("comments.message") - .highlightQuery(matchQuery("comments.message", "comment")) - .forceSource(randomBoolean()) // randomly from stored field or _source - .highlighterType(hlType); - - SearchResponse searchResponse = client() - .prepareSearch("articles") - .setQuery(nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test"), ScoreMode.Avg)) - .addAggregation( - nested("to-comments", "comments").subAggregation( - topHits("top-comments").size(1).highlighter(new HighlightBuilder().field(hlField)).explain(true) - .docValueField("comments.user") - .scriptField("script", - new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) - .fetchSource("comments.message", null) - .version(true).sort("comments.date", SortOrder.ASC))).get(); + HighlightBuilder.Field hlField = new HighlightBuilder.Field("comments.message").highlightQuery( + matchQuery("comments.message", "comment") + ) + .forceSource(randomBoolean()) // randomly from stored field or _source + .highlighterType(hlType); + + SearchResponse searchResponse = client().prepareSearch("articles") + .setQuery(nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test"), ScoreMode.Avg)) + .addAggregation( + nested("to-comments", "comments").subAggregation( + topHits("top-comments").size(1) + .highlighter(new HighlightBuilder().field(hlField)) + .explain(true) + .docValueField("comments.user") + .scriptField("script", 
new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) + .fetchSource("comments.message", null) + .version(true) + .sort("comments.date", SortOrder.ASC) + ) + ) + .get(); assertHitCount(searchResponse, 2); Nested nested = searchResponse.getAggregations().get("to-comments"); assertThat(nested.getDocCount(), equalTo(4L)); @@ -927,24 +953,20 @@ public void testNestedFetchFeatures() { public void testTopHitsInNested() throws Exception { SearchResponse searchResponse = client().prepareSearch("articles") - .addAggregation( - histogram("dates") - .field("date") - .interval(5) - .subAggregation( - nested("to-comments", "comments") - .subAggregation(topHits("comments") - .highlighter( - new HighlightBuilder() - .field( - new HighlightBuilder.Field("comments.message") - .highlightQuery(matchQuery("comments.message", "text")) - ) - ) - .sort("comments.id", SortOrder.ASC)) + .addAggregation( + histogram("dates").field("date") + .interval(5) + .subAggregation( + nested("to-comments", "comments").subAggregation( + topHits("comments").highlighter( + new HighlightBuilder().field( + new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")) ) - ) - .get(); + ).sort("comments.id", SortOrder.ASC) + ) + ) + ) + .get(); Histogram histogram = searchResponse.getAggregations().get("dates"); for (int i = 0; i < numArticles; i += 5) { @@ -971,93 +993,101 @@ public void testTopHitsInNested() throws Exception { } public void testUseMaxDocInsteadOfSize() throws Exception { - client().admin().indices().prepareUpdateSettings("idx") + client().admin() + .indices() + .prepareUpdateSettings("idx") .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), ArrayUtil.MAX_ARRAY_LENGTH)) .get(); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").size(ArrayUtil.MAX_ARRAY_LENGTH - 1) - .sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) - ) - ) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation( + topHits("hits").size(ArrayUtil.MAX_ARRAY_LENGTH - 1).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + ) + ) + .get(); assertNoFailures(response); - client().admin().indices().prepareUpdateSettings("idx") + client().admin() + .indices() + .prepareUpdateSettings("idx") .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), null)) .get(); } public void testTooHighResultWindow() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").from(50).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) - ) + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").from(50).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) ) .get(); assertNoFailures(response); - Exception e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - 
.subAggregation( - topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + Exception e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) ) - ).get()); - assertThat(e.getCause().getMessage(), - containsString("the top hits aggregator [hits]'s from + size must be less than or equal to: [100] but was [110]")); - e = expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) + .get() + ); + assertThat( + e.getCause().getMessage(), + containsString("the top hits aggregator [hits]'s from + size must be less than or equal to: [100] but was [110]") + ); + e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch("idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) ) - ).get()); - assertThat(e.getCause().getMessage(), - containsString("the top hits aggregator [hits]'s from + size must be less than or equal to: [100] but was [110]")); + .get() + ); + assertThat( + e.getCause().getMessage(), + containsString("the top hits aggregator [hits]'s from + size must be less than or equal to: [100] but was [110]") + ); - client().admin().indices().prepareUpdateSettings("idx") + client().admin() + .indices() + .prepareUpdateSettings("idx") .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), 110)) .get(); response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) - )).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) + ) + .get(); assertNoFailures(response); response = client().prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)) - )).get(); + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))) + ) + .get(); assertNoFailures(response); - client().admin().indices().prepareUpdateSettings("idx") + client().admin() + .indices() + .prepareUpdateSettings("idx") .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), null)) .get(); } public void testNoStoredFields() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms") - .executionHint(randomExecutionHint()) - .field(TERMS_AGGS_FIELD) - .subAggregation( - 
topHits("hits").storedField("_none_") - ) + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").executionHint(randomExecutionHint()) + .field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").storedField("_none_")) ) .get(); @@ -1090,79 +1120,210 @@ public void testNoStoredFields() throws Exception { */ public void testScriptCaching() throws Exception { try { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") - .setSettings( - Settings.builder() - .put("requests.cache.enable", true) - .put("number_of_shards", 1) - .put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") + .setSettings( + Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1) + ) + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script field does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(topHits("foo").scriptField("bar", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()))).get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + topHits("foo").scriptField( + "bar", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()) + ) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script sort does not get cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(topHits("foo").sort( - SortBuilders.scriptSort( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()), - ScriptSortType.STRING))) + r = 
client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + topHits("foo").sort( + SortBuilders.scriptSort( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "Math.random()", Collections.emptyMap()), + ScriptSortType.STRING + ) + ) + ) .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script field does not get cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(topHits("foo").scriptField("bar", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()))).get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + topHits("foo").scriptField("bar", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Test that a request using a deterministic script sort does not get cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(topHits("foo").sort( - SortBuilders.scriptSort( - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), ScriptSortType.STRING))) + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + topHits("foo").sort( + SortBuilders.scriptSort( + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "5", Collections.emptyMap()), + ScriptSortType.STRING + ) + ) + ) .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); // Ensure that non-scripted requests are cached as normal 
r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(topHits("foo")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(3L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(3L) + ); } finally { assertAcked(client().admin().indices().prepareDelete("cache_test_idx")); // delete this - if we use tests.iters it would fail } @@ -1171,17 +1332,9 @@ public void testScriptCaching() throws Exception { public void testWithRescore() { // Rescore with default sort on relevancy (score) { - SearchResponse response = client() - .prepareSearch("idx") - .addRescorer( - new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)) - ) - .addAggregation(terms("terms") - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits") - ) - ) + SearchResponse response = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits"))) .get(); Terms terms = response.getAggregations().get("terms"); for (Terms.Bucket bucket : terms.getBuckets()) { @@ -1193,17 +1346,9 @@ public void testWithRescore() { } { - SearchResponse response = client() - .prepareSearch("idx") - .addRescorer( - new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)) - ) - .addAggregation(terms("terms") - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").sort(SortBuilders.scoreSort()) - ) - ) + SearchResponse response = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + .addAggregation(terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.scoreSort()))) .get(); Terms terms = response.getAggregations().get("terms"); for (Terms.Bucket bucket : terms.getBuckets()) { @@ -1216,16 +1361,10 @@ public void testWithRescore() { // Rescore should not be applied if the sort order is not relevancy { - SearchResponse response = client() - .prepareSearch("idx") - .addRescorer( - new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)) - ) - .addAggregation(terms("terms") - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").sort(SortBuilders.fieldSort("_index")) - ) + SearchResponse response = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + .addAggregation( + terms("terms").field(TERMS_AGGS_FIELD).subAggregation(topHits("hits").sort(SortBuilders.fieldSort("_index"))) ) .get(); Terms terms = response.getAggregations().get("terms"); @@ -1238,16 +1377,11 @@ public void testWithRescore() { } { - SearchResponse response = client() - .prepareSearch("idx") - .addRescorer( - new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f)) - ) - .addAggregation(terms("terms") - .field(TERMS_AGGS_FIELD) - .subAggregation( - topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_index")) - ) + 
SearchResponse response = client().prepareSearch("idx") + .addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder().boost(3.0f))) + .addAggregation( + terms("terms").field(TERMS_AGGS_FIELD) + .subAggregation(topHits("hits").sort(SortBuilders.scoreSort()).sort(SortBuilders.fieldSort("_index"))) ) .get(); Terms terms = response.getAggregations().get("terms"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index 516a14c50a47f..d5659586a24fd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -49,12 +49,12 @@ public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); for (int i = 0; i < 10; i++) { - client().prepareIndex("idx").setId(""+i).setSource(jsonBuilder() - .startObject() - .field("value", i+1) - .startArray("values").value(i+2).value(i+3).endArray() - .endObject()) - .get(); + client().prepareIndex("idx") + .setId("" + i) + .setSource( + jsonBuilder().startObject().field("value", i + 1).startArray("values").value(i + 2).value(i + 3).endArray().endObject() + ) + .get(); } client().admin().indices().prepareFlush().get(); client().admin().indices().prepareRefresh().get(); @@ -68,9 +68,9 @@ protected Collection> nodePlugins() { public void testUnmapped() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(count("count").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(count("count").field("value")) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); @@ -82,9 +82,9 @@ public void testUnmapped() throws Exception { public void testSingleValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(count("count").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(count("count").field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -95,8 +95,10 @@ public void testSingleValuedField() throws Exception { } public void testSingleValuedFieldGetProperty() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(global("global").subAggregation(count("count").field("value"))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(global("global").subAggregation(count("count").field("value"))) + .get(); assertHitCount(searchResponse, 10); @@ -111,16 +113,16 @@ public void testSingleValuedFieldGetProperty() throws Exception { assertThat(valueCount, notNullValue()); assertThat(valueCount.getName(), equalTo("count")); assertThat(valueCount.getValue(), equalTo(10L)); - assertThat((ValueCount) ((InternalAggregation)global).getProperty("count"), equalTo(valueCount)); - assertThat((double) ((InternalAggregation)global).getProperty("count.value"), equalTo(10d)); - assertThat((double) ((InternalAggregation)valueCount).getProperty("value"), equalTo(10d)); + assertThat((ValueCount) ((InternalAggregation) global).getProperty("count"), equalTo(valueCount)); + assertThat((double) ((InternalAggregation) global).getProperty("count.value"), equalTo(10d)); + 
assertThat((double) ((InternalAggregation) valueCount).getProperty("value"), equalTo(10d)); } public void testSingleValuedFieldPartiallyUnmapped() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped") - .setQuery(matchAllQuery()) - .addAggregation(count("count").field("value")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(count("count").field("value")) + .get(); assertHitCount(searchResponse, 10); @@ -132,9 +134,9 @@ public void testSingleValuedFieldPartiallyUnmapped() throws Exception { public void testMultiValuedField() throws Exception { SearchResponse searchResponse = client().prepareSearch("idx") - .setQuery(matchAllQuery()) - .addAggregation(count("count").field("values")) - .get(); + .setQuery(matchAllQuery()) + .addAggregation(count("count").field("values")) + .get(); assertHitCount(searchResponse, 10); @@ -145,9 +147,11 @@ public void testMultiValuedField() throws Exception { } public void testSingleValuedScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(count("count").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))) + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap())) + ) .get(); assertHitCount(searchResponse, 10); @@ -159,9 +163,11 @@ public void testSingleValuedScript() throws Exception { } public void testMultiValuedScript() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(count("count").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap()))) + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_VALUES_FIELD_SCRIPT, Collections.emptyMap())) + ) .get(); assertHitCount(searchResponse, 10); @@ -174,7 +180,8 @@ public void testMultiValuedScript() throws Exception { public void testSingleValuedScriptWithParams() throws Exception { Map params = Collections.singletonMap("field", "value"); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) .addAggregation(count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))) .get(); @@ -188,9 +195,10 @@ public void testSingleValuedScriptWithParams() throws Exception { public void testMultiValuedScriptWithParams() throws Exception { Map params = Collections.singletonMap("field", "values"); - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(count("count").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))).get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(count("count").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, SUM_FIELD_PARAMS_SCRIPT, params))) + .get(); assertHitCount(searchResponse, 10); @@ -205,57 +213,143 @@ public void testMultiValuedScriptWithParams() throws Exception { * Ensure requests using nondeterministic scripts do not get cached. 
*/ public void testScriptCaching() throws Exception { - assertAcked(prepareCreate("cache_test_idx").setMapping("d", "type=long") + assertAcked( + prepareCreate("cache_test_idx").setMapping("d", "type=long") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) - .get()); - indexRandom(true, client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), - client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2)); + .get() + ); + indexRandom( + true, + client().prepareIndex("cache_test_idx").setId("1").setSource("s", 1), + client().prepareIndex("cache_test_idx").setId("2").setSource("s", 2) + ); // Make sure we are starting with a clear cache - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a nondeterministic script does not get cached - SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(count("foo").field("d").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap()))) - .get(); + SearchResponse r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + count("foo").field("d").script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, RANDOM_SCRIPT, Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(0L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(0L) + ); // Test that a request using a deterministic script gets cached - r = client().prepareSearch("cache_test_idx").setSize(0) - .addAggregation(count("foo").field("d").script( - new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap()))) - .get(); + r = client().prepareSearch("cache_test_idx") + .setSize(0) + .addAggregation( + count("foo").field("d") + .script(new Script(ScriptType.INLINE, METRIC_SCRIPT_ENGINE, VALUE_FIELD_SCRIPT, Collections.emptyMap())) + ) + .get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(1L)); + 
assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(1L) + ); // Ensure that non-scripted requests are cached as normal r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(count("foo").field("d")).get(); assertSearchResponse(r); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getHitCount(), equalTo(0L)); - assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() - .getMissCount(), equalTo(2L)); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getHitCount(), + equalTo(0L) + ); + assertThat( + client().admin() + .indices() + .prepareStats("cache_test_idx") + .setRequestCache(true) + .get() + .getTotal() + .getRequestCache() + .getMissCount(), + equalTo(2L) + ); } public void testOrderByEmptyAggregation() throws Exception { - SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery()) - .addAggregation(terms("terms").field("value").order(BucketOrder.compound(BucketOrder.aggregation("filter>count", true))) - .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(count("count").field("value")))) - .get(); + SearchResponse searchResponse = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation( + terms("terms").field("value") + .order(BucketOrder.compound(BucketOrder.aggregation("filter>count", true))) + .subAggregation(filter("filter", termQuery("value", 100)).subAggregation(count("count").field("value"))) + ) + .get(); assertHitCount(searchResponse, 10); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index 57fe539ad0bd6..d7a1d63311c1c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -47,8 +47,7 @@ public class AvgBucketIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped"); numDocs = randomIntBetween(6, 20); @@ -64,17 +63,26 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -82,9 +90,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testDocCountTopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(avgBucket("avg_bucket", "histo>_count")).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(avgBucket("avg_bucket", "histo>_count")) + .get(); assertSearchResponse(response); @@ -113,16 +123,16 @@ public void testDocCountTopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(avgBucket("avg_bucket", "histo>_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(avgBucket("avg_bucket", "histo>_count")) + ) + .get(); assertSearchResponse(response); @@ -161,10 +171,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(avgBucket("avg_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(avgBucket("avg_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -195,17 +205,19 @@ public void testMetricTopLevel() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(avgBucket("avg_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + 
.subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(avgBucket("avg_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -248,18 +260,19 @@ public void testMetricAsSubAgg() throws Exception { } public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(avgBucket("avg_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(avgBucket("avg_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -302,9 +315,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(avgBucket("avg_bucket", "terms>sum")).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(avgBucket("avg_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -321,17 +338,17 @@ public void testNoBuckets() throws Exception { } public void testNested() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(avgBucket("avg_histo_bucket", "histo>_count"))) - .addAggregation(avgBucket("avg_terms_bucket", "terms>avg_histo_bucket")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(avgBucket("avg_histo_bucket", "histo>_count")) + ) + .addAggregation(avgBucket("avg_terms_bucket", "terms>avg_histo_bucket")) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java index ab51b4eeace4d..79186b851799f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java @@ -151,21 +151,24 @@ private XContentBuilder newDocBuilder() throws IOException { } public void testInlineScript() { - 
SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - "field2Sum", "field3Sum", "field4Sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ) + ) + ) + .get(); assertSearchResponse(response); @@ -198,21 +201,24 @@ public void testInlineScript() { } public void testInlineScript2() { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "_value0 + _value1 / _value2", Collections.emptyMap()), - "field2Sum", "field3Sum", "field4Sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 / _value2", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ) + ) + ) + .get(); assertSearchResponse(response); @@ -245,19 +251,23 @@ public void testInlineScript2() { } public void testInlineScriptWithDateRange() { - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") .addAggregation( - dateRange("range") - .field(FIELD_5_NAME) + dateRange("range").field(FIELD_5_NAME) .addUnboundedFrom(date) .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()) - , "field2Sum", "field3Sum", "field4Sum"))) + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ) + ) + ) .get(); assertSearchResponse(response); @@ -291,18 +301,20 @@ public void testInlineScriptWithDateRange() { } public void testInlineScriptSingleVariable() { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - 
.subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0", Collections.emptyMap()), - "field2Sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0", Collections.emptyMap()), + "field2Sum" + ) + ) + ) + .get(); assertSearchResponse(response); @@ -333,20 +345,22 @@ public void testInlineScriptNamedVars() { bucketsPathsMap.put("foo", "field2Sum"); bucketsPathsMap.put("bar", "field3Sum"); bucketsPathsMap.put("baz", "field4Sum"); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", bucketsPathsMap, - new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "foo + bar + baz", Collections.emptyMap())))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + bucketsPathsMap, + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "foo + bar + baz", Collections.emptyMap()) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -384,17 +398,16 @@ public void testInlineScriptWithParams() { Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "(_value0 + _value1 + _value2) * factor", params); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation(bucketScript("seriesArithmetic", script, "field2Sum", "field3Sum", "field4Sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation(bucketScript("seriesArithmetic", script, "field2Sum", "field3Sum", "field4Sum")) + ) + .get(); assertSearchResponse(response); @@ -427,21 +440,24 @@ public void testInlineScriptWithParams() { } public void testInlineScriptInsertZeros() { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - "field2Sum", "field3Sum", 
"field4Sum").gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ).gapPolicy(GapPolicy.INSERT_ZEROS) + ) + ) + .get(); assertSearchResponse(response); @@ -476,18 +492,18 @@ public void testInlineScriptInsertZeros() { } public void testInlineScriptReturnNull() { - SearchResponse response = client() - .prepareSearch("idx") + SearchResponse response = client().prepareSearch("idx") .addAggregation( - histogram("histo") - .field(FIELD_1_NAME).interval(interval) + histogram("histo").field(FIELD_1_NAME) + .interval(interval) .subAggregation( bucketScript( "nullField", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return null", Collections.emptyMap()) ) ) - ).get(); + ) + .get(); assertSearchResponse(response); @@ -503,25 +519,36 @@ public void testInlineScriptReturnNull() { } public void testStoredScript() { - assertAcked(client().admin().cluster().preparePutStoredScript() + assertAcked( + client().admin() + .cluster() + .preparePutStoredScript() .setId("my_script") // Script source is not interpreted but it references a pre-defined script from CustomScriptPlugin - .setContent(new BytesArray("{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\"," + - " \"source\": \"my_script\" } }"), XContentType.JSON)); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.STORED, null, "my_script", Collections.emptyMap()), - "field2Sum", "field3Sum", "field4Sum"))).get(); + .setContent( + new BytesArray("{ \"script\": {\"lang\": \"" + CustomScriptPlugin.NAME + "\"," + " \"source\": \"my_script\" } }"), + XContentType.JSON + ) + ); + + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.STORED, null, "my_script", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ) + ) + ) + .get(); assertSearchResponse(response); @@ -554,21 +581,24 @@ public void testStoredScript() { } public void testUnmapped() throws Exception { - SearchResponse response = client() - .prepareSearch("idx_unmapped") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - 
"field2Sum", "field3Sum", "field4Sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx_unmapped") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ) + ) + ) + .get(); assertSearchResponse(response); @@ -579,20 +609,24 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation( - bucketScript("seriesArithmetic", - new Script(ScriptType.INLINE, - CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - "field2Sum", "field3Sum", "field4Sum"))).get(); + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) + .subAggregation( + bucketScript( + "seriesArithmetic", + new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), + "field2Sum", + "field3Sum", + "field4Sum" + ) + ) + ) + .get(); assertSearchResponse(response); @@ -633,17 +667,19 @@ public void testSingleBucketPathAgg() throws Exception { .field("lang", CustomScriptPlugin.NAME) .endObject() .endObject(); - BucketScriptPipelineAggregationBuilder bucketScriptAgg = - BucketScriptPipelineAggregationBuilder.PARSER.parse(createParser(content), "seriesArithmetic"); + BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse( + createParser(content), + "seriesArithmetic" + ); - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) + histogram("histo").field(FIELD_1_NAME) .interval(interval) .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(bucketScriptAgg)).get(); + .subAggregation(bucketScriptAgg) + ) + .get(); assertSearchResponse(response); @@ -678,19 +714,21 @@ public void testArrayBucketPathAgg() throws Exception { .field("lang", CustomScriptPlugin.NAME) .endObject() .endObject(); - BucketScriptPipelineAggregationBuilder bucketScriptAgg = - BucketScriptPipelineAggregationBuilder.PARSER.parse(createParser(content), "seriesArithmetic"); + BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse( + createParser(content), + "seriesArithmetic" + ); - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) + histogram("histo").field(FIELD_1_NAME) 
.interval(interval) .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation(bucketScriptAgg)).get(); + .subAggregation(bucketScriptAgg) + ) + .get(); assertSearchResponse(response); @@ -726,28 +764,30 @@ public void testObjectBucketPathAgg() throws Exception { XContentBuilder content = XContentFactory.jsonBuilder() .startObject() .startObject("buckets_path") - .field("_value0", "field2Sum") - .field("_value1", "field3Sum") - .field("_value2", "field4Sum") + .field("_value0", "field2Sum") + .field("_value1", "field3Sum") + .field("_value2", "field4Sum") .endObject() .startObject("script") .field("source", "_value0 + _value1 + _value2") .field("lang", CustomScriptPlugin.NAME) .endObject() .endObject(); - BucketScriptPipelineAggregationBuilder bucketScriptAgg = - BucketScriptPipelineAggregationBuilder.PARSER.parse(createParser(content), "seriesArithmetic"); + BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse( + createParser(content), + "seriesArithmetic" + ); - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) + histogram("histo").field(FIELD_1_NAME) .interval(interval) .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) .subAggregation(sum("field4Sum").field(FIELD_4_NAME)) - .subAggregation(bucketScriptAgg)).get(); + .subAggregation(bucketScriptAgg) + ) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java index 04a2a3b2e836b..f5337d0f85639 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -150,8 +150,12 @@ public void setupSuiteScopeCluster() throws Exception { } private XContentBuilder newDocBuilder() throws IOException { - return newDocBuilder(randomIntBetween(minNumber, maxNumber), randomIntBetween(minNumber, maxNumber), - randomIntBetween(minNumber, maxNumber), randomIntBetween(minNumber, maxNumber)); + return newDocBuilder( + randomIntBetween(minNumber, maxNumber), + randomIntBetween(minNumber, maxNumber), + randomIntBetween(minNumber, maxNumber), + randomIntBetween(minNumber, maxNumber) + ); } private XContentBuilder newDocBuilder(int field1Value, int field2Value, int field3Value, int field4Value) throws IOException { @@ -166,14 +170,22 @@ private XContentBuilder newDocBuilder(int field1Value, int field2Value, int fiel } public void testInlineScript() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? 
false : (_value0 + _value1 > 100)", + Collections.emptyMap() + ); SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(FIELD_1_NAME).interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)).subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -195,19 +207,22 @@ public void testInlineScript() { } public void testInlineScriptNoBucketsPruned() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? true : (_value0 < 10000)", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? true : (_value0 < 10000)", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -229,19 +244,22 @@ public void testInlineScriptNoBucketsPruned() { } public void testInlineScriptNoBucketsLeft() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? false : (_value0 > 10000)", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 > 10000)", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -253,19 +271,22 @@ public void testInlineScriptNoBucketsLeft() { } public void testInlineScript2() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? 
false : (_value0 < _value1)", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 < _value1)", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -287,18 +308,21 @@ public void testInlineScript2() { } public void testInlineScriptSingleVariable() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? false : (_value0 > 100)", Collections.emptyMap()); - - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum"))) - .get(); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 > 100)", + Collections.emptyMap() + ); + + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum")) + ) + .get(); assertSearchResponse(response); @@ -317,22 +341,26 @@ public void testInlineScriptSingleVariable() { } public void testInlineScriptNamedVars() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(my_value1) ? false : (my_value1 + my_value2 > 100)", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(my_value1) ? false : (my_value1 + my_value2 > 100)", + Collections.emptyMap() + ); Map bucketPathsMap = new HashMap<>(); bucketPathsMap.put("my_value1", "field2Sum"); bucketPathsMap.put("my_value2", "field3Sum"); SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", bucketPathsMap, script))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", bucketPathsMap, script)) + ) + .get(); assertSearchResponse(response); @@ -354,18 +382,22 @@ public void testInlineScriptNamedVars() { } public void testInlineScriptWithParams() { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? 
false : (_value0 + _value1 > threshold)", Collections.singletonMap("threshold", 100)); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 + _value1 > threshold)", + Collections.singletonMap("threshold", 100) + ); SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -390,15 +422,14 @@ public void testInlineScriptInsertZeros() { Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_value0 + _value1 > 100", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script , "field2Sum", "field3Sum") - .gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -420,25 +451,34 @@ public void testInlineScriptInsertZeros() { } public void testStoredScript() { - assertAcked(client().admin().cluster().preparePutStoredScript() + assertAcked( + client().admin() + .cluster() + .preparePutStoredScript() .setId("my_script") // Source is not interpreted but my_script is defined in CustomScriptPlugin - .setContent(new BytesArray("{ \"script\": { \"lang\": \"" + CustomScriptPlugin.NAME + "\", " + - "\"source\": \"Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)\" } }"), - XContentType.JSON)); + .setContent( + new BytesArray( + "{ \"script\": { \"lang\": \"" + + CustomScriptPlugin.NAME + + "\", " + + "\"source\": \"Double.isNaN(_value0) ? 
false : (_value0 + _value1 > 100)\" } }" + ), + XContentType.JSON + ) + ); Script script = new Script(ScriptType.STORED, null, "my_script", Collections.emptyMap()); - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -460,18 +500,22 @@ public void testStoredScript() { } public void testUnmapped() throws Exception { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", + Collections.emptyMap() + ); SearchResponse response = client().prepareSearch("idx_unmapped") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -482,18 +526,22 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - Script script = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, - "Double.isNaN(_value0) ? false : (_value0 + _value1 > 100)", Collections.emptyMap()); + Script script = new Script( + ScriptType.INLINE, + CustomScriptPlugin.NAME, + "Double.isNaN(_value0) ? 
false : (_value0 + _value1 > 100)", + Collections.emptyMap() + ); SearchResponse response = client().prepareSearch("idx", "idx_unmapped") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(interval) - .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) - .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) - .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum"))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(interval) + .subAggregation(sum("field2Sum").field(FIELD_2_NAME)) + .subAggregation(sum("field3Sum").field(FIELD_3_NAME)) + .subAggregation(bucketSelector("bucketSelector", script, "field2Sum", "field3Sum")) + ) + .get(); assertSearchResponse(response); @@ -516,19 +564,18 @@ public void testPartiallyUnmapped() throws Exception { public void testEmptyBuckets() { SearchResponse response = client().prepareSearch("idx_with_gaps") - .addAggregation( - histogram("histo") - .field(FIELD_1_NAME) - .interval(1) - .subAggregation( - histogram("inner_histo") - .field(FIELD_1_NAME) - .interval(1) - .extendedBounds(1L, 4L) - .minDocCount(0) - .subAggregation(derivative("derivative", "_count") - .gapPolicy(GapPolicy.INSERT_ZEROS)))) - .get(); + .addAggregation( + histogram("histo").field(FIELD_1_NAME) + .interval(1) + .subAggregation( + histogram("inner_histo").field(FIELD_1_NAME) + .interval(1) + .extendedBounds(1L, 4L) + .minDocCount(0) + .subAggregation(derivative("derivative", "_count").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + ) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java index b173c15805a9b..582bf13b0c7b4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java @@ -11,8 +11,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; @@ -58,9 +58,11 @@ public class BucketSortIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { createIndex(INDEX, INDEX_WITH_GAPS); - client().admin().indices().preparePutMapping(INDEX) - .setSource("time", "type=date", "foo", "type=keyword", "value_1", "type=float", "value_2", "type=float") - .get(); + client().admin() + .indices() + .preparePutMapping(INDEX) + .setSource("time", "type=date", "foo", "type=keyword", "value_1", "type=float", "value_2", "type=float") + .get(); int numTerms = 10; List terms = new ArrayList<>(numTerms); @@ -75,8 +77,9 @@ public void setupSuiteScopeCluster() throws Exception { for (String term : terms) { int termCount = randomIntBetween(3, 6); for (int i = 0; i < termCount; ++i) { - builders.add(client().prepareIndex(INDEX) - .setSource(newDocBuilder(time, term, randomIntBetween(1, 10) * randomDouble()))); + builders.add( + 
client().prepareIndex(INDEX).setSource(newDocBuilder(time, term, randomIntBetween(1, 10) * randomDouble())) + ); } } time += TimeValue.timeValueHours(1).millis(); @@ -111,9 +114,9 @@ private XContentBuilder newDocBuilder(long timeMillis, String fooValue, Double v public void testEmptyBucketSort() { SearchResponse response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR)) - .get(); + .setSize(0) + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR)) + .get(); assertSearchResponse(response); @@ -129,10 +132,13 @@ public void testEmptyBucketSort() { // Now let's test using size response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR) - .subAggregation(bucketSort("bucketSort", Collections.emptyList()).size(3))) - .get(); + .setSize(0) + .addAggregation( + dateHistogram("time_buckets").field(TIME_FIELD) + .fixedInterval(DateHistogramInterval.HOUR) + .subAggregation(bucketSort("bucketSort", Collections.emptyList()).size(3)) + ) + .get(); assertSearchResponse(response); @@ -146,10 +152,13 @@ public void testEmptyBucketSort() { // Finally, let's test using size + from response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR) - .subAggregation(bucketSort("bucketSort", Collections.emptyList()).size(3).from(2))) - .get(); + .setSize(0) + .addAggregation( + dateHistogram("time_buckets").field(TIME_FIELD) + .fixedInterval(DateHistogramInterval.HOUR) + .subAggregation(bucketSort("bucketSort", Collections.emptyList()).size(3).from(2)) + ) + .get(); assertSearchResponse(response); @@ -164,10 +173,11 @@ public void testEmptyBucketSort() { public void testSortTermsOnKey() { SearchResponse response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key"))))) - .get(); + .setSize(0) + .addAggregation( + terms("foos").field(TERM_FIELD).subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key")))) + ) + .get(); assertSearchResponse(response); @@ -184,8 +194,10 @@ public void testSortTermsOnKey() { public void testSortTermsOnKeyWithSize() { SearchResponse response = client().prepareSearch(INDEX) .setSize(0) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key"))).size(3))) + .addAggregation( + terms("foos").field(TERM_FIELD) + .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key"))).size(3)) + ) .get(); assertSearchResponse(response); @@ -203,12 +215,13 @@ public void testSortTermsOnKeyWithSize() { public void testSortTermsOnSubAggregation() { SearchResponse response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( - new FieldSortBuilder("avg_value").order(SortOrder.DESC))))) - .get(); + .setSize(0) + .addAggregation( + terms("foos").field(TERM_FIELD) + .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) + .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("avg_value").order(SortOrder.DESC)))) + ) + .get(); 
assertSearchResponse(response); @@ -224,12 +237,15 @@ public void testSortTermsOnSubAggregation() { } response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( - new FieldSortBuilder("avg_value").order(SortOrder.DESC))).size(2).from(3))) - .get(); + .setSize(0) + .addAggregation( + terms("foos").field(TERM_FIELD) + .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) + .subAggregation( + bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("avg_value").order(SortOrder.DESC))).size(2).from(3) + ) + ) + .get(); assertSearchResponse(response); @@ -244,10 +260,12 @@ public void testSortTermsOnSubAggregation() { public void testSortTermsOnSubAggregationPreservesOrderOnEquals() { SearchResponse response = client().prepareSearch(INDEX) .setSize(0) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(bucketSort("keyBucketSort", Arrays.asList(new FieldSortBuilder("_key")))) - .subAggregation(max("max").field("missingValue").missing(1)) - .subAggregation(bucketSort("maxBucketSort", Arrays.asList(new FieldSortBuilder("max"))))) + .addAggregation( + terms("foos").field(TERM_FIELD) + .subAggregation(bucketSort("keyBucketSort", Arrays.asList(new FieldSortBuilder("_key")))) + .subAggregation(max("max").field("missingValue").missing(1)) + .subAggregation(bucketSort("maxBucketSort", Arrays.asList(new FieldSortBuilder("max")))) + ) .get(); assertSearchResponse(response); @@ -266,13 +284,21 @@ public void testSortTermsOnSubAggregationPreservesOrderOnEquals() { public void testSortTermsOnCountWithSecondarySort() { SearchResponse response = client().prepareSearch(INDEX) - .setSize(0) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( + .setSize(0) + .addAggregation( + terms("foos").field(TERM_FIELD) + .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) + .subAggregation( + bucketSort( + "bucketSort", + Arrays.asList( new FieldSortBuilder("_count").order(SortOrder.ASC), - new FieldSortBuilder("avg_value").order(SortOrder.DESC))))) - .get(); + new FieldSortBuilder("avg_value").order(SortOrder.DESC) + ) + ) + ) + ) + .get(); assertSearchResponse(response); @@ -295,8 +321,8 @@ public void testSortTermsOnCountWithSecondarySort() { public void testSortDateHistogramDescending() { SearchResponse response = client().prepareSearch(INDEX) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR)) - .get(); + .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR)) + .get(); assertSearchResponse(response); @@ -306,10 +332,12 @@ public void testSortDateHistogramDescending() { List ascendingTimeBuckets = histo.getBuckets(); response = client().prepareSearch(INDEX) - .addAggregation(dateHistogram("time_buckets").field(TIME_FIELD).fixedInterval(DateHistogramInterval.HOUR) - .subAggregation(bucketSort("bucketSort", Arrays.asList( - new FieldSortBuilder("_key").order(SortOrder.DESC))))) - .get(); + .addAggregation( + dateHistogram("time_buckets").field(TIME_FIELD) + .fixedInterval(DateHistogramInterval.HOUR) + .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key").order(SortOrder.DESC)))) + ) + .get(); assertSearchResponse(response); @@ -327,12 +355,17 @@ public void testSortDateHistogramDescending() { 
public void testSortHistogram_GivenGapsAndGapPolicyIsSkip() { SearchResponse response = client().prepareSearch(INDEX_WITH_GAPS) - .addAggregation(histogram("time_buckets").field(TIME_FIELD).interval(1) - .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( - new FieldSortBuilder("avg_value").order(SortOrder.DESC))).gapPolicy( - BucketHelpers.GapPolicy.SKIP))) - .get(); + .addAggregation( + histogram("time_buckets").field(TIME_FIELD) + .interval(1) + .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) + .subAggregation( + bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("avg_value").order(SortOrder.DESC))).gapPolicy( + BucketHelpers.GapPolicy.SKIP + ) + ) + ) + .get(); assertSearchResponse(response); @@ -347,12 +380,17 @@ public void testSortHistogram_GivenGapsAndGapPolicyIsSkip() { public void testSortHistogram_GivenGapsAndGapPolicyIsSkipAndSizeIsLessThanAvailableBuckets() { SearchResponse response = client().prepareSearch(INDEX_WITH_GAPS) - .addAggregation(histogram("time_buckets").field(TIME_FIELD).interval(1) - .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( - new FieldSortBuilder("avg_value").order(SortOrder.DESC))).gapPolicy( - BucketHelpers.GapPolicy.SKIP).size(2))) - .get(); + .addAggregation( + histogram("time_buckets").field(TIME_FIELD) + .interval(1) + .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) + .subAggregation( + bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("avg_value").order(SortOrder.DESC))).gapPolicy( + BucketHelpers.GapPolicy.SKIP + ).size(2) + ) + ) + .get(); assertSearchResponse(response); @@ -367,14 +405,22 @@ public void testSortHistogram_GivenGapsAndGapPolicyIsSkipAndSizeIsLessThanAvaila public void testSortHistogram_GivenGapsAndGapPolicyIsSkipAndPrimarySortHasGaps() { SearchResponse response = client().prepareSearch(INDEX_WITH_GAPS) - .addAggregation(histogram("time_buckets").field(TIME_FIELD).interval(1) - .subAggregation(avg("avg_value_1").field(VALUE_1_FIELD)) - .subAggregation(avg("avg_value_2").field(VALUE_2_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( + .addAggregation( + histogram("time_buckets").field(TIME_FIELD) + .interval(1) + .subAggregation(avg("avg_value_1").field(VALUE_1_FIELD)) + .subAggregation(avg("avg_value_2").field(VALUE_2_FIELD)) + .subAggregation( + bucketSort( + "bucketSort", + Arrays.asList( new FieldSortBuilder("avg_value_1").order(SortOrder.DESC), - new FieldSortBuilder("avg_value_2").order(SortOrder.DESC))).gapPolicy( - BucketHelpers.GapPolicy.SKIP))) - .get(); + new FieldSortBuilder("avg_value_2").order(SortOrder.DESC) + ) + ).gapPolicy(BucketHelpers.GapPolicy.SKIP) + ) + ) + .get(); assertSearchResponse(response); @@ -390,14 +436,22 @@ public void testSortHistogram_GivenGapsAndGapPolicyIsSkipAndPrimarySortHasGaps() public void testSortHistogram_GivenGapsAndGapPolicyIsSkipAndSecondarySortHasGaps() { SearchResponse response = client().prepareSearch(INDEX_WITH_GAPS) - .addAggregation(histogram("time_buckets").field(TIME_FIELD).interval(1) - .subAggregation(avg("avg_value_1").field(VALUE_1_FIELD)) - .subAggregation(avg("avg_value_2").field(VALUE_2_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( + .addAggregation( + histogram("time_buckets").field(TIME_FIELD) + .interval(1) + .subAggregation(avg("avg_value_1").field(VALUE_1_FIELD)) + .subAggregation(avg("avg_value_2").field(VALUE_2_FIELD)) + .subAggregation( + bucketSort( + "bucketSort", + 
Arrays.asList( new FieldSortBuilder("avg_value_2").order(SortOrder.DESC), - new FieldSortBuilder("avg_value_1").order(SortOrder.ASC))).gapPolicy( - BucketHelpers.GapPolicy.SKIP))) - .get(); + new FieldSortBuilder("avg_value_1").order(SortOrder.ASC) + ) + ).gapPolicy(BucketHelpers.GapPolicy.SKIP) + ) + ) + .get(); assertSearchResponse(response); @@ -413,12 +467,17 @@ public void testSortHistogram_GivenGapsAndGapPolicyIsSkipAndSecondarySortHasGaps public void testSortHistogram_GivenGapsAndGapPolicyIsInsertZeros() { SearchResponse response = client().prepareSearch(INDEX_WITH_GAPS) - .addAggregation(histogram("time_buckets").field(TIME_FIELD).interval(1) - .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) - .subAggregation(bucketSort("bucketSort", Arrays.asList( - new FieldSortBuilder("avg_value").order(SortOrder.DESC))).gapPolicy( - BucketHelpers.GapPolicy.INSERT_ZEROS))) - .get(); + .addAggregation( + histogram("time_buckets").field(TIME_FIELD) + .interval(1) + .subAggregation(avg("avg_value").field(VALUE_1_FIELD)) + .subAggregation( + bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("avg_value").order(SortOrder.DESC))).gapPolicy( + BucketHelpers.GapPolicy.INSERT_ZEROS + ) + ) + ) + .get(); assertSearchResponse(response); @@ -434,11 +493,12 @@ public void testSortHistogram_GivenGapsAndGapPolicyIsInsertZeros() { public void testEmptyBuckets() { SearchResponse response = client().prepareSearch(INDEX) - .setSize(0) - .setQuery(QueryBuilders.existsQuery("non-field")) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key"))))) - .get(); + .setSize(0) + .setQuery(QueryBuilders.existsQuery("non-field")) + .addAggregation( + terms("foos").field(TERM_FIELD).subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("_key")))) + ) + .get(); assertSearchResponse(response); @@ -449,21 +509,27 @@ public void testEmptyBuckets() { } public void testInvalidPath() { - Exception e = expectThrows(ActionRequestValidationException.class, - () -> client().prepareSearch(INDEX) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("invalid"))))) - .get()); + Exception e = expectThrows( + ActionRequestValidationException.class, + () -> client().prepareSearch(INDEX) + .addAggregation( + terms("foos").field(TERM_FIELD).subAggregation(bucketSort("bucketSort", Arrays.asList(new FieldSortBuilder("invalid")))) + ) + .get() + ); assertThat(e.getMessage(), containsString("No aggregation found for path [invalid]")); } public void testNeitherSortsNorSizeSpecifiedAndFromIsDefault_ShouldThrowValidation() { - Exception e = expectThrows(ActionRequestValidationException.class, - () -> client().prepareSearch(INDEX) - .addAggregation(terms("foos").field(TERM_FIELD) - .subAggregation(bucketSort("bucketSort", Collections.emptyList()))) - .get()); - assertThat(e.getMessage(), containsString("[bucketSort] is configured to perform nothing." + - " Please set either of [sort, size, from] to use bucket_sort")); + Exception e = expectThrows( + ActionRequestValidationException.class, + () -> client().prepareSearch(INDEX) + .addAggregation(terms("foos").field(TERM_FIELD).subAggregation(bucketSort("bucketSort", Collections.emptyList()))) + .get() + ); + assertThat( + e.getMessage(), + containsString("[bucketSort] is configured to perform nothing." 
+ " Please set either of [sort, size, from] to use bucket_sort") + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java index 7d01a18424467..4744eafecef12 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -57,14 +57,21 @@ private ZonedDateTime date(int month, int day) { } private static IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { - return client().prepareIndex(idx).setSource( - jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); + return client().prepareIndex(idx).setSource(jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); } private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { - return client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field("value", value).timeField("date", date(month, day)).startArray("dates") - .timeValue(date(month, day)).timeValue(date(month + 1, day + 1)).endArray().endObject()); + return client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field("value", value) + .timeField("date", date(month, day)) + .startArray("dates") + .timeValue(date(month, day)) + .timeValue(date(month + 1, day + 1)) + .endArray() + .endObject() + ); } @Override @@ -75,15 +82,22 @@ public void setupSuiteScopeCluster() throws Exception { prepareCreate("empty_bucket_idx").setMapping("value", "type=integer").get(); List builders = new ArrayList<>(); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field("value", i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field("value", i * 2).endObject()) + ); } - builders.addAll(Arrays.asList(indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 + builders.addAll( + Arrays.asList( + indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3 indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16 indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3 indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16 - indexDoc(3, 23, 6))); // date: Mar 23, dates: Mar 23, Apr 24 + indexDoc(3, 23, 6) + ) + ); // date: Mar 23, dates: Mar 23, Apr 24 indexRandom(true, builders); ensureSearchable(); } @@ -94,11 +108,14 @@ public void afterEachTest() throws IOException { } public void testSingleValuedField() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count")) + ) + .get(); assertSearchResponse(response); @@ -136,11 +153,14 @@ public void testSingleValuedField() throws Exception { } public void testSingleValuedFieldNormalised() throws Exception { - SearchResponse 
response = client() - .prepareSearch("idx") - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.DAY))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.DAY)) + ) + .get(); assertSearchResponse(response); @@ -197,12 +217,15 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep indexRandom(true, builders); ensureSearchable(); - SearchResponse response = client() - .prepareSearch(IDX_DST_START) - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY) - .timeZone(timezone).minDocCount(0) - .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR))) - .get(); + SearchResponse response = client().prepareSearch(IDX_DST_START) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .timeZone(timezone) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR)) + ) + .get(); assertSearchResponse(response); @@ -213,21 +236,25 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep assertThat(buckets.size(), equalTo(4)); DateFormatter dateFormatter = DateFormatter.forPattern("uuuu-MM-dd"); - ZonedDateTime expectedKeyFirstBucket = - LocalDate.from(dateFormatter.parse("2012-03-24")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeyFirstBucket = LocalDate.from(dateFormatter.parse("2012-03-24")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); - ZonedDateTime expectedKeySecondBucket = - LocalDate.from(dateFormatter.parse("2012-03-25")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); - assertBucket(buckets.get(1), expectedKeySecondBucket,2L, notNullValue(), 1d, 1d / 24d); + ZonedDateTime expectedKeySecondBucket = LocalDate.from(dateFormatter.parse("2012-03-25")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d, 1d / 24d); // the following is normalized using a 23h bucket width - ZonedDateTime expectedKeyThirdBucket = - LocalDate.from(dateFormatter.parse("2012-03-26")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeyThirdBucket = LocalDate.from(dateFormatter.parse("2012-03-26")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 23d); - ZonedDateTime expectedKeyFourthBucket = - LocalDate.from(dateFormatter.parse("2012-03-27")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeyFourthBucket = LocalDate.from(dateFormatter.parse("2012-03-27")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 24d); } @@ -248,12 +275,15 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti indexRandom(true, builders); ensureSearchable(); - SearchResponse response = client() - .prepareSearch(IDX_DST_END) - 
.addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.DAY) - .timeZone(timezone).minDocCount(0) - .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR))) - .get(); + SearchResponse response = client().prepareSearch(IDX_DST_END) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.DAY) + .timeZone(timezone) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.HOUR)) + ) + .get(); assertSearchResponse(response); @@ -265,21 +295,25 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti DateFormatter dateFormatter = DateFormatter.forPattern("uuuu-MM-dd").withZone(ZoneOffset.UTC); - ZonedDateTime expectedKeyFirstBucket = - LocalDate.from(dateFormatter.parse("2012-10-27")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeyFirstBucket = LocalDate.from(dateFormatter.parse("2012-10-27")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); - ZonedDateTime expectedKeySecondBucket = - LocalDate.from(dateFormatter.parse("2012-10-28")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeySecondBucket = LocalDate.from(dateFormatter.parse("2012-10-28")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d, 1d / 24d); // the following is normalized using a 25h bucket width - ZonedDateTime expectedKeyThirdBucket = - LocalDate.from(dateFormatter.parse("2012-10-29")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeyThirdBucket = LocalDate.from(dateFormatter.parse("2012-10-29")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 25d); - ZonedDateTime expectedKeyFourthBucket = - LocalDate.from(dateFormatter.parse("2012-10-30")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + ZonedDateTime expectedKeyFourthBucket = LocalDate.from(dateFormatter.parse("2012-10-30")) + .atStartOfDay(timezone) + .withZoneSameInstant(ZoneOffset.UTC); assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 24d); } @@ -301,12 +335,15 @@ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exce indexRandom(true, builders); ensureSearchable(); - SearchResponse response = client() - .prepareSearch(IDX_DST_KATHMANDU) - .addAggregation(dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.HOUR) - .timeZone(timezone).minDocCount(0) - .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.MINUTE))) - .get(); + SearchResponse response = client().prepareSearch(IDX_DST_KATHMANDU) + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.HOUR) + .timeZone(timezone) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count").unit(DateHistogramInterval.MINUTE)) + ) + .get(); assertSearchResponse(response); @@ -318,22 +355,26 @@ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exce DateFormatter dateFormatter = DateFormatter.forPattern("uuuu-MM-dd'T'HH:mm:ss").withZone(ZoneOffset.UTC); - ZonedDateTime expectedKeyFirstBucket = - 
LocalDateTime.from(dateFormatter.parse("1985-12-31T22:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); - assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null,null); + ZonedDateTime expectedKeyFirstBucket = LocalDateTime.from(dateFormatter.parse("1985-12-31T22:00:00")) + .atZone(timezone) + .withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); - ZonedDateTime expectedKeySecondBucket = - LocalDateTime.from(dateFormatter.parse("1985-12-31T23:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); - assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d,1d / 60d); + ZonedDateTime expectedKeySecondBucket = LocalDateTime.from(dateFormatter.parse("1985-12-31T23:00:00")) + .atZone(timezone) + .withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d, 1d / 60d); // the following is normalized using a 105min bucket width - ZonedDateTime expectedKeyThirdBucket = - LocalDateTime.from(dateFormatter.parse("1986-01-01T01:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); - assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d,1d / 105d); - - ZonedDateTime expectedKeyFourthBucket = - LocalDateTime.from(dateFormatter.parse("1986-01-01T02:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); - assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d,1d / 60d); + ZonedDateTime expectedKeyThirdBucket = LocalDateTime.from(dateFormatter.parse("1986-01-01T01:00:00")) + .atZone(timezone) + .withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 105d); + + ZonedDateTime expectedKeyFourthBucket = LocalDateTime.from(dateFormatter.parse("1986-01-01T02:00:00")) + .atZone(timezone) + .withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 60d); } private static void addNTimes(int amount, String index, ZonedDateTime dateTime, List builders) throws Exception { @@ -342,8 +383,14 @@ private static void addNTimes(int amount, String index, ZonedDateTime dateTime, } } - private static void assertBucket(Histogram.Bucket bucket, ZonedDateTime expectedKey, long expectedDocCount, - Matcher derivativeMatcher, Double derivative, Double normalizedDerivative) { + private static void assertBucket( + Histogram.Bucket bucket, + ZonedDateTime expectedKey, + long expectedDocCount, + Matcher derivativeMatcher, + Double derivative, + Double normalizedDerivative + ) { assertThat(bucket, notNullValue()); assertThat((ZonedDateTime) bucket.getKey(), equalTo(expectedKey)); assertThat(bucket.getDocCount(), equalTo(expectedDocCount)); @@ -356,12 +403,15 @@ private static void assertBucket(Histogram.Bucket bucket, ZonedDateTime expected } public void testSingleValuedFieldWithSubAggregation() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(sum("sum").field("value")).subAggregation(derivative("deriv", "sum"))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + .subAggregation(sum("sum").field("value")) + .subAggregation(derivative("deriv", "sum")) + ) + .get(); 
assertSearchResponse(response); @@ -370,9 +420,9 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(histo.getName(), equalTo("histo")); List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)histo).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); - Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) histo).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) histo).getProperty("_count"); + Object[] propertiesCounts = (Object[]) ((InternalAggregation) histo).getProperty("sum.value"); ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); @@ -401,8 +451,13 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { deriv = bucket.getAggregations().get("deriv"); assertThat(deriv, notNullValue()); assertThat(deriv.value(), equalTo(4.0)); - assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty( - "histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0)); + assertThat( + ((InternalMultiBucketAggregation.InternalBucket) bucket).getProperty( + "histo", + AggregationPath.parse("deriv.value").getPathElementsAsStringList() + ), + equalTo(4.0) + ); assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); assertThat((long) propertiesDocCounts[1], equalTo(2L)); assertThat((double) propertiesCounts[1], equalTo(5.0)); @@ -419,19 +474,27 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { deriv = bucket.getAggregations().get("deriv"); assertThat(deriv, notNullValue()); assertThat(deriv.value(), equalTo(10.0)); - assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty( - "histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0)); + assertThat( + ((InternalMultiBucketAggregation.InternalBucket) bucket).getProperty( + "histo", + AggregationPath.parse("deriv.value").getPathElementsAsStringList() + ), + equalTo(10.0) + ); assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); assertThat((long) propertiesDocCounts[2], equalTo(3L)); assertThat((double) propertiesCounts[2], equalTo(15.0)); } public void testMultiValuedField() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - dateHistogram("histo").field("dates").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("dates") + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count")) + ) + .get(); assertSearchResponse(response); @@ -460,7 +523,7 @@ public void testMultiValuedField() throws Exception { assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(2.0)); - key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0,ZoneOffset.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); @@ -482,11 +545,14 @@ public void testMultiValuedField() throws Exception { } public void testUnmapped() throws 
Exception { - SearchResponse response = client() - .prepareSearch("idx_unmapped") - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse response = client().prepareSearch("idx_unmapped") + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count")) + ) + .get(); assertSearchResponse(response); @@ -497,11 +563,14 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") - .addAggregation( - dateHistogram("histo").field("date").calendarInterval(DateHistogramInterval.MONTH).minDocCount(0) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") + .addAggregation( + dateHistogram("histo").field("date") + .calendarInterval(DateHistogramInterval.MONTH) + .minDocCount(0) + .subAggregation(derivative("deriv", "_count")) + ) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java index 1087bbac9bbb7..52db45710607b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/DerivativeIT.java @@ -133,8 +133,7 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numBuckets_empty_rnd; i++) { valueCounts_empty_rnd[i] = (long) randomIntBetween(1, 10); // make approximately half of the buckets empty - if (randomBoolean()) - valueCounts_empty_rnd[i] = 0L; + if (randomBoolean()) valueCounts_empty_rnd[i] = 0L; for (int docs = 0; docs < valueCounts_empty_rnd[i]; docs++) { builders.add(client().prepareIndex("empty_bucket_idx_rnd").setSource(newDocBuilder(i))); numDocsEmptyIdx_rnd++; @@ -157,12 +156,14 @@ private XContentBuilder newDocBuilder(int singleValueFieldValue) throws IOExcept */ public void testDocCountDerivative() { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv", "_count")) - .subAggregation(derivative("2nd_deriv", "deriv"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .subAggregation(derivative("deriv", "_count")) + .subAggregation(derivative("2nd_deriv", "deriv")) + ) + .get(); assertSearchResponse(response); @@ -196,12 +197,15 @@ public void testDocCountDerivative() { * test first and second derivative on the sing */ public void testSingleValuedField_normalised() { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0) - .subAggregation(derivative("deriv", "_count").unit("1ms")) - .subAggregation(derivative("2nd_deriv", "deriv").unit("10ms"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .minDocCount(0) + 
.subAggregation(derivative("deriv", "_count").unit("1ms")) + .subAggregation(derivative("2nd_deriv", "deriv").unit("10ms")) + ) + .get(); assertSearchResponse(response); @@ -234,12 +238,14 @@ public void testSingleValuedField_normalised() { } public void testSingleValueAggDerivative() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(derivative("deriv", "sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv", "sum")) + ) + .get(); assertSearchResponse(response); @@ -247,9 +253,9 @@ public void testSingleValueAggDerivative() throws Exception { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); - Object[] propertiesKeys = (Object[]) ((InternalAggregation)deriv).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)deriv).getProperty("_count"); - Object[] propertiesSumCounts = (Object[]) ((InternalAggregation)deriv).getProperty("sum.value"); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) deriv).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) deriv).getProperty("_count"); + Object[] propertiesSumCounts = (Object[]) ((InternalAggregation) deriv).getProperty("sum.value"); List buckets = new ArrayList<>(deriv.getBuckets()); Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets @@ -266,9 +272,13 @@ public void testSingleValueAggDerivative() throws Exception { assertThat(sumDeriv, notNullValue()); long sumDerivValue = expectedSum - expectedSumPreviousBucket; assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); - assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty("histo", - AggregationPath.parse("deriv.value").getPathElementsAsStringList()), - equalTo((double) sumDerivValue)); + assertThat( + ((InternalMultiBucketAggregation.InternalBucket) bucket).getProperty( + "histo", + AggregationPath.parse("deriv.value").getPathElementsAsStringList() + ), + equalTo((double) sumDerivValue) + ); } else { assertThat(sumDeriv, nullValue()); } @@ -280,12 +290,14 @@ public void testSingleValueAggDerivative() throws Exception { } public void testMultiValueAggDerivative() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(derivative("deriv", "stats.sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv", "stats.sum")) + ) + .get(); assertSearchResponse(response); @@ -293,9 +305,9 @@ public void testMultiValueAggDerivative() throws Exception { assertThat(deriv, notNullValue()); assertThat(deriv.getName(), equalTo("histo")); assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); - Object[] propertiesKeys = (Object[]) 
((InternalAggregation)deriv).getProperty("_key"); - Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)deriv).getProperty("_count"); - Object[] propertiesSumCounts = (Object[]) ((InternalAggregation)deriv).getProperty("stats.sum"); + Object[] propertiesKeys = (Object[]) ((InternalAggregation) deriv).getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) ((InternalAggregation) deriv).getProperty("_count"); + Object[] propertiesSumCounts = (Object[]) ((InternalAggregation) deriv).getProperty("stats.sum"); List buckets = new ArrayList<>(deriv.getBuckets()); Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets @@ -312,9 +324,13 @@ public void testMultiValueAggDerivative() throws Exception { assertThat(sumDeriv, notNullValue()); long sumDerivValue = expectedSum - expectedSumPreviousBucket; assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); - assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty("histo", - AggregationPath.parse("deriv.value").getPathElementsAsStringList()), - equalTo((double) sumDerivValue)); + assertThat( + ((InternalMultiBucketAggregation.InternalBucket) bucket).getProperty( + "histo", + AggregationPath.parse("deriv.value").getPathElementsAsStringList() + ), + equalTo((double) sumDerivValue) + ); } else { assertThat(sumDeriv, nullValue()); } @@ -326,11 +342,11 @@ public void testMultiValueAggDerivative() throws Exception { } public void testUnmapped() throws Exception { - SearchResponse response = client() - .prepareSearch("idx_unmapped") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse response = client().prepareSearch("idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).subAggregation(derivative("deriv", "_count")) + ) + .get(); assertSearchResponse(response); @@ -341,11 +357,11 @@ public void testUnmapped() throws Exception { } public void testPartiallyUnmapped() throws Exception { - SearchResponse response = client() - .prepareSearch("idx", "idx_unmapped") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse response = client().prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).subAggregation(derivative("deriv", "_count")) + ) + .get(); assertSearchResponse(response); @@ -369,12 +385,10 @@ public void testPartiallyUnmapped() throws Exception { } public void testDocCountDerivativeWithGaps() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) - .subAggregation(derivative("deriv", "_count"))).get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1).subAggregation(derivative("deriv", "_count"))) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx)); @@ -397,14 +411,15 @@ public void testDocCountDerivativeWithGaps() throws Exception { } public void testDocCountDerivativeWithGaps_random() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx_rnd") - .setQuery(matchAllQuery()) - 
.addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) - .extendedBounds(0L, numBuckets_empty_rnd - 1) - .subAggregation(derivative("deriv", "_count").gapPolicy(randomFrom(GapPolicy.values())))) - .get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx_rnd") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .extendedBounds(0L, numBuckets_empty_rnd - 1) + .subAggregation(derivative("deriv", "_count").gapPolicy(randomFrom(GapPolicy.values()))) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx_rnd)); @@ -427,12 +442,14 @@ public void testDocCountDerivativeWithGaps_random() throws Exception { } public void testDocCountDerivativeWithGaps_insertZeros() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) - .subAggregation(derivative("deriv", "_count").gapPolicy(GapPolicy.INSERT_ZEROS))).get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .subAggregation(derivative("deriv", "_count").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx)); @@ -455,13 +472,15 @@ public void testDocCountDerivativeWithGaps_insertZeros() throws Exception { } public void testSingleValueAggDerivativeWithGaps() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(derivative("deriv", "sum"))).get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv", "sum")) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx)); @@ -496,13 +515,15 @@ public void testSingleValueAggDerivativeWithGaps() throws Exception { } public void testSingleValueAggDerivativeWithGaps_insertZeros() throws Exception { - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(derivative("deriv", "sum").gapPolicy(GapPolicy.INSERT_ZEROS))).get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv", "sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx)); @@ -534,14 +555,16 @@ public void testSingleValueAggDerivativeWithGaps_insertZeros() throws Exception public void testSingleValueAggDerivativeWithGaps_random() throws Exception { GapPolicy gapPolicy = 
randomFrom(GapPolicy.values()); - SearchResponse searchResponse = client() - .prepareSearch("empty_bucket_idx_rnd") - .setQuery(matchAllQuery()) - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) - .extendedBounds(0L, (long) numBuckets_empty_rnd - 1) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - .subAggregation(derivative("deriv", "sum").gapPolicy(gapPolicy))).get(); + SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx_rnd") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(1) + .extendedBounds(0L, (long) numBuckets_empty_rnd - 1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv", "sum").gapPolicy(gapPolicy)) + ) + .get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numDocsEmptyIdx_rnd)); @@ -578,14 +601,17 @@ public void testSingleValueAggDerivativeWithGaps_random() throws Exception { public void testSingleValueAggDerivative_invalidPath() throws Exception { try { client().prepareSearch("idx") - .addAggregation( - histogram("histo") - .field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .subAggregation( - filters("filters", QueryBuilders.termQuery("tag", "foo")).subAggregation( - sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(derivative("deriv", "filters>get>sum"))).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .subAggregation( + filters("filters", QueryBuilders.termQuery("tag", "foo")).subAggregation( + sum("sum").field(SINGLE_VALUED_FIELD_NAME) + ) + ) + .subAggregation(derivative("deriv", "filters>get>sum")) + ) + .get(); fail("Expected an Exception but didn't get one"); } catch (Exception e) { Throwable cause = ExceptionsHelper.unwrapCause(e); @@ -612,29 +638,31 @@ public void testDerivDerivNPE() throws Exception { value = null; } - XContentBuilder doc = jsonBuilder() - .startObject() - .field("tick", i) - .field("value", value) - .endObject(); + XContentBuilder doc = jsonBuilder().startObject().field("tick", i).field("value", value).endObject(); client().prepareIndex("deriv_npe").setSource(doc).get(); } refresh(); - SearchResponse response = client() - .prepareSearch("deriv_npe") - .addAggregation( - histogram("histo").field("tick").interval(1) - .subAggregation(avg("avg").field("value")) - .subAggregation(derivative("deriv1", "avg")) - .subAggregation(derivative("deriv2", "deriv1"))).get(); + SearchResponse response = client().prepareSearch("deriv_npe") + .addAggregation( + histogram("histo").field("tick") + .interval(1) + .subAggregation(avg("avg").field("value")) + .subAggregation(derivative("deriv1", "avg")) + .subAggregation(derivative("deriv2", "deriv1")) + ) + .get(); assertSearchResponse(response); } - private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, - final long expectedDocCount) { + private void checkBucketKeyAndDocCount( + final String msg, + final Histogram.Bucket bucket, + final long expectedKey, + final long expectedDocCount + ) { assertThat(msg, bucket, notNullValue()); assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 6f673cdb4aecc..ecef1b8aa0f86 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -50,8 +50,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped", "idx_gappy"); numDocs = randomIntBetween(6, 20); @@ -67,9 +66,15 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } @@ -77,14 +82,20 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < 6; i++) { // creates 6 documents where the value of the field is 0, 1, 2, 3, // 3, 5 - builders.add(client().prepareIndex("idx_gappy").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i == 4 ? 3 : i).endObject())); + builders.add( + client().prepareIndex("idx_gappy") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i == 4 ? 3 : i).endObject()) + ); } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -96,8 +107,9 @@ public void setupSuiteScopeCluster() throws Exception { public void testGappyIndexWithSigma() { double sigma = randomDoubleBetween(1.0, 6.0, true); SearchResponse response = client().prepareSearch("idx_gappy") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count").sigma(sigma)).get(); + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1L)) + .addAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count").sigma(sigma)) + .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); @@ -126,7 +138,7 @@ public void testGappyIndexWithSigma() { double sumOfSqrs = 1.0 + 1.0 + 1.0 + 4.0 + 0.0 + 1.0; double avg = sum / count; double var = (sumOfSqrs - ((sum * sum) / count)) / count; - var = var < 0 ? 0 : var; + var = var < 0 ? 
0 : var; double stdDev = Math.sqrt(var); assertThat(extendedStatsBucketValue, notNullValue()); assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); @@ -144,9 +156,11 @@ public void testGappyIndexWithSigma() { public void testDocCountTopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count")).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count")) + .get(); assertSearchResponse(response); @@ -184,16 +198,16 @@ public void testDocCountTopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count")) + ) + .get(); assertSearchResponse(response); @@ -241,10 +255,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(extendedStatsBucket("extended_stats_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -284,17 +298,19 @@ public void testMetricTopLevel() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -346,19 +362,19 @@ public void testMetricAsSubAgg() throws Exception { } public void 
testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum") - .gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -410,9 +426,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "terms>sum")).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(extendedStatsBucket("extended_stats_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -429,18 +449,22 @@ public void testNoBuckets() throws Exception { } public void testBadSigmaAsSubAgg() throws Exception { - Exception ex = expectThrows(Exception.class, () -> client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum") - .sigma(-1.0))).get()); + Exception ex = expectThrows( + Exception.class, + () -> client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum").sigma(-1.0)) + ) + .get() + ); Throwable cause = ExceptionsHelper.unwrapCause(ex); if (cause == null) { throw ex; @@ -456,17 +480,17 @@ public void testBadSigmaAsSubAgg() throws Exception { } public void testNested() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(extendedStatsBucket("avg_histo_bucket", "histo>_count"))) - .addAggregation(extendedStatsBucket("avg_terms_bucket", "terms>avg_histo_bucket.avg")).get(); + SearchResponse response = 
client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(extendedStatsBucket("avg_histo_bucket", "histo>_count")) + ) + .addAggregation(extendedStatsBucket("avg_terms_bucket", "terms>avg_histo_bucket.avg")) + .get(); assertSearchResponse(response); @@ -508,7 +532,6 @@ public void testNested() throws Exception { assertThat(extendedStatsBucketValue.getName(), equalTo("avg_histo_bucket")); assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgHistoValue)); - aggTermsCount++; aggTermsSum += avgHistoValue; min = Math.min(min, avgHistoValue); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index f73b76054fc4b..6a183f15d1c04 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -61,8 +61,7 @@ public class MaxBucketIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped"); numDocs = randomIntBetween(6, 20); @@ -78,17 +77,26 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -96,9 +104,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testDocCountTopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(maxBucket("max_bucket", "histo>_count")).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(maxBucket("max_bucket", "histo>_count")) + .get(); assertSearchResponse(response); @@ -132,16 +142,16 @@ public void testDocCountTopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(maxBucket("max_bucket", "histo>_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(maxBucket("max_bucket", "histo>_count")) + ) + .get(); assertSearchResponse(response); @@ -185,10 +195,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(maxBucket("max_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(maxBucket("max_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -224,17 +234,19 @@ public void testMetricTopLevel() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(maxBucket("max_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + 
.subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(maxBucket("max_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -282,15 +294,16 @@ public void testMetricAsSubAgg() throws Exception { } public void testMetricAsSubAggOfSingleBucketAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - filter("filter", termQuery("tag", "tag0")) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(maxBucket("max_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + filter("filter", termQuery("tag", "tag0")).subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ).subAggregation(maxBucket("max_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -329,18 +342,19 @@ public void testMetricAsSubAggOfSingleBucketAgg() throws Exception { } public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(maxBucket("max_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(maxBucket("max_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -387,9 +401,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(maxBucket("max_bucket", "terms>sum")).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(maxBucket("max_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -407,17 +425,17 @@ public void testNoBuckets() throws Exception { } public void testNested() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(maxBucket("max_histo_bucket", "histo>_count"))) - .addAggregation(maxBucket("max_terms_bucket", "terms>max_histo_bucket")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + 
terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(maxBucket("max_histo_bucket", "histo>_count")) + ) + .addAggregation(maxBucket("max_terms_bucket", "terms>max_histo_bucket")) + .get(); assertSearchResponse(response); @@ -491,19 +509,36 @@ public void testFieldIsntWrittenOutTwice() throws Exception { // so that there is an UnmappedTerms in the list to reduce. createIndex("foo_1"); - XContentBuilder builder = jsonBuilder().startObject().startObject("properties") - .startObject("@timestamp").field("type", "date").endObject() - .startObject("license").startObject("properties") - .startObject("count").field("type", "long").endObject() - .startObject("partnumber").field("type", "text").startObject("fields").startObject("keyword") - .field("type", "keyword").field("ignore_above", 256) - .endObject().endObject().endObject() - .endObject().endObject().endObject().endObject(); - assertAcked(client().admin().indices().prepareCreate("foo_2") - .setMapping(builder).get()); + XContentBuilder builder = jsonBuilder().startObject() + .startObject("properties") + .startObject("@timestamp") + .field("type", "date") + .endObject() + .startObject("license") + .startObject("properties") + .startObject("count") + .field("type", "long") + .endObject() + .startObject("partnumber") + .field("type", "text") + .startObject("fields") + .startObject("keyword") + .field("type", "keyword") + .field("ignore_above", 256) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + assertAcked(client().admin().indices().prepareCreate("foo_2").setMapping(builder).get()); XContentBuilder docBuilder = jsonBuilder().startObject() - .startObject("license").field("partnumber", "foobar").field("count", 2).endObject() + .startObject("license") + .field("partnumber", "foobar") + .field("count", 2) + .endObject() .field("@timestamp", "2018-07-08T08:07:00.599Z") .endObject(); @@ -513,11 +548,14 @@ public void testFieldIsntWrittenOutTwice() throws Exception { TermsAggregationBuilder groupByLicenseAgg = AggregationBuilders.terms("group_by_license_partnumber") .field("license.partnumber.keyword"); - MaxBucketPipelineAggregationBuilder peakPipelineAggBuilder = - PipelineAggregatorBuilders.maxBucket("peak", "licenses_per_day>total_licenses"); + MaxBucketPipelineAggregationBuilder peakPipelineAggBuilder = PipelineAggregatorBuilders.maxBucket( + "peak", + "licenses_per_day>total_licenses" + ); SumAggregationBuilder sumAggBuilder = AggregationBuilders.sum("total_licenses").field("license.count"); - DateHistogramAggregationBuilder licensePerDayBuilder = - AggregationBuilders.dateHistogram("licenses_per_day").field("@timestamp").fixedInterval(DateHistogramInterval.DAY); + DateHistogramAggregationBuilder licensePerDayBuilder = AggregationBuilders.dateHistogram("licenses_per_day") + .field("@timestamp") + .fixedInterval(DateHistogramInterval.DAY); licensePerDayBuilder.subAggregation(sumAggBuilder); groupByLicenseAgg.subAggregation(licensePerDayBuilder); groupByLicenseAgg.subAggregation(peakPipelineAggBuilder); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index 91bec4a323465..b327164ff5868 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -10,13 +10,13 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; @@ -47,8 +47,7 @@ public class MinBucketIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped"); numDocs = randomIntBetween(6, 20); @@ -64,17 +63,26 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -82,9 +90,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testDocCountTopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(minBucket("min_bucket", "histo>_count")).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(minBucket("min_bucket", "histo>_count")) + .get(); assertSearchResponse(response); @@ -118,16 +128,16 @@ public void testDocCountTopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(minBucket("min_bucket", "histo>_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(minBucket("min_bucket", "histo>_count")) + ) + .get(); assertSearchResponse(response); @@ -171,10 +181,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(minBucket("min_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(minBucket("min_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -210,17 +220,19 @@ public void testMetricTopLevel() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(minBucket("min_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + 
.subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(minBucket("min_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -268,18 +280,19 @@ public void testMetricAsSubAgg() throws Exception { } public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(minBucket("min_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(minBucket("min_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -326,9 +339,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(minBucket("min_bucket", "terms>sum")).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(minBucket("min_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -346,17 +363,17 @@ public void testNoBuckets() throws Exception { } public void testNested() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(minBucket("min_histo_bucket", "histo>_count"))) - .addAggregation(minBucket("min_terms_bucket", "terms>min_histo_bucket")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(minBucket("min_histo_bucket", "histo>_count")) + ) + .addAggregation(minBucket("min_terms_bucket", "terms>min_histo_bucket")) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 8c243a1740a4a..69e0fa84f9086 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -41,7 +41,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { private static final String 
SINGLE_VALUED_FIELD_NAME = "l_value"; - private static final double[] PERCENTS = {0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0}; + private static final double[] PERCENTS = { 0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0 }; static int numDocs; static int interval; static int minRandomValue; @@ -51,8 +51,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped"); numDocs = randomIntBetween(6, 20); @@ -68,17 +67,26 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -86,10 +94,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testDocCountopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(percentilesBucket("percentiles_bucket", "histo>_count") - .setPercents(PERCENTS)).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(PERCENTS)) + .get(); assertSearchResponse(response); @@ -117,17 +126,16 @@ public void testDocCountopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count") - .setPercents(PERCENTS))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(PERCENTS)) + ) + .get(); 
assertSearchResponse(response); @@ -165,11 +173,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum") - .setPercents(PERCENTS)).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(PERCENTS)) + .get(); assertSearchResponse(response); @@ -199,10 +206,10 @@ public void testMetricTopLevel() throws Exception { } public void testMetricTopLevelDefaultPercents() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -232,18 +239,19 @@ public void testMetricTopLevelDefaultPercents() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>sum") - .setPercents(PERCENTS))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(percentilesBucket("percentiles_bucket", "histo>sum").setPercents(PERCENTS)) + ) + .get(); assertSearchResponse(response); @@ -285,20 +293,22 @@ public void testMetricAsSubAgg() throws Exception { } public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>sum") - .gapPolicy(BucketHelpers.GapPolicy.INSERT_ZEROS) - .setPercents(PERCENTS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation( + percentilesBucket("percentiles_bucket", 
"histo>sum").gapPolicy(BucketHelpers.GapPolicy.INSERT_ZEROS) + .setPercents(PERCENTS) + ) + ) + .get(); assertSearchResponse(response); @@ -340,10 +350,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum") - .setPercents(PERCENTS)).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(PERCENTS)) + .get(); assertSearchResponse(response); @@ -363,10 +376,13 @@ public void testNoBuckets() throws Exception { public void testWrongPercents() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum") - .setPercents(PERCENTS)).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(PERCENTS)) + .get(); assertSearchResponse(response); @@ -389,13 +405,13 @@ public void testWrongPercents() throws Exception { } public void testBadPercents() throws Exception { - double[] badPercents = {-1.0, 110.0}; + double[] badPercents = { -1.0, 110.0 }; try { client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum") - .setPercents(badPercents)).get(); + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(badPercents)) + .get(); fail("Illegal percent's were provided but no exception was thrown."); } catch (Exception e) { @@ -416,20 +432,21 @@ public void testBadPercents() throws Exception { } public void testBadPercents_asSubAgg() throws Exception { - double[] badPercents = {-1.0, 110.0}; + double[] badPercents = { -1.0, 110.0 }; try { - client() - .prepareSearch("idx") + client().prepareSearch("idx") .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count") - .setPercents(badPercents))).get(); + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(badPercents)) + ) + .get(); fail("Illegal percent's were provided but no exception was thrown."); } catch (Exception e) { @@ -450,18 +467,17 @@ public void testBadPercents_asSubAgg() throws Exception { } public void testNested() throws 
Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").setPercents(PERCENTS))) - .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket.50") - .setPercents(PERCENTS)).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").setPercents(PERCENTS)) + ) + .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket.50").setPercents(PERCENTS)) + .get(); assertSearchResponse(response); @@ -508,20 +524,18 @@ public void testNested() throws Exception { } public void testNestedWithDecimal() throws Exception { - double[] percent = {99.9}; - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count") - .setPercents(percent))) - .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket[99.9]") - .setPercents(percent)).get(); + double[] percent = { 99.9 }; + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").setPercents(percent)) + ) + .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket[99.9]").setPercents(percent)) + .get(); assertSearchResponse(response); @@ -565,7 +579,7 @@ public void testNestedWithDecimal() throws Exception { assertThat(percentilesBucketValue, notNullValue()); assertThat(percentilesBucketValue.getName(), equalTo("percentile_terms_bucket")); for (Double p : percent) { - double expected = values[(int)((p / 100) * values.length)]; + double expected = values[(int) ((p / 100) * values.length)]; assertThat(percentilesBucketValue.percentile(p), equalTo(expected)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java index 2fd8be334ff3e..c9e7ad2e41ec9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffIT.java @@ -51,7 +51,8 @@ public class SerialDiffIT extends ESIntegTestCase { static Map> testValues; enum MetricTarget { - VALUE ("value"), COUNT("count"); + VALUE("value"), + COUNT("count"); private final String name; @@ -60,14 +61,13 @@ enum MetricTarget { } @Override - public String toString(){ + public 
String toString() { return name; } } - private ValuesSourceAggregationBuilder< - ? extends ValuesSourceAggregationBuilder> randomMetric(String name, String field) { - int rand = randomIntBetween(0,3); + private ValuesSourceAggregationBuilder> randomMetric(String name, String field) { + int rand = randomIntBetween(0, 3); switch (rand) { case 0: @@ -100,8 +100,11 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, assertThat("[_count] diff is not null", countDiff, nullValue()); } else { assertThat("[_count] diff is null", countDiff, notNullValue()); - assertThat("[_count] diff does not match expected [" + countDiff.value() + " vs " + expectedCount + "]", - countDiff.value(), closeTo(expectedCount, 0.1)); + assertThat( + "[_count] diff does not match expected [" + countDiff.value() + " vs " + expectedCount + "]", + countDiff.value(), + closeTo(expectedCount, 0.1) + ); } // This is a gap bucket @@ -110,19 +113,20 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, assertThat("[value] diff is not null", valuesDiff, Matchers.nullValue()); } else { assertThat("[value] diff is null", valuesDiff, notNullValue()); - assertThat("[value] diff does not match expected [" + valuesDiff.value() + " vs " + expectedValue + "]", - valuesDiff.value(), closeTo(expectedValue, 0.1)); + assertThat( + "[value] diff does not match expected [" + valuesDiff.value() + " vs " + expectedValue + "]", + valuesDiff.value(), + closeTo(expectedValue, 0.1) + ); } } - @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); List builders = new ArrayList<>(); - interval = 5; numBuckets = randomIntBetween(10, 80); lag = randomIntBetween(1, numBuckets / 2); @@ -139,9 +143,10 @@ public void setupSuiteScopeCluster() throws Exception { for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) { for (double value : mockBucket.docValues) { - builders.add(client().prepareIndex("idx").setSource(jsonBuilder().startObject() - .field(INTERVAL_FIELD, mockBucket.key) - .field(VALUE_FIELD, value).endObject())); + builders.add( + client().prepareIndex("idx") + .setSource(jsonBuilder().startObject().field(INTERVAL_FIELD, mockBucket.key).field(VALUE_FIELD, value).endObject()) + ); } } @@ -216,19 +221,16 @@ private void setupExpected(MetricTarget target) { } public void testBasicDiff() { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval) - .extendedBounds(0L, (long) (interval * (numBuckets - 1))) - .subAggregation(metric) - .subAggregation(diff("diff_counts", "_count") - .lag(lag) - .gapPolicy(gapPolicy)) - .subAggregation(diff("diff_values", "the_metric") - .lag(lag) - .gapPolicy(gapPolicy)) - ).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD) + .interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(diff("diff_counts", "_count").lag(lag).gapPolicy(gapPolicy)) + .subAggregation(diff("diff_values", "the_metric").lag(lag).gapPolicy(gapPolicy)) + ) + .get(); assertSearchResponse(response); @@ -255,7 +257,7 @@ public void testBasicDiff() { Double expectedValue = expectedValuesIter.next(); assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); - assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + 
assertThat("doc counts do not match", actual.getDocCount(), equalTo((long) expected.count)); assertBucketContents(actual, expectedCount, expectedValue); } @@ -263,16 +265,15 @@ public void testBasicDiff() { public void testInvalidLagSize() { try { - client() - .prepareSearch("idx") + client().prepareSearch("idx") .addAggregation( - histogram("histo").field(INTERVAL_FIELD).interval(interval) - .extendedBounds(0L, (long) (interval * (numBuckets - 1))) - .subAggregation(metric) - .subAggregation(diff("diff_counts", "_count") - .lag(-1) - .gapPolicy(gapPolicy)) - ).get(); + histogram("histo").field(INTERVAL_FIELD) + .interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(diff("diff_counts", "_count").lag(-1).gapPolicy(gapPolicy)) + ) + .get(); } catch (IllegalArgumentException e) { assertThat(e.getMessage(), is("[lag] must be a positive integer: [diff_counts]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index b95895c21779e..ab2700005e785 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -47,8 +47,7 @@ public class StatsBucketIT extends ESIntegTestCase { @Override public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped"); numDocs = randomIntBetween(6, 20); @@ -64,17 +63,26 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -82,9 +90,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testDocCountTopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(statsBucket("stats_bucket", "histo>_count")).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(statsBucket("stats_bucket", "histo>_count")) + .get(); assertSearchResponse(response); @@ -119,16 +129,16 @@ public void testDocCountTopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(statsBucket("stats_bucket", "histo>_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(statsBucket("stats_bucket", "histo>_count")) + ) + .get(); assertSearchResponse(response); @@ -173,10 +183,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(statsBucket("stats_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(statsBucket("stats_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -213,17 +223,19 @@ public void testMetricTopLevel() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(statsBucket("stats_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, 
maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(statsBucket("stats_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -272,18 +284,19 @@ public void testMetricAsSubAgg() throws Exception { } public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(statsBucket("stats_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(statsBucket("stats_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -332,9 +345,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(statsBucket("stats_bucket", "terms>sum")).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(statsBucket("stats_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -351,17 +368,17 @@ public void testNoBuckets() throws Exception { } public void testNested() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(statsBucket("avg_histo_bucket", "histo>_count"))) - .addAggregation(statsBucket("avg_terms_bucket", "terms>avg_histo_bucket.avg")).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(statsBucket("avg_histo_bucket", "histo>_count")) + ) + .addAggregation(statsBucket("avg_terms_bucket", "terms>avg_histo_bucket.avg")) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index 355c8702922ad..6cf7799ca2508 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -47,8 +47,7 @@ public class SumBucketIT extends ESIntegTestCase { @Override public void 
setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx") - .setMapping("tag", "type=keyword").get()); + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); createIndex("idx_unmapped"); numDocs = randomIntBetween(6, 20); @@ -64,17 +63,26 @@ public void setupSuiteScopeCluster() throws Exception { for (int i = 0; i < numDocs; i++) { int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add(client().prepareIndex("idx").setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) - .endObject())); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); valueCounts[bucket]++; } assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); for (int i = 0; i < 2; i++) { - builders.add(client().prepareIndex("empty_bucket_idx").setId("" + i).setSource( - jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); } indexRandom(true, builders); ensureSearchable(); @@ -82,9 +90,11 @@ public void setupSuiteScopeCluster() throws Exception { public void testDocCountTopLevel() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .addAggregation(sumBucket("sum_bucket", "histo>_count")).get(); + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(sumBucket("sum_bucket", "histo>_count")) + .get(); assertSearchResponse(response); @@ -110,16 +120,16 @@ public void testDocCountTopLevel() throws Exception { } public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(sumBucket("sum_bucket", "histo>_count"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(sumBucket("sum_bucket", "histo>_count")) + ) + .get(); assertSearchResponse(response); @@ -155,10 +165,10 @@ public void testDocCountAsSubAgg() throws Exception { } public void testMetricTopLevel() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(sumBucket("sum_bucket", "terms>sum")).get(); + SearchResponse response = client().prepareSearch("idx") + 
.addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(sumBucket("sum_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -186,17 +196,19 @@ public void testMetricTopLevel() throws Exception { } public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(sumBucket("sum_bucket", "histo>sum"))).get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(sumBucket("sum_bucket", "histo>sum")) + ) + .get(); assertSearchResponse(response); @@ -236,18 +248,19 @@ public void testMetricAsSubAgg() throws Exception { } public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .subAggregation(sumBucket("sum_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) - .get(); + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(sumBucket("sum_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) + ) + .get(); assertSearchResponse(response); @@ -287,9 +300,13 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { public void testNoBuckets() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(sumBucket("sum_bucket", "terms>sum")).get(); + .addAggregation( + terms("terms").field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(sumBucket("sum_bucket", "terms>sum")) + .get(); assertSearchResponse(response); @@ -306,17 +323,17 @@ public void testNoBuckets() throws Exception { } public void testNested() throws Exception { - SearchResponse response = client() - .prepareSearch("idx") - .addAggregation( - terms("terms") - .field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) - .extendedBounds(minRandomValue, maxRandomValue)) - .subAggregation(sumBucket("sum_histo_bucket", "histo>_count"))) - .addAggregation(sumBucket("sum_terms_bucket", "terms>sum_histo_bucket")).get(); + SearchResponse response = 
client().prepareSearch("idx") + .addAggregation( + terms("terms").field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(sumBucket("sum_histo_bucket", "histo>_count")) + ) + .addAggregation(sumBucket("sum_terms_bucket", "terms>sum_histo_bucket")) + .get(); assertSearchResponse(response); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 1f911f5d59038..5ee11e0e9a657 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -1120,9 +1120,10 @@ public void testLoadMetadata() throws Exception { assertSearchResponse(response); assertHitCount(response, 1); - Map fields = response.getHits().getAt(0).getFields(); + Map fields = response.getHits().getAt(0).getMetadataFields(); assertThat(fields.get("field1"), nullValue()); assertThat(fields.get("_routing").getValue().toString(), equalTo("1")); + assertThat(response.getHits().getAt(0).getDocumentFields().size(), equalTo(0)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java index 6dbb41810f352..f96ed273dce2c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoPointIT.java @@ -11,17 +11,23 @@ import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; -public class GeoBoundingBoxQueryGeoPointIT extends AbstractGeoBoundingBoxQueryIT { +public class GeoBoundingBoxQueryGeoPointIT extends GeoBoundingBoxQueryIntegTestCase { @Override - public XContentBuilder getMapping(Version version) throws IOException { + public XContentBuilder getMapping() throws IOException { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("location").field("type", "geo_point"); xContentBuilder.endObject().endObject().endObject().endObject(); return xContentBuilder; } + + @Override + public Version randomSupportedVersion() throws IOException { + return VersionUtils.randomIndexCompatibleVersion(random()); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java index 22abcc9014d20..ab4b5fbf10b95 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryGeoShapeIT.java @@ -11,20 +11,23 @@ import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; -public class GeoBoundingBoxQueryGeoShapeIT extends AbstractGeoBoundingBoxQueryIT { 
+public class GeoBoundingBoxQueryGeoShapeIT extends GeoBoundingBoxQueryIntegTestCase { @Override - public XContentBuilder getMapping(Version version) throws IOException { + public XContentBuilder getMapping() throws IOException { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("location").field("type", "geo_shape"); - if (version.before(Version.V_8_0_0) && randomBoolean()) { - xContentBuilder.field("strategy", "recursive"); - } xContentBuilder.endObject().endObject().endObject().endObject(); return xContentBuilder; } + + @Override + public Version randomSupportedVersion() throws IOException { + return VersionUtils.randomIndexCompatibleVersion(random()); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java new file mode 100644 index 0000000000000..c12897ac2d63d --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.geo; + +import org.elasticsearch.Version; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; + +public class GeoBoundingBoxQueryLegacyGeoShapeIT extends GeoBoundingBoxQueryIntegTestCase { + + @Override + public XContentBuilder getMapping() throws IOException { + return XContentFactory.jsonBuilder().startObject().startObject("_doc") + .startObject("properties").startObject("location").field("type", "geo_shape").field("strategy", "recursive") + .endObject().endObject().endObject().endObject(); + } + + @Override + public Version randomSupportedVersion() throws IOException { + return VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); + } +} + diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoFilterIT.java deleted file mode 100644 index f1c76403f9e4c..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.search.geo; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; -import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; -import org.apache.lucene.spatial.query.SpatialArgs; -import org.apache.lucene.spatial.query.SpatialOperation; -import org.apache.lucene.spatial.query.UnsupportedSpatialOperation; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.bulk.BulkItemResponse; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.geo.builders.CoordinatesBuilder; -import org.elasticsearch.common.geo.builders.LineStringBuilder; -import org.elasticsearch.common.geo.builders.MultiPolygonBuilder; -import org.elasticsearch.common.geo.builders.PointBuilder; -import org.elasticsearch.common.geo.builders.PolygonBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.core.internal.io.Streams; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; -import org.junit.BeforeClass; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; -import org.locationtech.spatial4j.exception.InvalidShapeException; -import org.locationtech.spatial4j.shape.Shape; - -import java.io.ByteArrayOutputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Random; -import java.util.zip.GZIPInputStream; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.geometry.utils.Geohash.addNeighbors; -import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery; -import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - -public class GeoFilterIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - private static boolean intersectSupport; - private static boolean disjointSupport; - private static boolean withinSupport; - - 
@BeforeClass - public static void createNodes() throws Exception { - intersectSupport = testRelationSupport(SpatialOperation.Intersects); - disjointSupport = testRelationSupport(SpatialOperation.IsDisjointTo); - withinSupport = testRelationSupport(SpatialOperation.IsWithin); - } - - private static byte[] unZipData(String path) throws IOException { - InputStream is = Streams.class.getResourceAsStream(path); - if (is == null) { - throw new FileNotFoundException("Resource [" + path + "] not found in classpath"); - } - - ByteArrayOutputStream out = new ByteArrayOutputStream(); - GZIPInputStream in = new GZIPInputStream(is); - Streams.copy(in, out); - - is.close(); - out.close(); - - return out.toByteArray(); - } - - public void testShapeBuilders() { - try { - // self intersection polygon - new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-10, -10) - .coordinate(10, 10) - .coordinate(-10, 10) - .coordinate(10, -10) - .close()) - .buildS4J(); - fail("Self intersection not detected"); - } catch (InvalidShapeException e) { - } - - // polygon with hole - new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-10, -10).coordinate(-10, 10).coordinate(10, 10).coordinate(10, -10).close()) - .hole(new LineStringBuilder(new CoordinatesBuilder().coordinate(-5, -5).coordinate(-5, 5).coordinate(5, 5) - .coordinate(5, -5).close())) - .buildS4J(); - try { - // polygon with overlapping hole - new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-10, -10).coordinate(-10, 10).coordinate(10, 10).coordinate(10, -10).close()) - .hole(new LineStringBuilder(new CoordinatesBuilder() - .coordinate(-5, -5).coordinate(-5, 11).coordinate(5, 11).coordinate(5, -5).close())) - .buildS4J(); - - fail("Self intersection not detected"); - } catch (InvalidShapeException e) { - } - - try { - // polygon with intersection holes - new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-10, -10).coordinate(-10, 10).coordinate(10, 10).coordinate(10, -10).close()) - .hole(new LineStringBuilder(new CoordinatesBuilder().coordinate(-5, -5).coordinate(-5, 5).coordinate(5, 5) - .coordinate(5, -5).close())) - .hole(new LineStringBuilder(new CoordinatesBuilder().coordinate(-5, -6).coordinate(5, -6).coordinate(5, -4) - .coordinate(-5, -4).close())) - .buildS4J(); - fail("Intersection of holes not detected"); - } catch (InvalidShapeException e) { - } - - try { - // Common line in polygon - new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-10, -10) - .coordinate(-10, 10) - .coordinate(-5, 10) - .coordinate(-5, -5) - .coordinate(-5, 20) - .coordinate(10, 20) - .coordinate(10, -10) - .close()) - .buildS4J(); - fail("Self intersection not detected"); - } catch (InvalidShapeException e) { - } - - // Multipolygon: polygon with hole and polygon within the whole - new MultiPolygonBuilder() - .polygon(new PolygonBuilder( - new CoordinatesBuilder().coordinate(-10, -10) - .coordinate(-10, 10) - .coordinate(10, 10) - .coordinate(10, -10).close()) - .hole(new LineStringBuilder( - new CoordinatesBuilder().coordinate(-5, -5) - .coordinate(-5, 5) - .coordinate(5, 5) - .coordinate(5, -5).close()))) - .polygon(new PolygonBuilder( - new CoordinatesBuilder() - .coordinate(-4, -4) - .coordinate(-4, 4) - .coordinate(4, 4) - .coordinate(4, -4).close())) - .buildS4J(); - } - - public void testShapeRelations() throws Exception { - assertTrue( "Intersect relation is not supported", intersectSupport); - assertTrue("Disjoint relation is not supported", disjointSupport); - assertTrue("within relation is not supported", withinSupport); - - String 
mapping = Strings.toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("properties") - .startObject("area") - .field("type", "geo_shape") - .field("tree", "geohash") - .endObject() - .endObject() - .endObject()); - - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("shapes") - .setMapping(mapping).setSettings(settings(version).build()); - mappingRequest.get(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); - - // Create a multipolygon with two polygons. The first is an rectangle of size 10x10 - // with a hole of size 5x5 equidistant from all sides. This hole in turn contains - // the second polygon of size 4x4 equidistant from all sites - MultiPolygonBuilder polygon = new MultiPolygonBuilder() - .polygon(new PolygonBuilder( - new CoordinatesBuilder().coordinate(-10, -10).coordinate(-10, 10).coordinate(10, 10).coordinate(10, -10) - .close()) - .hole(new LineStringBuilder(new CoordinatesBuilder() - .coordinate(-5, -5).coordinate(-5, 5).coordinate(5, 5).coordinate(5, -5).close()))) - .polygon(new PolygonBuilder( - new CoordinatesBuilder().coordinate(-4, -4).coordinate(-4, 4).coordinate(4, 4).coordinate(4, -4).close())); - BytesReference data = BytesReference.bytes(jsonBuilder().startObject().field("area", polygon).endObject()); - - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); - client().admin().indices().prepareRefresh().get(); - - // Point in polygon - SearchResponse result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(3, 3))) - .get(); - assertHitCount(result, 1); - assertFirstHit(result, hasId("1")); - - // Point in polygon hole - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(4.5, 4.5))) - .get(); - assertHitCount(result, 0); - - // by definition the border of a polygon belongs to the inner - // so the border of a polygons hole also belongs to the inner - // of the polygon NOT the hole - - // Point on polygon border - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(10.0, 5.0))) - .get(); - assertHitCount(result, 1); - assertFirstHit(result, hasId("1")); - - // Point on hole border - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(5.0, 2.0))) - .get(); - assertHitCount(result, 1); - assertFirstHit(result, hasId("1")); - - if (disjointSupport) { - // Point not in polygon - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoDisjointQuery("area", new PointBuilder(3, 3))) - .get(); - assertHitCount(result, 0); - - // Point in polygon hole - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoDisjointQuery("area", new PointBuilder(4.5, 4.5))) - .get(); - assertHitCount(result, 1); - assertFirstHit(result, hasId("1")); - } - - // Create a polygon that fills the empty area of the polygon defined above - PolygonBuilder inverse = new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-5, -5).coordinate(-5, 5).coordinate(5, 5).coordinate(5, -5).close()) - .hole(new LineStringBuilder( - new 
CoordinatesBuilder().coordinate(-4, -4).coordinate(-4, 4).coordinate(4, 4).coordinate(4, -4).close())); - - data = BytesReference.bytes(jsonBuilder().startObject().field("area", inverse).endObject()); - client().prepareIndex("shapes").setId("2").setSource(data, XContentType.JSON).get(); - client().admin().indices().prepareRefresh().get(); - - // re-check point on polygon hole - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(4.5, 4.5))) - .get(); - assertHitCount(result, 1); - assertFirstHit(result, hasId("2")); - - // Create Polygon with hole and common edge - PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-10, -10).coordinate(-10, 10).coordinate(10, 10).coordinate(10, -10).close()) - .hole(new LineStringBuilder(new CoordinatesBuilder() - .coordinate(-5, -5).coordinate(-5, 5).coordinate(10, 5).coordinate(10, -5).close())); - - if (withinSupport) { - // Polygon WithIn Polygon - builder = new PolygonBuilder(new CoordinatesBuilder() - .coordinate(-30, -30).coordinate(-30, 30).coordinate(30, 30).coordinate(30, -30).close()); - - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoWithinQuery("area", builder.buildGeometry())) - .get(); - assertHitCount(result, 2); - } - - // Create a polygon crossing longitude 180. - builder = new PolygonBuilder(new CoordinatesBuilder() - .coordinate(170, -10).coordinate(190, -10).coordinate(190, 10).coordinate(170, 10).close()); - - data = BytesReference.bytes(jsonBuilder().startObject().field("area", builder).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); - client().admin().indices().prepareRefresh().get(); - - // Create a polygon crossing longitude 180 with hole. 
- builder = new PolygonBuilder(new CoordinatesBuilder() - .coordinate(170, -10).coordinate(190, -10).coordinate(190, 10).coordinate(170, 10).close()) - .hole(new LineStringBuilder(new CoordinatesBuilder().coordinate(175, -5).coordinate(185, -5).coordinate(185, 5) - .coordinate(175, 5).close())); - - data = BytesReference.bytes(jsonBuilder().startObject().field("area", builder).endObject()); - client().prepareIndex("shapes").setId("1").setSource(data, XContentType.JSON).get(); - client().admin().indices().prepareRefresh().get(); - - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(174, -4).buildGeometry())) - .get(); - assertHitCount(result, 1); - - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(-174, -4).buildGeometry())) - .get(); - assertHitCount(result, 1); - - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(180, -4).buildGeometry())) - .get(); - assertHitCount(result, 0); - - result = client().prepareSearch() - .setQuery(matchAllQuery()) - .setPostFilter(QueryBuilders.geoIntersectionQuery("area", new PointBuilder(180, -6).buildGeometry())) - .get(); - assertHitCount(result, 1); - } - - public void testBulk() throws Exception { - byte[] bulkAction = unZipData("/org/elasticsearch/search/geo/gzippedmap.gz"); - Version version = VersionUtils.randomIndexCompatibleVersion(random()); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("pin") - .field("type", "geo_point"); - xContentBuilder.field("store", true) - .endObject() - .startObject("location") - .field("type", "geo_shape") - .field("ignore_malformed", true) - .endObject() - .endObject() - .endObject() - .endObject(); - - client().admin().indices().prepareCreate("countries").setSettings(settings) - .setMapping(xContentBuilder).get(); - BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, xContentBuilder.contentType()).get(); - - for (BulkItemResponse item : bulk.getItems()) { - assertFalse("unable to index data", item.isFailed()); - } - - client().admin().indices().prepareRefresh().get(); - String key = "DE"; - - SearchResponse searchResponse = client().prepareSearch() - .setQuery(matchQuery("_id", key)) - .get(); - - assertHitCount(searchResponse, 1); - - for (SearchHit hit : searchResponse.getHits()) { - assertThat(hit.getId(), equalTo(key)); - } - - SearchResponse world = client().prepareSearch().addStoredField("pin").setQuery( - geoBoundingBoxQuery("pin").setCorners(90, -179.99999, -90, 179.99999) - ).get(); - - assertHitCount(world, 53); - - SearchResponse distance = client().prepareSearch().addStoredField("pin").setQuery( - geoDistanceQuery("pin").distance("425km").point(51.11, 9.851) - ).get(); - - assertHitCount(distance, 5); - GeoPoint point = new GeoPoint(); - for (SearchHit hit : distance.getHits()) { - String name = hit.getId(); - point.resetFromString(hit.getFields().get("pin").getValue()); - double dist = distance(point.getLat(), point.getLon(), 51.11, 9.851); - - assertThat("distance to '" + name + "'", dist, lessThanOrEqualTo(425000d)); - assertThat(name, anyOf(equalTo("CZ"), equalTo("DE"), equalTo("BE"), equalTo("NL"), 
equalTo("LU"))); - if (key.equals(name)) { - assertThat(dist, closeTo(0d, 0.1d)); - } - } - } - - public void testNeighbors() { - // Simple root case - assertThat(addNeighbors("7", new ArrayList<>()), containsInAnyOrder("4", "5", "6", "d", "e", "h", "k", "s")); - - // Root cases (Outer cells) - assertThat(addNeighbors("0", new ArrayList<>()), containsInAnyOrder("1", "2", "3", "p", "r")); - assertThat(addNeighbors("b", new ArrayList<>()), containsInAnyOrder("8", "9", "c", "x", "z")); - assertThat(addNeighbors("p", new ArrayList<>()), containsInAnyOrder("n", "q", "r", "0", "2")); - assertThat(addNeighbors("z", new ArrayList<>()), containsInAnyOrder("8", "b", "w", "x", "y")); - - // Root crossing dateline - assertThat(addNeighbors("2", new ArrayList<>()), containsInAnyOrder("0", "1", "3", "8", "9", "p", "r", "x")); - assertThat(addNeighbors("r", new ArrayList<>()), containsInAnyOrder("0", "2", "8", "n", "p", "q", "w", "x")); - - // level1: simple case - assertThat(addNeighbors("dk", new ArrayList<>()), - containsInAnyOrder("d5", "d7", "de", "dh", "dj", "dm", "ds", "dt")); - - // Level1: crossing cells - assertThat(addNeighbors("d5", new ArrayList<>()), - containsInAnyOrder("d4", "d6", "d7", "dh", "dk", "9f", "9g", "9u")); - assertThat(addNeighbors("d0", new ArrayList<>()), - containsInAnyOrder("d1", "d2", "d3", "9b", "9c", "6p", "6r", "3z")); - } - - public static double distance(double lat1, double lon1, double lat2, double lon2) { - return GeoUtils.EARTH_SEMI_MAJOR_AXIS * DistanceUtils.distHaversineRAD( - DistanceUtils.toRadians(lat1), - DistanceUtils.toRadians(lon1), - DistanceUtils.toRadians(lat2), - DistanceUtils.toRadians(lon2) - ); - } - - protected static boolean testRelationSupport(SpatialOperation relation) { - if (relation == SpatialOperation.IsDisjointTo) { - // disjoint works in terms of intersection - relation = SpatialOperation.Intersects; - } - try { - GeohashPrefixTree tree = new GeohashPrefixTree(SpatialContext.GEO, 3); - RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(tree, "area"); - Shape shape = SpatialContext.GEO.makePoint(0, 0); - SpatialArgs args = new SpatialArgs(relation, shape); - strategy.makeQuery(args); - return true; - } catch (UnsupportedSpatialOperation e) { - final SpatialOperation finalRelation = relation; - LogManager.getLogger(GeoFilterIT.class) - .info(() -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e); - return false; - } - } - - protected static String randomhash(int length) { - return randomhash(random(), length); - } - - protected static String randomhash(Random random) { - return randomhash(random, 2 + random.nextInt(10)); - } - - protected static String randomhash() { - return randomhash(random()); - } - - protected static String randomhash(Random random, int length) { - final char[] BASE_32 = { - '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r', - 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}; - - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < length; i++) { - sb.append(BASE_32[random.nextInt(BASE_32.length)]); - } - - return sb.toString(); - } -} - diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java new file mode 100644 index 0000000000000..70ff028360165 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java @@ -0,0 +1,33 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.geo; + +import org.elasticsearch.Version; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; + +public class GeoShapeIT extends GeoShapeIntegTestCase { + + @Override + protected void getGeoShapeMapping(XContentBuilder b) throws IOException { + b.field("type", "geo_shape"); + } + + @Override + protected Version getVersion() { + return VersionUtils.randomIndexCompatibleVersion(random()); + } + + @Override + protected boolean allowExpensiveQueries() { + return true; + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java deleted file mode 100644 index 28d51c09e88a5..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIntegrationIT.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.search.geo; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.common.geo.builders.PointBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; - -import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; - -public class GeoShapeIntegrationIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - @Override - protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { - return Settings.builder() - // Check that only geo-shape queries on legacy PrefixTree based - // geo shapes are disallowed. 
- .put("search.allow_expensive_queries", false) - .put(super.nodeSettings(nodeOrdinal, otherSettings)) - .build(); - } - - /** - * Test that orientation parameter correctly persists across cluster restart - */ - public void testOrientationPersistence() throws Exception { - String idxName = "orientation"; - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("properties").startObject("location") - .field("type", "geo_shape") - .field("orientation", "left") - .endObject() - .endObject().endObject()); - - // create index - assertAcked(prepareCreate(idxName).setMapping(mapping)); - - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("properties").startObject("location") - .field("type", "geo_shape") - .field("orientation", "right") - .endObject() - .endObject().endObject()); - - assertAcked(prepareCreate(idxName+"2").setMapping(mapping)); - ensureGreen(idxName, idxName+"2"); - - internalCluster().fullRestart(); - ensureGreen(idxName, idxName+"2"); - - // left orientation test - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName)); - IndexService indexService = indicesService.indexService(resolveIndex(idxName)); - MappedFieldType fieldType = indexService.mapperService().fieldType("location"); - assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); - - GeoShapeFieldMapper.GeoShapeFieldType gsfm = (GeoShapeFieldMapper.GeoShapeFieldType)fieldType; - Orientation orientation = gsfm.orientation(); - assertThat(orientation, equalTo(Orientation.CLOCKWISE)); - assertThat(orientation, equalTo(Orientation.LEFT)); - assertThat(orientation, equalTo(Orientation.CW)); - - // right orientation test - indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName+"2")); - indexService = indicesService.indexService(resolveIndex((idxName+"2"))); - fieldType = indexService.mapperService().fieldType("location"); - assertThat(fieldType, instanceOf(GeoShapeFieldMapper.GeoShapeFieldType.class)); - - gsfm = (GeoShapeFieldMapper.GeoShapeFieldType)fieldType; - orientation = gsfm.orientation(); - assertThat(orientation, equalTo(Orientation.COUNTER_CLOCKWISE)); - assertThat(orientation, equalTo(Orientation.RIGHT)); - assertThat(orientation, equalTo(Orientation.CCW)); - } - - /** - * Test that ignore_malformed on GeoShapeFieldMapper does not fail the entire document - */ - public void testIgnoreMalformed() throws Exception { - // create index - assertAcked(client().admin().indices().prepareCreate("test") - .setMapping("shape", "type=geo_shape,ignore_malformed=true").get()); - ensureGreen(); - - // test self crossing ccw poly not crossing dateline - String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray().value(176.0).value(15.0).endArray() - .startArray().value(-177.0).value(10.0).endArray() - .startArray().value(-177.0).value(-10.0).endArray() - .startArray().value(176.0).value(-15.0).endArray() - .startArray().value(-177.0).value(15.0).endArray() - .startArray().value(172.0).value(0.0).endArray() - .startArray().value(176.0).value(15.0).endArray() - .endArray() - .endArray() - .endObject()); - - indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", - polygonGeoJson)); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get(); - 
assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } - - public void testMappingUpdate() { - // create index - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - assertAcked(client().admin().indices().prepareCreate("test").setSettings(settings(version).build()) - .setMapping("shape", "type=geo_shape,strategy=recursive").get()); - ensureGreen(); - - String update ="{\n" + - " \"properties\": {\n" + - " \"shape\": {\n" + - " \"type\": \"geo_shape\"" + - " }\n" + - " }\n" + - "}"; - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin().indices() - .preparePutMapping("test") - .setSource(update, XContentType.JSON).get()); - assertThat(e.getMessage(), containsString("mapper [shape] of type [geo_shape] cannot change strategy from [recursive] to [BKD]")); - } - - /** - * Test that the indexed shape routing can be provided if it is required - */ - public void testIndexShapeRouting() throws Exception { - String mapping = "{\"_doc\":{\n" + - " \"_routing\": {\n" + - " \"required\": true\n" + - " },\n" + - " \"properties\": {\n" + - " \"shape\": {\n" + - " \"type\": \"geo_shape\"\n" + - " }\n" + - " }\n" + - " }}"; - - - // create index - assertAcked(client().admin().indices().prepareCreate("test").setMapping(mapping).get()); - ensureGreen(); - - String source = "{\n" + - " \"shape\" : {\n" + - " \"type\" : \"bbox\",\n" + - " \"coordinates\" : [[-45.0, 45.0], [45.0, -45.0]]\n" + - " }\n" + - "}"; - - indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON).setRouting("ABC")); - - SearchResponse searchResponse = client().prepareSearch("test").setQuery( - geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC") - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } - - public void testIndexPolygonDateLine() throws Exception { - String mappingVector = "{\n" + - " \"properties\": {\n" + - " \"shape\": {\n" + - " \"type\": \"geo_shape\"\n" + - " }\n" + - " }\n" + - " }"; - - String mappingQuad = "{\n" + - " \"properties\": {\n" + - " \"shape\": {\n" + - " \"type\": \"geo_shape\",\n" + - " \"tree\": \"quadtree\"\n" + - " }\n" + - " }\n" + - " }"; - - - // create index - assertAcked(client().admin().indices().prepareCreate("vector").setMapping(mappingVector).get()); - ensureGreen(); - - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - assertAcked(client().admin().indices().prepareCreate("quad") - .setSettings(settings(version).build()).setMapping(mappingQuad).get()); - ensureGreen(); - - String source = "{\n" + - " \"shape\" : \"POLYGON((179 0, -179 0, -179 2, 179 2, 179 0))\""+ - "}"; - - indexRandom(true, client().prepareIndex("quad").setId("0").setSource(source, XContentType.JSON)); - indexRandom(true, client().prepareIndex("vector").setId("0").setSource(source, XContentType.JSON)); - - try { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", true)); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - SearchResponse searchResponse = client().prepareSearch("quad").setQuery( - geoShapeQuery("shape", new PointBuilder(-179.75, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = 
client().prepareSearch("quad").setQuery( - geoShapeQuery("shape", new PointBuilder(90, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - searchResponse = client().prepareSearch("quad").setQuery( - geoShapeQuery("shape", new PointBuilder(-180, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = client().prepareSearch("quad").setQuery( - geoShapeQuery("shape", new PointBuilder(180, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } finally { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", (String) null)); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - } - - SearchResponse searchResponse = client().prepareSearch("vector").setQuery( - geoShapeQuery("shape", new PointBuilder(90, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); - - searchResponse = client().prepareSearch("vector").setQuery( - geoShapeQuery("shape", new PointBuilder(-179.75, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = client().prepareSearch("vector").setQuery( - geoShapeQuery("shape", new PointBuilder(-180, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - searchResponse = client().prepareSearch("vector").setQuery( - geoShapeQuery("shape", new PointBuilder(180, 1)) - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } - - private String findNodeName(String index) { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0); - String nodeId = shard.assignedShards().get(0).currentNodeId(); - return state.getNodes().get(nodeId).getName(); - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java new file mode 100644 index 0000000000000..3aff066a5bff9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.geo; + +import org.elasticsearch.Version; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; + +import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class LegacyGeoShapeIT extends GeoShapeIntegTestCase { + + @Override + protected void getGeoShapeMapping(XContentBuilder b) throws IOException { + b.field("type", "geo_shape"); + b.field("strategy", "recursive"); + } + + @Override + protected Version getVersion() { + // legacy shapes can only be created in version lower than 8.x + return VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); + } + + @Override + protected boolean allowExpensiveQueries() { + return false; + } + + public void testMappingUpdate() { + // create index + assertAcked(client().admin().indices().prepareCreate("test").setSettings(settings(getVersion()).build()) + .setMapping("shape", "type=geo_shape,strategy=recursive").get()); + ensureGreen(); + + String update ="{\n" + + " \"properties\": {\n" + + " \"shape\": {\n" + + " \"type\": \"geo_shape\"" + + " }\n" + + " }\n" + + "}"; + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin().indices() + .preparePutMapping("test") + .setSource(update, XContentType.JSON).get()); + assertThat(e.getMessage(), containsString("mapper [shape] of type [geo_shape] cannot change strategy from [recursive] to [BKD]")); + } + + /** + * Test that the circle is still supported for the legacy shapes + */ + public void testLegacyCircle() throws Exception { + // create index + assertAcked(prepareCreate("test").setSettings(settings(getVersion()).build()) + .setMapping("shape", "type=geo_shape,strategy=recursive,tree=geohash").get()); + ensureGreen(); + + indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { + builder.startObject().field("type", "circle") + .startArray("coordinates").value(30).value(50).endArray() + .field("radius","77km") + .endObject(); + return builder; + })); + + // test self crossing of circles + SearchResponse searchResponse = client().prepareSearch("test").setQuery(geoShapeQuery("shape", + new Circle(30, 50, 77000))).get(); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java deleted file mode 100644 index c1301a2a10b85..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ /dev/null @@ -1,247 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.search.geo; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.geo.Orientation; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.geometry.Circle; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.LegacyGeoShapeFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; - -import java.io.IOException; - -import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; - -public class LegacyGeoShapeIntegrationIT extends ESIntegTestCase { - - @Override - protected boolean forbidPrivateIndexSettings() { - return false; - } - - /** - * Test that orientation parameter correctly persists across cluster restart - */ - public void testOrientationPersistence() throws Exception { - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - String idxName = "orientation"; - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("properties").startObject("location") - .field("type", "geo_shape") - .field("tree", "quadtree") - .field("orientation", "left") - .endObject() - .endObject().endObject()); - - // create index - assertAcked(prepareCreate(idxName).setMapping(mapping).setSettings(settings(version).build())); - - mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("properties").startObject("location") - .field("type", "geo_shape") - .field("tree", "quadtree") - .field("orientation", "right") - .endObject() - .endObject().endObject()); - - assertAcked(prepareCreate(idxName+"2").setMapping(mapping).setSettings(settings(version).build())); - ensureGreen(idxName, idxName+"2"); - - internalCluster().fullRestart(); - ensureGreen(idxName, idxName+"2"); - - // left orientation test - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName)); - IndexService indexService = indicesService.indexService(resolveIndex(idxName)); - MappedFieldType fieldType = indexService.mapperService().fieldType("location"); - assertThat(fieldType, instanceOf(LegacyGeoShapeFieldMapper.GeoShapeFieldType.class)); - - LegacyGeoShapeFieldMapper.GeoShapeFieldType gsfm = (LegacyGeoShapeFieldMapper.GeoShapeFieldType)fieldType; - Orientation orientation = gsfm.orientation(); - assertThat(orientation, equalTo(Orientation.CLOCKWISE)); - assertThat(orientation, equalTo(Orientation.LEFT)); - assertThat(orientation, equalTo(Orientation.CW)); - - // right orientation test - indicesService = internalCluster().getInstance(IndicesService.class, 
findNodeName(idxName+"2")); - indexService = indicesService.indexService(resolveIndex((idxName+"2"))); - fieldType = indexService.mapperService().fieldType("location"); - assertThat(fieldType, instanceOf(LegacyGeoShapeFieldMapper.GeoShapeFieldType.class)); - - gsfm = (LegacyGeoShapeFieldMapper.GeoShapeFieldType)fieldType; - orientation = gsfm.orientation(); - assertThat(orientation, equalTo(Orientation.COUNTER_CLOCKWISE)); - assertThat(orientation, equalTo(Orientation.RIGHT)); - assertThat(orientation, equalTo(Orientation.CCW)); - } - - /** - * Test that ignore_malformed on GeoShapeFieldMapper does not fail the entire document - */ - public void testIgnoreMalformed() throws Exception { - // create index - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - assertAcked(prepareCreate("test").setSettings(settings(version).build()) - .setMapping("shape", "type=geo_shape,tree=quadtree,ignore_malformed=true").get()); - ensureGreen(); - - // test self crossing ccw poly not crossing dateline - String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") - .startArray("coordinates") - .startArray() - .startArray().value(176.0).value(15.0).endArray() - .startArray().value(-177.0).value(10.0).endArray() - .startArray().value(-177.0).value(-10.0).endArray() - .startArray().value(176.0).value(-15.0).endArray() - .startArray().value(-177.0).value(15.0).endArray() - .startArray().value(172.0).value(0.0).endArray() - .startArray().value(176.0).value(15.0).endArray() - .endArray() - .endArray() - .endObject()); - - indexRandom(true, client().prepareIndex("test").setId("0").setSource("shape", - polygonGeoJson)); - SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } - - /** - * Test that the indexed shape routing can be provided if it is required - */ - public void testIndexShapeRouting() throws Exception { - String mapping = "{\"_doc\":{\n" + - " \"_routing\": {\n" + - " \"required\": true\n" + - " },\n" + - " \"properties\": {\n" + - " \"shape\": {\n" + - " \"type\": \"geo_shape\",\n" + - " \"tree\" : \"quadtree\"\n" + - " }\n" + - " }\n" + - " }}"; - - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - // create index - assertAcked(prepareCreate("test").setSettings(settings(version).build()).setMapping(mapping).get()); - ensureGreen(); - - String source = "{\n" + - " \"shape\" : {\n" + - " \"type\" : \"bbox\",\n" + - " \"coordinates\" : [[-45.0, 45.0], [45.0, -45.0]]\n" + - " }\n" + - "}"; - - indexRandom(true, client().prepareIndex("test").setId("0").setSource(source, XContentType.JSON).setRouting("ABC")); - - SearchResponse searchResponse = client().prepareSearch("test").setQuery( - geoShapeQuery("shape", "0").indexedShapeIndex("test").indexedShapeRouting("ABC") - ).get(); - - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } - - /** - * Test that the circle is still supported for the legacy shapes - */ - public void testLegacyCircle() throws Exception { - // create index - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - assertAcked(prepareCreate("test").setSettings(settings(version).build()) - .setMapping("shape", "type=geo_shape,strategy=recursive,tree=geohash").get()); - ensureGreen(); - - indexRandom(true, 
client().prepareIndex("test").setId("0").setSource("shape", (ToXContent) (builder, params) -> { - builder.startObject().field("type", "circle") - .startArray("coordinates").value(30).value(50).endArray() - .field("radius","77km") - .endObject(); - return builder; - })); - - // test self crossing of circles - SearchResponse searchResponse = client().prepareSearch("test").setQuery(geoShapeQuery("shape", - new Circle(30, 50, 77000))).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } - - public void testDisallowExpensiveQueries() throws InterruptedException, IOException { - final Version version = VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); - try { - // create index - assertAcked(client().admin().indices().prepareCreate("test").setSettings(settings(version).build()) - .setMapping("shape", "type=geo_shape,strategy=recursive,tree=geohash").get()); - ensureGreen(); - - indexRandom(true, client().prepareIndex("test").setId("0").setSource( - "shape", (ToXContent) (builder, params) -> { - builder.startObject().field("type", "circle") - .startArray("coordinates").value(30).value(50).endArray() - .field("radius", "77km") - .endObject(); - return builder; - })); - refresh(); - - // Execute with search.allow_expensive_queries = null => default value = false => success - SearchResponse searchResponse = client().prepareSearch("test").setQuery(geoShapeQuery("shape", - new Circle(30, 50, 77000))).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", false)); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - - // Set search.allow_expensive_queries to "false" => assert failure - ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> client().prepareSearch("test").setQuery(geoShapeQuery("shape", - new Circle(30, 50, 77000))).get()); - assertEquals("[geo-shape] queries on [PrefixTree geo shapes] cannot be executed when " + - "'search.allow_expensive_queries' is set to false.", e.getCause().getMessage()); - - // Set search.allow_expensive_queries to "true" => success - updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", true)); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - searchResponse = client().prepareSearch("test").setQuery(geoShapeQuery("shape", - new Circle(30, 50, 77000))).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); - } finally { - ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); - updateSettingsRequest.persistentSettings(Settings.builder().put("search.allow_expensive_queries", (String) null)); - assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - } - } - - private String findNodeName(String index) { - ClusterState state = client().admin().cluster().prepareState().get().getState(); - IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0); - String nodeId = shard.assignedShards().get(0).currentNodeId(); - return state.getNodes().get(nodeId).getName(); - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index 3ccb6d20d66f6..3f0e055994eda 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -58,9 +58,6 @@ public void testShardClone() throws Exception { final boolean useBwCFormat = randomBoolean(); if (useBwCFormat) { initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - // Re-create repo to clear repository data cache - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); - createRepository(repoName, "fs", repoPath); } final String indexName = "test-index"; @@ -797,6 +794,34 @@ public void testRemoveFailedCloneFromCSWithQueuedSnapshotInProgress() throws Exc assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); } + public void testCloneAfterFailedShardSnapshot() throws Exception { + final String masterNode = internalCluster().startMasterOnlyNode(); + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + createRepository(repoName, "mock"); + final String testIndex = "index-test"; + createIndex(testIndex); + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repoName, sourceSnapshot); + indexRandomDocs(testIndex, randomIntBetween(1, 100)); + blockDataNode(repoName, dataNode); + final ActionFuture snapshotFuture = client(masterNode).admin() + .cluster() + .prepareCreateSnapshot(repoName, "full-snapshot") + .execute(); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(dataNode, repoName); + final ActionFuture cloneFuture = client(masterNode).admin() + .cluster() + .prepareCloneSnapshot(repoName, sourceSnapshot, "target-snapshot") + .setIndices(testIndex) + .execute(); + awaitNumberOfSnapshotsInProgress(2); + internalCluster().stopNode(dataNode); + assertAcked(cloneFuture.get()); + assertTrue(snapshotFuture.isDone()); + } + private ActionFuture startCloneFromDataNode( String repoName, String sourceSnapshot, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 00bac57b9238c..0f53857357834 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -1690,6 +1690,66 @@ public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { ); } + public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Exception { + final String master = internalCluster().startMasterOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startDataOnlyNode(); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1); + createIndexWithContent(index2); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + final String sourceSnapshot = "source-snapshot"; + createFullSnapshot(repository, sourceSnapshot); + + final IndexId index1Id = getRepositoryData(repository).resolveIndexId(index1); + blockMasterOnShardLevelSnapshotFile(repository, index1Id.getId()); + + final String cloneTarget = "target-snapshot"; + final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + repository, + sourceSnapshot, + cloneTarget + ).setIndices(index1, index2).execute(); + 
awaitNumberOfSnapshotsInProgress(1); + waitForBlock(master, repository); + + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") + .setIndices(index1, index2) + .setWaitForCompletion(true) + .setPartial(true) + .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") + .setIndices(index2) + .setWaitForCompletion(true) + .execute(); + assertSuccessful(snapshot2); + awaitNumberOfSnapshotsInProgress(2); + assertFalse(snapshot3.isDone()); + + final String cloneTarget2 = "target-snapshot-2"; + final ActionFuture cloneSnapshot2 = clusterAdmin().prepareCloneSnapshot( + repository, + sourceSnapshot, + cloneTarget2 + ).setIndices(index1, index2).execute(); + + assertAcked(admin().indices().prepareDelete(index1).get()); + assertSuccessful(snapshot3); + unblockNode(repository, master); + + assertAcked(cloneSnapshot.get()); + assertAcked(cloneSnapshot2.get()); + assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2", "snapshot-3").setRepository(repository).get().getSnapshots(), + hasSize(2) + ); + } + public void testQueuedAfterFailedShardSnapshot() throws Exception { internalCluster().startMasterOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode(); @@ -1736,6 +1796,43 @@ public void testQueuedAfterFailedShardSnapshot() throws Exception { assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse3); } + public void testOutOfOrderFinalizationManySnapshots() throws Exception { + internalCluster().startMasterOnlyNode(); + final List dataNodes = internalCluster().startDataOnlyNodes(2); + final String index1 = "index-1"; + final String index2 = "index-2"; + createIndexWithContent(index1, dataNodes.get(0), dataNodes.get(1)); + createIndexWithContent(index2, dataNodes.get(1), dataNodes.get(0)); + + final String repository = "test-repo"; + createRepository(repository, "mock"); + + blockNodeWithIndex(repository, index2); + + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") + .setIndices(index1, index2) + .setWaitForCompletion(true) + .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") + .setIndices(index1, index2) + .setWaitForCompletion(true) + .execute(); + awaitNumberOfSnapshotsInProgress(2); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") + .setIndices(index1) + .setWaitForCompletion(true) + .execute(); + assertSuccessful(snapshot3); + unblockAllDataNodes(repository); + assertSuccessful(snapshot1); + assertSuccessful(snapshot2); + + assertThat( + clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + hasSize(1) + ); + } + private static void assertSnapshotStatusCountOnRepo(String otherBlockedRepoName, int count) { final SnapshotsStatusResponse snapshotsStatusResponse = client().admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 219b72516493f..5f497ebf7b125 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -33,7 +33,6 @@ import 
org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.threadpool.ThreadPool; -import java.io.IOException; import java.nio.channels.SeekableByteChannel; import java.nio.file.Files; import java.nio.file.Path; @@ -42,7 +41,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -368,16 +366,12 @@ public void testMountCorruptedRepositoryData() throws Exception { expectThrows(RepositoryException.class, () -> getRepositoryData(otherRepo)); } - public void testHandleSnapshotErrorWithBwCFormat() throws IOException, ExecutionException, InterruptedException { + public void testHandleSnapshotErrorWithBwCFormat() throws Exception { final String repoName = "test-repo"; final Path repoPath = randomRepoPath(); createRepository(repoName, "fs", repoPath); final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - final String indexName = "test-index"; createIndex(indexName); @@ -403,10 +397,6 @@ public void testRepairBrokenShardGenerations() throws Exception { createRepository(repoName, "fs", repoPath); final String oldVersionSnapshot = initWithSnapshotVersion(repoName, repoPath, SnapshotsService.OLD_SNAPSHOT_FORMAT); - logger.info("--> recreating repository to clear caches"); - client().admin().cluster().prepareDeleteRepository(repoName).get(); - createRepository(repoName, "fs", repoPath); - final String indexName = "test-index"; createIndex(indexName); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 06a00a6c7162c..e558dfbd0bcac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -38,21 +38,36 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { } public void testSortBy() throws Exception { - final String repoName = "test-repo"; + final String repoNameA = "test-repo-a"; final Path repoPath = randomRepoPath(); - createRepository(repoName, "fs", repoPath); - maybeInitWithOldSnapshotVersion(repoName, repoPath); - final List snapshotNamesWithoutIndex = createNSnapshots(repoName, randomIntBetween(3, 20)); + createRepository(repoNameA, "fs", repoPath); + maybeInitWithOldSnapshotVersion(repoNameA, repoPath); + final String repoNameB = "test-repo-b"; + createRepository(repoNameB, "fs"); + + final List snapshotNamesWithoutIndexA = createNSnapshots(repoNameA, randomIntBetween(3, 20)); + final List snapshotNamesWithoutIndexB = createNSnapshots(repoNameB, randomIntBetween(3, 20)); createIndexWithContent("test-index"); - final List snapshotNamesWithIndex = createNSnapshots(repoName, randomIntBetween(3, 20)); + final List snapshotNamesWithIndexA = createNSnapshots(repoNameA, randomIntBetween(3, 20)); + final List snapshotNamesWithIndexB = createNSnapshots(repoNameB, randomIntBetween(3, 20)); - final Collection allSnapshotNames = new HashSet<>(snapshotNamesWithIndex); - allSnapshotNames.addAll(snapshotNamesWithoutIndex); + final Collection allSnapshotNamesA = new 
HashSet<>(snapshotNamesWithIndexA); + final Collection allSnapshotNamesB = new HashSet<>(snapshotNamesWithIndexB); + allSnapshotNamesA.addAll(snapshotNamesWithoutIndexA); + allSnapshotNamesB.addAll(snapshotNamesWithoutIndexB); - doTestSortOrder(repoName, allSnapshotNames, SortOrder.ASC); - doTestSortOrder(repoName, allSnapshotNames, SortOrder.DESC); + doTestSortOrder(repoNameA, allSnapshotNamesA, SortOrder.ASC); + doTestSortOrder(repoNameA, allSnapshotNamesA, SortOrder.DESC); + + doTestSortOrder(repoNameB, allSnapshotNamesB, SortOrder.ASC); + doTestSortOrder(repoNameB, allSnapshotNamesB, SortOrder.DESC); + + final Collection allSnapshots = new HashSet<>(allSnapshotNamesA); + allSnapshots.addAll(allSnapshotNamesB); + doTestSortOrder("*", allSnapshots, SortOrder.ASC); + doTestSortOrder("*", allSnapshots, SortOrder.DESC); } private void doTestSortOrder(String repoName, Collection allSnapshotNames, SortOrder order) { @@ -78,6 +93,21 @@ private void doTestSortOrder(String repoName, Collection allSnapshotName GetSnapshotsRequest.SortBy.START_TIME, order ); + assertSnapshotListSorted( + allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.SHARDS, order), + GetSnapshotsRequest.SortBy.SHARDS, + order + ); + assertSnapshotListSorted( + allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order), + GetSnapshotsRequest.SortBy.FAILED_SHARDS, + order + ); + assertSnapshotListSorted( + allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.REPOSITORY, order), + GetSnapshotsRequest.SortBy.REPOSITORY, + order + ); } public void testResponseSizeLimit() throws Exception { diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java index 3cb62e8428559..194d13d4dc970 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java @@ -19,8 +19,8 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.index.mapper.RangeType; import java.io.IOException; @@ -61,7 +61,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException { final TwoPhaseIterator iterator = new TwoPhaseIterator(values) { - ByteArrayDataInput in = new ByteArrayDataInput(); + ByteArrayStreamInput in = new ByteArrayStreamInput(); BytesRef otherFrom = new BytesRef(); BytesRef otherTo = new BytesRef(); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index c86b7db1a4186..1f071c9d8f4ed 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -87,6 +87,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_13_4 = new Version(7130499, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version V_7_14_0 = new Version(7140099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_14_1 = new Version(7140199, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_2 = new Version(7140299, org.apache.lucene.util.Version.LUCENE_8_9_0); public static 
final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_9_0); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 5da8fa14e3350..0991b22cb8a36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -46,6 +46,8 @@ public class GetSnapshotsRequest extends MasterNodeRequest public static final Version NUMERIC_PAGINATION_VERSION = Version.V_7_15_0; + private static final Version SORT_BY_SHARDS_OR_REPO_VERSION = Version.V_7_16_0; + public static final int NO_LIMIT = -1; /** @@ -136,6 +138,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(verbose); if (out.getVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { out.writeOptionalWriteable(after); + if ((sort == SortBy.SHARDS || sort == SortBy.FAILED_SHARDS || sort == SortBy.REPOSITORY) + && out.getVersion().before(SORT_BY_SHARDS_OR_REPO_VERSION)) { + throw new IllegalArgumentException( + "can't use sort by shard count or repository name with node version [" + out.getVersion() + "]" + ); + } out.writeEnum(sort); out.writeVInt(size); order.writeTo(out); @@ -320,7 +328,10 @@ public enum SortBy { START_TIME("start_time"), NAME("name"), DURATION("duration"), - INDICES("index_count"); + INDICES("index_count"), + SHARDS("shard_count"), + FAILED_SHARDS("failed_shard_count"), + REPOSITORY("repository"); private final String param; @@ -343,6 +354,12 @@ public static SortBy of(String value) { return DURATION; case "index_count": return INDICES; + case "shard_count": + return SHARDS; + case "failed_shard_count": + return FAILED_SHARDS; + case "repository": + return REPOSITORY; default: throw new IllegalArgumentException("unknown sort order [" + value + "]"); } @@ -388,6 +405,15 @@ public static After from(@Nullable SnapshotInfo snapshotInfo, SortBy sortBy) { case INDICES: afterValue = String.valueOf(snapshotInfo.indices().size()); break; + case SHARDS: + afterValue = String.valueOf(snapshotInfo.totalShards()); + break; + case FAILED_SHARDS: + afterValue = String.valueOf(snapshotInfo.failedShards()); + break; + case REPOSITORY: + afterValue = snapshotInfo.repository(); + break; default: throw new AssertionError("unknown sort column [" + sortBy + "]"); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 2da538f628e76..7ac984014a770 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -484,8 +484,17 @@ private static SnapshotsInRepo buildSimpleSnapshotInfos( private static final Comparator BY_INDICES_COUNT = Comparator.comparingInt(sni -> sni.indices().size()) .thenComparing(SnapshotInfo::snapshotId); + private static final Comparator BY_SHARDS_COUNT = Comparator.comparingInt(SnapshotInfo::totalShards) + 
.thenComparing(SnapshotInfo::snapshotId); + + private static final Comparator BY_FAILED_SHARDS_COUNT = Comparator.comparingInt(SnapshotInfo::failedShards) + .thenComparing(SnapshotInfo::snapshotId); + private static final Comparator BY_NAME = Comparator.comparing(sni -> sni.snapshotId().getName()); + private static final Comparator BY_REPOSITORY = Comparator.comparing(SnapshotInfo::repository) + .thenComparing(SnapshotInfo::snapshotId); + private static SnapshotsInRepo sortSnapshots( final List snapshotInfos, final GetSnapshotsRequest.SortBy sortBy, @@ -508,6 +517,15 @@ private static SnapshotsInRepo sortSnapshots( case INDICES: comparator = BY_INDICES_COUNT; break; + case SHARDS: + comparator = BY_SHARDS_COUNT; + break; + case FAILED_SHARDS: + comparator = BY_FAILED_SHARDS_COUNT; + break; + case REPOSITORY: + comparator = BY_REPOSITORY; + break; default: throw new AssertionError("unexpected sort column [" + sortBy + "]"); } @@ -546,6 +564,23 @@ private static SnapshotsInRepo sortSnapshots( order ); break; + case SHARDS: + isAfter = filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(after.value()), snapshotName, repoName, order); + break; + case FAILED_SHARDS: + isAfter = filterByLongOffset( + SnapshotInfo::failedShards, + Integer.parseInt(after.value()), + snapshotName, + repoName, + order + ); + break; + case REPOSITORY: + isAfter = order == SortOrder.ASC + ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) + : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); + break; default: throw new AssertionError("unexpected sort column [" + sortBy + "]"); } @@ -582,6 +617,14 @@ private static Predicate filterByLongOffset( }; } + private static int compareRepositoryName(String name, String repoName, SnapshotInfo info) { + final int res = repoName.compareTo(info.repository()); + if (res != 0) { + return res; + } + return name.compareTo(info.snapshotId().getName()); + } + private static int compareName(String name, String repoName, SnapshotInfo info) { final int res = name.compareTo(info.snapshotId().getName()); if (res != 0) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index e358327ee6a7e..fa34679629ff4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -169,7 +169,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { throw new IllegalStateException(message); } - updateRequest = buildSystemIndexUpdateRequest(descriptor); + updateRequest = buildSystemIndexUpdateRequest(indexName, descriptor); } else { updateRequest = buildUpdateRequest(indexName); } @@ -187,11 +187,14 @@ private CreateIndexClusterStateUpdateRequest buildUpdateRequest(String indexName return updateRequest; } - private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest(SystemIndexDescriptor descriptor) { + private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest( + String indexName, SystemIndexDescriptor descriptor) { String mappings = descriptor.getMappings(); Settings settings = descriptor.getSettings(); String aliasName = descriptor.getAliasName(); - String concreteIndexName = descriptor.getPrimaryIndex(); + + // if we are writing to the alias name, we should create the primary index here + String concreteIndexName = 
indexName.equals(aliasName) ? descriptor.getPrimaryIndex() : indexName; CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request.cause(), concreteIndexName, request.index()) @@ -210,7 +213,13 @@ private CreateIndexClusterStateUpdateRequest buildSystemIndexUpdateRequest(Syste updateRequest.aliases(Set.of(new Alias(aliasName))); } - logger.debug("Auto-creating system index {}", concreteIndexName); + if (logger.isDebugEnabled()) { + if (concreteIndexName.equals(indexName) == false) { + logger.debug("Auto-creating backing system index {} for alias {}", concreteIndexName, indexName); + } else { + logger.debug("Auto-creating system index {}", concreteIndexName); + } + } return updateRequest; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/AsyncFieldCapabilitiesShardsAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/AsyncFieldCapabilitiesShardsAction.java new file mode 100644 index 0000000000000..61ad51a09078d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/AsyncFieldCapabilitiesShardsAction.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.fieldcaps; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportResponseHandler; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; + +import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; + +/** + * An action that executes on each shard sequentially until it finds one that can match the provided + * {@link FieldCapabilitiesIndexRequest#indexFilter()}. In which case the shard is used + * to create the final {@link FieldCapabilitiesIndexResponse}. 
+ */ +class AsyncFieldCapabilitiesShardsAction { + private static final Logger logger = LogManager.getLogger(AsyncFieldCapabilitiesShardsAction.class); + + private final FieldCapabilitiesIndexRequest request; + private final TransportService transportService; + private final DiscoveryNodes nodes; + private final ActionListener listener; + private final GroupShardsIterator shardsIt; + + private volatile int shardIndex = 0; + + AsyncFieldCapabilitiesShardsAction(TransportService transportService, + ClusterService clusterService, + FieldCapabilitiesIndexRequest request, + ActionListener listener) { + this.listener = listener; + this.transportService = transportService; + + ClusterState clusterState = clusterService.state(); + if (logger.isTraceEnabled()) { + logger.trace("executing [{}] based on cluster state version [{}]", request, + clusterState.version()); + } + nodes = clusterState.nodes(); + this.request = request; + + shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), + new String[]{request.index()}, null, null, null, null); + } + + public void start() { + tryNext(null, true); + } + + private void onFailure(ShardRouting shardRouting, Exception e) { + if (e != null) { + logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, + request), e); + } + tryNext(e, false); + } + + private ShardRouting nextRoutingOrNull() { + if (shardsIt.size() == 0 || shardIndex >= shardsIt.size()) { + return null; + } + ShardRouting next = shardsIt.get(shardIndex).nextOrNull(); + if (next != null) { + return next; + } + moveToNextShard(); + return nextRoutingOrNull(); + } + + private void moveToNextShard() { + ++shardIndex; + } + + private void tryNext(@Nullable final Exception lastFailure, boolean canMatchShard) { + ShardRouting shardRouting = nextRoutingOrNull(); + if (shardRouting == null) { + if (canMatchShard == false) { + if (lastFailure == null) { + listener.onResponse(new FieldCapabilitiesIndexResponse(request.index(), Collections.emptyMap(), false)); + } else { + logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, + request), lastFailure); + listener.onFailure(lastFailure); + } + } else { + if (lastFailure == null || isShardNotAvailableException(lastFailure)) { + listener.onFailure(new NoShardAvailableActionException(null, + LoggerMessageFormat.format("No shard available for [{}]", request), lastFailure)); + } else { + logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, + request), lastFailure); + listener.onFailure(lastFailure); + } + } + return; + } + DiscoveryNode node = nodes.get(shardRouting.currentNodeId()); + if (node == null) { + onFailure(shardRouting, new NoShardAvailableActionException(shardRouting.shardId())); + } else { + request.shardId(shardRouting.shardId()); + if (logger.isTraceEnabled()) { + logger.trace( + "sending request [{}] on node [{}]", + request, + node + ); + } + transportService.sendRequest(node, TransportFieldCapabilitiesAction.ACTION_SHARD_NAME, request, + new TransportResponseHandler() { + + @Override + public FieldCapabilitiesIndexResponse read(StreamInput in) throws IOException { + return new FieldCapabilitiesIndexResponse(in); + } + + @Override + public void handleResponse(final FieldCapabilitiesIndexResponse response) { + if (response.canMatch()) { + listener.onResponse(response); + } else { + moveToNextShard(); + tryNext(null, false); + } + } + + @Override + public void handleException(TransportException exp) { + onFailure(shardRouting, exp); + } + 
}); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 3f39d47613002..e997ca1af0a56 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -10,34 +10,23 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MappedFieldType; @@ -56,9 +45,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -72,12 +59,8 @@ import java.util.Set; import java.util.function.Predicate; -import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; - public class TransportFieldCapabilitiesAction extends HandledTransportAction { - - private static final String ACTION_SHARD_NAME = FieldCapabilitiesAction.NAME + "[index][s]"; - + static final String ACTION_SHARD_NAME = FieldCapabilitiesAction.NAME + "[index][s]"; private static final Logger logger = LogManager.getLogger(TransportFieldCapabilitiesAction.class); private final ThreadPool threadPool; @@ -121,42 +104,19 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti } else { concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices); } + + checkIndexBlocks(clusterState, concreteIndices); + final int totalNumRequest = 
concreteIndices.length + remoteClusterIndices.size(); if (totalNumRequest == 0) { listener.onResponse(new FieldCapabilitiesResponse(new String[0], Collections.emptyMap())); return; } - final CountDown completionCounter = new CountDown(totalNumRequest); final List indexResponses = Collections.synchronizedList(new ArrayList<>()); final FailureCollector indexFailures = new FailureCollector(); - final Runnable countDown = () -> { - if (completionCounter.countDown()) { - List failures = indexFailures.values(); - if (indexResponses.size() > 0) { - if (request.isMergeResults()) { - // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable - // on a transport thread in case of large numbers of indices and/or fields - threadPool.executor(ThreadPool.Names.MANAGEMENT).submit( - ActionRunnable.supply( - listener, - () -> merge(indexResponses, request.includeUnmapped(), new ArrayList<>(failures))) - ); - } else { - listener.onResponse(new FieldCapabilitiesResponse(indexResponses, new ArrayList<>(failures))); - } - } else { - // we have no responses at all, maybe because of errors - if (indexFailures.size() > 0) { - // throw back the first exception - listener.onFailure(failures.iterator().next().getException()); - } else { - listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyList(), Collections.emptyList())); - } - } - } - }; + final Runnable countDown = createResponseMerger(request, totalNumRequest, indexResponses, indexFailures, listener); if (concreteIndices.length > 0) { // fork this action to the management pool as it can fan out to a large number of child requests that get handled on SAME and @@ -164,17 +124,10 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti // (particularly with security enabled) threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(ActionRunnable.wrap(listener, l -> { for (String index : concreteIndices) { - new AsyncShardsAction( + new AsyncFieldCapabilitiesShardsAction( transportService, clusterService, - new FieldCapabilitiesIndexRequest( - request.fields(), - index, - localIndices, - request.indexFilter(), - nowInMillis, - request.runtimeFields() - ), + prepareLocalIndexRequest(request, index, localIndices, nowInMillis), new ActionListener<>() { @Override public void onResponse(FieldCapabilitiesIndexResponse result) { @@ -201,14 +154,7 @@ public void onFailure(Exception e) { String clusterAlias = remoteIndices.getKey(); OriginalIndices originalIndices = remoteIndices.getValue(); Client remoteClusterClient = transportService.getRemoteClusterService().getRemoteClusterClient(threadPool, clusterAlias); - FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest(); - remoteRequest.setMergeResults(false); // we need to merge on this node - remoteRequest.indicesOptions(originalIndices.indicesOptions()); - remoteRequest.indices(originalIndices.indices()); - remoteRequest.fields(request.fields()); - remoteRequest.runtimeFields(request.runtimeFields()); - remoteRequest.indexFilter(request.indexFilter()); - remoteRequest.nowInMillis(nowInMillis); + FieldCapabilitiesRequest remoteRequest = prepareRemoteRequest(request, originalIndices, nowInMillis); remoteClusterClient.fieldCaps(remoteRequest, ActionListener.wrap(response -> { for (FieldCapabilitiesIndexResponse resp : response.getIndexResponses()) { indexResponses.add( @@ -232,6 +178,69 @@ public void onFailure(Exception e) { } } + private void checkIndexBlocks(ClusterState clusterState, String[] 
concreteIndices) { + clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + for (String index : concreteIndices) { + clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index); + } + } + + private Runnable createResponseMerger(FieldCapabilitiesRequest request, + int totalNumRequests, + List indexResponses, + FailureCollector indexFailures, + ActionListener listener) { + final CountDown completionCounter = new CountDown(totalNumRequests); + return () -> { + if (completionCounter.countDown()) { + List failures = indexFailures.values(); + if (indexResponses.size() > 0) { + if (request.isMergeResults()) { + // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable + // on a transport thread in case of large numbers of indices and/or fields + threadPool.executor(ThreadPool.Names.MANAGEMENT).submit( + ActionRunnable.supply( + listener, + () -> merge(indexResponses, request.includeUnmapped(), new ArrayList<>(failures))) + ); + } else { + listener.onResponse(new FieldCapabilitiesResponse(indexResponses, new ArrayList<>(failures))); + } + } else { + // we have no responses at all, maybe because of errors + if (indexFailures.size() > 0) { + // throw back the first exception + listener.onFailure(failures.iterator().next().getException()); + } else { + listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyList(), Collections.emptyList())); + } + } + } + }; + } + + private static FieldCapabilitiesIndexRequest prepareLocalIndexRequest(FieldCapabilitiesRequest request, + String index, + OriginalIndices originalIndices, + long nowInMillis) { + return new FieldCapabilitiesIndexRequest(request.fields(), index, originalIndices, + request.indexFilter(), nowInMillis, request.runtimeFields()); + } + + private static FieldCapabilitiesRequest prepareRemoteRequest(FieldCapabilitiesRequest request, + OriginalIndices originalIndices, + long nowInMillis) { + FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest(); + remoteRequest.setMergeResults(false); // we need to merge on this node + remoteRequest.indicesOptions(originalIndices.indicesOptions()); + remoteRequest.indices(originalIndices.indices()); + remoteRequest.fields(request.fields()); + remoteRequest.runtimeFields(request.runtimeFields()); + remoteRequest.indexFilter(request.indexFilter()); + remoteRequest.nowInMillis(nowInMillis); + return remoteRequest; + } + private FieldCapabilitiesResponse merge( List indexResponses, boolean includeUnmapped, @@ -317,14 +326,6 @@ int size() { } } - private static ClusterBlockException checkGlobalBlock(ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.READ); - } - - private static ClusterBlockException checkRequestBlock(ClusterState state, String concreteIndex) { - return state.blocks().indexBlockedException(ClusterBlockLevel.READ, concreteIndex); - } - private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesIndexRequest request) throws IOException { final ShardId shardId = request.shardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); @@ -397,134 +398,6 @@ private boolean canMatchShard(FieldCapabilitiesIndexRequest req, SearchExecution return SearchService.queryStillMatchesAfterRewrite(searchRequest, searchExecutionContext); } - /** - * An action that executes on each shard sequentially until it finds one that can match the provided - * {@link FieldCapabilitiesIndexRequest#indexFilter()}. 
In which case the shard is used - * to create the final {@link FieldCapabilitiesIndexResponse}. - */ - public static class AsyncShardsAction { - private final FieldCapabilitiesIndexRequest request; - private final TransportService transportService; - private final DiscoveryNodes nodes; - private final ActionListener listener; - private final GroupShardsIterator shardsIt; - - private volatile int shardIndex = 0; - - public AsyncShardsAction(TransportService transportService, - ClusterService clusterService, - FieldCapabilitiesIndexRequest request, - ActionListener listener) { - this.listener = listener; - this.transportService = transportService; - - ClusterState clusterState = clusterService.state(); - if (logger.isTraceEnabled()) { - logger.trace("executing [{}] based on cluster state version [{}]", request, clusterState.version()); - } - nodes = clusterState.nodes(); - ClusterBlockException blockException = checkGlobalBlock(clusterState); - if (blockException != null) { - throw blockException; - } - - this.request = request; - blockException = checkRequestBlock(clusterState, request.index()); - if (blockException != null) { - throw blockException; - } - - shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), - new String[]{request.index()}, null, null, null, null); - } - - public void start() { - tryNext(null, true); - } - - private void onFailure(ShardRouting shardRouting, Exception e) { - if (e != null) { - logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, request), e); - } - tryNext(e, false); - } - - private ShardRouting nextRoutingOrNull() { - if (shardsIt.size() == 0 || shardIndex >= shardsIt.size()) { - return null; - } - ShardRouting next = shardsIt.get(shardIndex).nextOrNull(); - if (next != null) { - return next; - } - moveToNextShard(); - return nextRoutingOrNull(); - } - - private void moveToNextShard() { - ++ shardIndex; - } - - private void tryNext(@Nullable final Exception lastFailure, boolean canMatchShard) { - ShardRouting shardRouting = nextRoutingOrNull(); - if (shardRouting == null) { - if (canMatchShard == false) { - if (lastFailure == null) { - listener.onResponse(new FieldCapabilitiesIndexResponse(request.index(), Collections.emptyMap(), false)); - } else { - logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, request), lastFailure); - listener.onFailure(lastFailure); - } - } else { - if (lastFailure == null || isShardNotAvailableException(lastFailure)) { - listener.onFailure(new NoShardAvailableActionException(null, - LoggerMessageFormat.format("No shard available for [{}]", request), lastFailure)); - } else { - logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, request), lastFailure); - listener.onFailure(lastFailure); - } - } - return; - } - DiscoveryNode node = nodes.get(shardRouting.currentNodeId()); - if (node == null) { - onFailure(shardRouting, new NoShardAvailableActionException(shardRouting.shardId())); - } else { - request.shardId(shardRouting.shardId()); - if (logger.isTraceEnabled()) { - logger.trace( - "sending request [{}] on node [{}]", - request, - node - ); - } - transportService.sendRequest(node, ACTION_SHARD_NAME, request, - new TransportResponseHandler() { - - @Override - public FieldCapabilitiesIndexResponse read(StreamInput in) throws IOException { - return new FieldCapabilitiesIndexResponse(in); - } - - @Override - public void handleResponse(final FieldCapabilitiesIndexResponse response) { - if (response.canMatch()) { - 
listener.onResponse(response); - } else { - moveToNextShard(); - tryNext(null, false); - } - } - - @Override - public void handleException(TransportException exp) { - onFailure(shardRouting, exp); - } - }); - } - } - } - private class ShardTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final FieldCapabilitiesIndexRequest request, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index de79cb0bf29ff..6d084b4fa89f9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -8,8 +8,8 @@ package org.elasticsearch.action.search; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.RAMOutputStream; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -56,7 +56,7 @@ static String buildScrollId(AtomicArray searchPhase static ParsedScrollId parseScrollId(String scrollId) { try { byte[] bytes = Base64.getUrlDecoder().decode(scrollId); - ByteArrayDataInput in = new ByteArrayDataInput(bytes); + ByteArrayStreamInput in = new ByteArrayStreamInput(bytes); final boolean includeContextUUID; final String type; final String firstChunk = in.readString(); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java index a1c265295b3e8..92973f32dbda0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskListener.java @@ -14,18 +14,18 @@ public interface ClusterStateTaskListener { /** * A callback for when task execution fails. * - * Implementations of this callback should not throw exceptions: an exception thrown here is logged by the master service at {@code - * ERROR} level and otherwise ignored. If log-and-ignore is the right behaviour then implementations should do so themselves, typically - * using a more specific logger and at a less dramatic log level. + * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the master service at {@code ERROR} + * level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the right behaviour then + * implementations must do so themselves, typically using a more specific logger and at a less dramatic log level. */ void onFailure(String source, Exception e); /** * A callback for when the task was rejected because the processing node is no longer the elected master. * - * Implementations of this callback should not throw exceptions: an exception thrown here is logged by the master service at {@code - * ERROR} level and otherwise ignored. If log-and-ignore is the right behaviour then implementations should do so themselves, typically - * using a more specific logger and at a less dramatic log level. + * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the master service at {@code ERROR} + * level and otherwise ignored, except in tests where it raises an {@link AssertionError}. 
If log-and-ignore is the right behaviour then + implementations must do so themselves, typically using a more specific logger and at a less dramatic log level. */ default void onNoLongerMaster(String source) { onFailure(source, new NotMasterException("no longer master. source: [" + source + "]")); @@ -35,9 +35,9 @@ default void onNoLongerMaster(String source) { * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} have been processed * properly by all listeners. * - * Implementations of this callback should not throw exceptions: an exception thrown here is logged by the master service at {@code - * ERROR} level and otherwise ignored. If log-and-ignore is the right behaviour then implementations should do so themselves, typically - * using a more specific logger and at a less dramatic log level. + * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the master service at {@code ERROR} + * level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the right behaviour then + * implementations must do so themselves, typically using a more specific logger and at a less dramatic log level. */ default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { } diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 947ca3acde233..5459b13ed499b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -86,6 +86,38 @@ public Entry snapshot(final Snapshot snapshot) { return null; } + /** + * Computes a map of repository shard id to set of generations, containing all shard generations that became obsolete and may be + * deleted from the repository as the cluster state moved from the given {@code old} value of {@link SnapshotsInProgress} to this + * instance. + */ + public Map> obsoleteGenerations(SnapshotsInProgress old) { + final Map> obsoleteGenerations = new HashMap<>(); + for (Entry entry : old.entries()) { + final Entry updatedEntry = snapshot(entry.snapshot()); + if (updatedEntry == null) { + continue; + } + for (ObjectObjectCursor oldShardAssignment : entry.shardsByRepoShardId()) { + final RepositoryShardId repositoryShardId = oldShardAssignment.key; + final ShardSnapshotStatus oldStatus = oldShardAssignment.value; + final ShardSnapshotStatus newStatus = updatedEntry.shardsByRepoShardId().get(repositoryShardId); + if (oldStatus.state == ShardState.SUCCESS + && oldStatus.generation() != null + && newStatus != null + && newStatus.state() == ShardState.SUCCESS + && newStatus.generation() != null + && oldStatus.generation().equals(newStatus.generation()) == false + ) { + // We moved from a non-null successful generation to a different non-null successful generation + // so the original generation is clearly obsolete because it was in-flight before and is now unreferenced everywhere.
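As a reviewer's aside (the diff resumes immediately below), here is a minimal sketch of how the new `obsoleteGenerations` method could be consumed, per its javadoc. It is not part of this change set: the caller name, the logger, and the `Map<RepositoryShardId, Set<String>>` shape are assumptions for illustration, chosen to match how `generation()` values are collected above.

```
// Illustrative only, not part of this change: a hypothetical caller reacting to a cluster state update.
static void logStaleGenerations(SnapshotsInProgress previous, SnapshotsInProgress current) {
    // note the direction: invoked on the *new* instance, passing the *old* one
    Map<RepositoryShardId, Set<String>> obsolete = current.obsoleteGenerations(previous);
    obsolete.forEach((repositoryShardId, generations) ->
        // a real caller (repository cleanup logic) would delete these now-unreferenced blobs
        LogManager.getLogger("snapshot-cleanup")
            .trace("generations {} of {} are no longer referenced", generations, repositoryShardId));
}
```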
+ obsoleteGenerations.computeIfAbsent(repositoryShardId, ignored -> new HashSet<>()).add(oldStatus.generation()); + } + } + } + return Map.copyOf(obsoleteGenerations); + } + @Override public String getWriteableName() { return TYPE; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 10132b1808ee7..79b810e8bea58 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -49,6 +49,7 @@ import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.discovery.ConfiguredHostsResolver; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryStats; @@ -56,6 +57,7 @@ import org.elasticsearch.discovery.PeerFinder; import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; +import org.elasticsearch.discovery.TransportAddressConnector; import org.elasticsearch.monitor.NodeHealthService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.threadpool.Scheduler; @@ -263,12 +265,12 @@ private void handleApplyCommit(ApplyCommitRequest applyCommitRequest, ActionList new ClusterApplyListener() { @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { applyListener.onFailure(e); } @Override - public void onSuccess(String source) { + public void onSuccess() { applyListener.onResponse(null); } }); @@ -532,7 +534,7 @@ void becomeCandidate(String method) { if (applierState.nodes().getMasterNodeId() != null) { applierState = clusterStateWithNoMasterBlock(applierState); - clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, (source, e) -> { + clusterApplier.onNewClusterState("becoming candidate: " + method, () -> applierState, e -> { }); } } @@ -1382,7 +1384,7 @@ public void onResponse(Void ignore) { clusterApplier.onNewClusterState(CoordinatorPublication.this.toString(), () -> applierState, new ClusterApplyListener() { @Override - public void onFailure(String source, Exception e) { + public void onFailure(Exception e) { synchronized (mutex) { removePublicationAndPossiblyBecomeCandidate("clusterApplier#onNewClusterState"); } @@ -1392,7 +1394,7 @@ public void onFailure(String source, Exception e) { } @Override - public void onSuccess(String source) { + public void onSuccess() { clusterStatePublicationEvent.setMasterApplyElapsedMillis( transportService.getThreadPool().rawRelativeTimeInMillis() - completionTimeMillis); synchronized (mutex) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 72bea68ee6f66..3f2eeb0fc0bb4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -7,6 +7,9 @@ */ package org.elasticsearch.cluster.metadata; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.PointValues; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; @@ 
-26,6 +29,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -37,6 +41,25 @@ public final class DataStream extends AbstractDiffable implements To public static final String BACKING_INDEX_PREFIX = ".ds-"; public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); + // Datastreams' leaf readers should be sorted by desc order of their timestamp field, as it allows search time optimizations + public static Comparator DATASTREAM_LEAF_READERS_SORTER = + Comparator.comparingLong( + (LeafReader r) -> { + try { + PointValues points = r.getPointValues(DataStream.TimestampField.FIXED_TIMESTAMP_FIELD); + if (points != null) { + byte[] sortValue = points.getMaxPackedValue(); + return LongPoint.decodeDimension(sortValue, 0); + } else if (r.numDocs() == 0) { + // points can be null if the segment contains only deleted documents + return Long.MIN_VALUE; + } + } catch (IOException e) { + } + throw new IllegalStateException("Can't access [" + + DataStream.TimestampField.FIXED_TIMESTAMP_FIELD + "] field for the data stream!"); + }) + .reversed(); private final LongSupplier timeProvider; private final String name; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index 27789d27fd9c7..93f82e29d9274 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -8,6 +8,8 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.common.io.stream.StreamInput; @@ -33,6 +35,8 @@ public class SingleNodeShutdownMetadata extends AbstractDiffable { + public static final Version REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = Version.V_8_0_0; + public static final ParseField NODE_ID_FIELD = new ParseField("node_id"); public static final ParseField TYPE_FIELD = new ParseField("type"); public static final ParseField REASON_FIELD = new ParseField("reason"); @@ -40,6 +44,7 @@ public class SingleNodeShutdownMetadata extends AbstractDiffable PARSER = new ConstructingObjectParser<>( "node_shutdown_info", @@ -49,7 +54,8 @@ public class SingleNodeShutdownMetadata extends AbstractDiffable TimeValue.parseTimeValue(p.textOrNull(), ALLOCATION_DELAY_FIELD.getPreferredName()), ALLOCATION_DELAY_FIELD, ObjectParser.ValueType.STRING_OR_NULL ); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TARGET_NODE_NAME_FIELD); } public static SingleNodeShutdownMetadata parse(XContentParser parser) { @@ -78,6 +85,7 @@ public static SingleNodeShutdownMetadata parse(XContentParser parser) { private final long startedAtMillis; private final boolean nodeSeen; @Nullable private final TimeValue allocationDelay; + @Nullable private final String targetNodeName; /** * @param nodeId The node ID that this shutdown metadata refers to. 
@@ -91,7 +99,8 @@ private SingleNodeShutdownMetadata( String reason, long startedAtMillis, boolean nodeSeen, - @Nullable TimeValue allocationDelay + @Nullable TimeValue allocationDelay, + @Nullable String targetNodeName ) { this.nodeId = Objects.requireNonNull(nodeId, "node ID must not be null"); this.type = Objects.requireNonNull(type, "shutdown type must not be null"); @@ -102,6 +111,13 @@ private SingleNodeShutdownMetadata( throw new IllegalArgumentException("shard allocation delay is only valid for RESTART-type shutdowns"); } this.allocationDelay = allocationDelay; + if (targetNodeName != null && type != Type.REPLACE) { + throw new IllegalArgumentException(new ParameterizedMessage("target node name is only valid for REPLACE type shutdowns, " + + "but was given type [{}] and target node name [{}]", type, targetNodeName).getFormattedMessage()); + } else if (targetNodeName == null && type == Type.REPLACE) { + throw new IllegalArgumentException("target node name is required for REPLACE type shutdowns"); + } + this.targetNodeName = targetNodeName; } public SingleNodeShutdownMetadata(StreamInput in) throws IOException { @@ -111,6 +127,11 @@ public SingleNodeShutdownMetadata(StreamInput in) throws IOException { this.startedAtMillis = in.readVLong(); this.nodeSeen = in.readBoolean(); this.allocationDelay = in.readOptionalTimeValue(); + if (in.getVersion().onOrAfter(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION)) { + this.targetNodeName = in.readOptionalString(); + } else { + this.targetNodeName = null; + } } /** @@ -148,6 +169,13 @@ public boolean getNodeSeen() { return nodeSeen; } + /** + * @return The name of the node to be used as a replacement for this node, or null. + */ + public String getTargetNodeName() { + return targetNodeName; + } + /** * @return The amount of time shard reallocation should be delayed for shards on this node, so that they will not be automatically * reassigned while the node is restarting. Will be {@code null} for non-restart shutdowns. 
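As a usage note for reviewers, the sketch below (not part of this change) records a REPLACE-type shutdown with the builder extended further down in this file. The no-arg `builder()` factory and the `setNodeId`/`setReason`/`setStartedAtMillis` setters are assumed from the existing class; only `Type.REPLACE` and `setTargetNodeName` come from this diff, and per the constructor checks above a REPLACE shutdown must name a target node while other types must not.

```
// Illustrative only: assumes the pre-existing builder() factory and setters; the REPLACE
// type and setTargetNodeName(...) are what this change introduces.
SingleNodeShutdownMetadata replaceShutdown = SingleNodeShutdownMetadata.builder()
    .setNodeId("node-to-vacate")
    .setType(SingleNodeShutdownMetadata.Type.REPLACE)
    .setReason("swapping hardware")
    .setStartedAtMillis(System.currentTimeMillis())
    .setTargetNodeName("replacement-node")   // required for REPLACE, rejected for REMOVE/RESTART
    .build();
```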
@@ -165,11 +193,18 @@ public TimeValue getAllocationDelay() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); - out.writeEnum(type); + if (out.getVersion().before(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION) && this.type == SingleNodeShutdownMetadata.Type.REPLACE) { + out.writeEnum(SingleNodeShutdownMetadata.Type.REMOVE); + } else { + out.writeEnum(type); + } out.writeString(reason); out.writeVLong(startedAtMillis); out.writeBoolean(nodeSeen); out.writeOptionalTimeValue(allocationDelay); + if (out.getVersion().onOrAfter(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION)) { + out.writeOptionalString(targetNodeName); + } } @Override @@ -184,6 +219,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (allocationDelay != null) { builder.field(ALLOCATION_DELAY_FIELD.getPreferredName(), allocationDelay.getStringRep()); } + if (targetNodeName != null) { + builder.field(TARGET_NODE_NAME_FIELD.getPreferredName(), targetNodeName); + } } builder.endObject(); @@ -200,7 +238,8 @@ && getNodeId().equals(that.getNodeId()) && getType() == that.getType() && getReason().equals(that.getReason()) && getNodeSeen() == that.getNodeSeen() - && Objects.equals(allocationDelay, that.allocationDelay); + && Objects.equals(allocationDelay, that.allocationDelay) + && Objects.equals(targetNodeName, that.targetNodeName); } @Override @@ -211,7 +250,8 @@ public int hashCode() { getReason(), getStartedAtMillis(), getNodeSeen(), - allocationDelay + allocationDelay, + targetNodeName ); } @@ -228,7 +268,8 @@ public static Builder builder(SingleNodeShutdownMetadata original) { .setType(original.getType()) .setReason(original.getReason()) .setStartedAtMillis(original.getStartedAtMillis()) - .setNodeSeen(original.getNodeSeen()); + .setNodeSeen(original.getNodeSeen()) + .setTargetNodeName(original.getTargetNodeName()); } public static class Builder { @@ -238,6 +279,7 @@ public static class Builder { private long startedAtMillis = -1; private boolean nodeSeen = false; private TimeValue allocationDelay; + private String targetNodeName; private Builder() {} @@ -295,6 +337,15 @@ public Builder setAllocationDelay(TimeValue allocationDelay) { return this; } + /** + * @param targetNodeName The name of the node which should be used to replace this one. Only valid if the shutdown type is REPLACE. + * @return This builder.
+ */ + public Builder setTargetNodeName(String targetNodeName) { + this.targetNodeName = targetNodeName; + return this; + } + public SingleNodeShutdownMetadata build() { if (startedAtMillis == -1) { throw new IllegalArgumentException("start timestamp must be set"); @@ -306,7 +357,8 @@ public SingleNodeShutdownMetadata build() { reason, startedAtMillis, nodeSeen, - allocationDelay + allocationDelay, + targetNodeName ); } } @@ -316,13 +368,16 @@ public SingleNodeShutdownMetadata build() { */ public enum Type { REMOVE, - RESTART; + RESTART, + REPLACE; public static Type parse(String type) { if ("remove".equals(type.toLowerCase(Locale.ROOT))) { return REMOVE; } else if ("restart".equals(type.toLowerCase(Locale.ROOT))) { return RESTART; + } else if ("replace".equals(type.toLowerCase(Locale.ROOT))) { + return REPLACE; } else { throw new IllegalArgumentException("unknown shutdown type: " + type); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index ca9b6b291a2b2..f5854bb97fafc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.Strings; @@ -141,7 +142,7 @@ public boolean equals(Object obj) { } Template other = (Template) obj; return Objects.equals(settings, other.settings) && - Objects.equals(mappings, other.mappings) && + mappingsEquals(this.mappings, other.mappings) && Objects.equals(aliases, other.aliases); } @@ -178,11 +179,33 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @SuppressWarnings("unchecked") - private static Map reduceMapping(Map mapping) { + static Map reduceMapping(Map mapping) { if (mapping.size() == 1 && MapperService.SINGLE_MAPPING_NAME.equals(mapping.keySet().iterator().next())) { return (Map) mapping.values().iterator().next(); } else { return mapping; } } + + static boolean mappingsEquals(CompressedXContent m1, CompressedXContent m2) { + if (m1 == m2) { + return true; + } + + if (m1 == null || m2 == null) { + return false; + } + + if (m1.equals(m2)) { + return true; + } + + Map thisUncompressedMapping = reduceMapping( + XContentHelper.convertToMap(m1.uncompressed(), true, XContentType.JSON).v2() + ); + Map otherUncompressedMapping = reduceMapping( + XContentHelper.convertToMap(m2.uncompressed(), true, XContentType.JSON).v2() + ); + return Maps.deepEquals(thisUncompressedMapping, otherUncompressedMapping); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 7e3909371bc9b..706f128e4bff1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -32,7 +32,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.function.Function; +import java.util.function.Consumer; import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -378,7 +378,7 @@ public String[] resolveNodes(String... 
nodes) { // the role is not a data role, we require an exact match (e.g., ingest) predicate = s -> s.contains(role); } - final Function mutation; + final Consumer mutation; if (Booleans.parseBoolean(matchAttrValue, true)) { mutation = resolvedNodesIds::add; } else { @@ -386,7 +386,7 @@ public String[] resolveNodes(String... nodes) { } for (final DiscoveryNode node : this) { if (predicate.test(node.getRoles())) { - mutation.apply(node.getId()); + mutation.accept(node.getId()); } } } else if(DiscoveryNode.COORDINATING_ONLY.equals(matchAttrName)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index b207776d49260..eb8d296365bd9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -267,6 +267,10 @@ public boolean hasInactivePrimaries() { return inactivePrimaryCount > 0; } + public boolean hasInactiveReplicas() { + return inactiveShardCount > inactivePrimaryCount; + } + public boolean hasInactiveShards() { return inactiveShardCount > 0; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java index 5a1391d39fe74..450937fc60a4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDecider.java @@ -46,6 +46,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } switch (thisNodeShutdownMetadata.getType()) { + case REPLACE: case REMOVE: return allocation.decision(Decision.NO, NAME, "node [%s] is preparing to be removed from the cluster", node.nodeId()); case RESTART: @@ -98,6 +99,7 @@ public Decision shouldAutoExpandToNode(IndexMetadata indexMetadata, DiscoveryNod "node [%s] is preparing to restart, auto-expansion waiting until it is complete", node.getId() ); + case REPLACE: case REMOVE: return allocation.decision(Decision.NO, NAME, "node [%s] is preparing for removal from the cluster", node.getId()); default: diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplier.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplier.java index be7bdda88dd1a..666738c0d7b87 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplier.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplier.java @@ -33,17 +33,26 @@ public interface ClusterApplier { */ interface ClusterApplyListener { /** - * Called on successful cluster state application - * @param source information where the cluster state came from + * Called on successful cluster state application. + * + * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the cluster applier service at + * {@code ERROR} level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the + * right behaviour then implementations must do so themselves, typically using a more specific logger and at a less dramatic log + * level. 
*/ - default void onSuccess(String source) { + default void onSuccess() { } /** - * Called on failure during cluster state application - * @param source information where the cluster state came from + * Called on failure during cluster state application. + * + * Implementations of this callback must not throw exceptions: an exception thrown here is logged by the cluster applier service at + * {@code ERROR} level and otherwise ignored, except in tests where it raises an {@link AssertionError}. If log-and-ignore is the + * right behaviour then implementations must do so themselves, typically using a more specific logger and at a less dramatic log + * level. + * * @param e exception that occurred */ - void onFailure(String source, Exception e); + void onFailure(Exception e); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 21743d0a9462a..69ee6fff5830d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -11,30 +11,30 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateObserver; -import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.LocalNodeMasterListener; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.TimeoutClusterStateListener; -import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.core.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -64,7 +64,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements public static final String CLUSTER_UPDATE_THREAD_NAME = "clusterApplierService#updateTask"; private final ClusterSettings clusterSettings; - protected final ThreadPool threadPool; + private final ThreadPool threadPool; private volatile TimeValue slowTaskLoggingThreshold; @@ -131,25 +131,24 @@ protected PrioritizedEsThreadPoolExecutor createThreadPoolExecutor() { 
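Before the declaration that follows, a hedged sketch of a caller adapting to the reworked applier API: the listener callbacks above no longer receive the `source` string, and the priority is now an explicit argument. It is not part of this change; `clusterApplierService` and the cleanup lambda body are hypothetical stand-ins.

```
// Illustrative caller, not part of this diff. Matches the signature declared just below:
// runOnApplierThread(String source, Priority priority, Consumer<ClusterState> consumer, ClusterApplyListener listener)
clusterApplierService.runOnApplierThread(
    "indices store: delete unused shard data",   // source, still used for logging by the service
    Priority.HIGH,                                // explicit priority (HIGH outside of tests)
    clusterState -> { /* hypothetical work against the applied state */ },
    new ClusterApplier.ClusterApplyListener() {
        @Override
        public void onSuccess() { }               // note: no source argument any more

        @Override
        public void onFailure(Exception e) {
            // must not throw; log-and-ignore at a modest level, per the listener javadoc above
            LogManager.getLogger("shard-data-cleanup").debug("cleanup failed", e);
        }
    });
```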
PrioritizedEsThreadPoolExecutor.StarvationWatcher.NOOP_STARVATION_WATCHER); } - class UpdateTask extends SourcePrioritizedRunnable implements Function { - final ClusterApplyListener listener; - final Function updateFunction; + class UpdateTask extends SourcePrioritizedRunnable { + private final ActionListener listener; + private final Function updateFunction; - UpdateTask(Priority priority, String source, ClusterApplyListener listener, - Function updateFunction) { + UpdateTask( + Priority priority, + String source, + ActionListener listener, + Function updateFunction + ) { super(priority, source); this.listener = listener; this.updateFunction = updateFunction; } - @Override - public ClusterState apply(ClusterState clusterState) { - return updateFunction.apply(clusterState); - } - @Override public void run() { - runTask(this); + runTask(source(), updateFunction, listener); } } @@ -175,7 +174,7 @@ protected synchronized void doClose() { * Should be renamed to appliedClusterState */ public ClusterState state() { - assert assertNotCalledFromClusterStateApplier("the applied cluster state is not yet available"); + assert assertNotCalledFromClusterStateApplier(); ClusterState clusterState = this.state.get(); assert clusterState != null : "initial cluster state not set yet"; return clusterState; @@ -280,9 +279,22 @@ public void run() { } } - public void runOnApplierThread(final String source, Consumer clusterStateConsumer, - final ClusterApplyListener listener, Priority priority) { - submitStateUpdateTask(source, ClusterStateTaskConfig.build(priority), + /** + * Run the given clusterStateConsumer on the applier thread. Should only be used in tests and by {@link IndicesStore} when it's deleting + * the data behind a shard that moved away from a node. + * + * @param priority {@link Priority#HIGH} unless in tests. + */ + // TODO get rid of this, make it so that shard data can be deleted without blocking the applier thread. 
+ public void runOnApplierThread( + String source, + Priority priority, + Consumer clusterStateConsumer, + ClusterApplyListener listener + ) { + submitStateUpdateTask( + source, + priority, (clusterState) -> { clusterStateConsumer.accept(clusterState); return clusterState; @@ -290,51 +302,52 @@ public void runOnApplierThread(final String source, Consumer clust listener); } - public void runOnApplierThread(final String source, Consumer clusterStateConsumer, - final ClusterApplyListener listener) { - runOnApplierThread(source, clusterStateConsumer, listener, Priority.HIGH); - } - public ThreadPool threadPool() { return threadPool; } @Override - public void onNewClusterState(final String source, final Supplier clusterStateSupplier, - final ClusterApplyListener listener) { - Function applyFunction = currentState -> { - ClusterState nextState = clusterStateSupplier.get(); - if (nextState != null) { - return nextState; - } else { - return currentState; - } - }; - submitStateUpdateTask(source, ClusterStateTaskConfig.build(Priority.HIGH), applyFunction, listener); + public void onNewClusterState( + final String source, + final Supplier clusterStateSupplier, + final ClusterApplyListener listener + ) { + submitStateUpdateTask( + source, + Priority.HIGH, + currentState -> { + ClusterState nextState = clusterStateSupplier.get(); + if (nextState != null) { + return nextState; + } else { + return currentState; + } + }, listener); } - private void submitStateUpdateTask(final String source, final ClusterStateTaskConfig config, - final Function executor, - final ClusterApplyListener listener) { + private void submitStateUpdateTask( + final String source, + final Priority priority, + final Function clusterStateUpdate, + final ClusterApplyListener listener + ) { if (lifecycle.started() == false) { return; } + final ThreadContext threadContext = threadPool.getThreadContext(); - final Supplier supplier = threadContext.newRestorableContext(true); + final Supplier storedContextSupplier = threadContext.newRestorableContext(true); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { threadContext.markAsSystemContext(); - final UpdateTask updateTask = new UpdateTask(config.priority(), source, - new SafeClusterApplyListener(listener, supplier, logger), executor); - if (config.timeout() != null) { - threadPoolExecutor.execute(updateTask, config.timeout(), - () -> threadPool.generic().execute( - () -> listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)))); - } else { - threadPoolExecutor.execute(updateTask); - } + threadPoolExecutor.execute(new UpdateTask( + priority, + source, + new ClusterApplyActionListener(source, listener, storedContextSupplier), + clusterStateUpdate)); } catch (EsRejectedExecutionException e) { - // ignore cases where we are shutting down..., there is really nothing interesting - // to be done here... + assert lifecycle.stoppedOrClosed() : e; + // ignore cases where we are shutting down..., there is really nothing interesting to be done here... 
if (lifecycle.stoppedOrClosed() == false) { throw e; } @@ -349,7 +362,7 @@ public static boolean assertNotClusterStateUpdateThread(String reason) { } /** asserts that the current stack trace does NOT involve a cluster state applier */ - private static boolean assertNotCalledFromClusterStateApplier(String reason) { + private static boolean assertNotCalledFromClusterStateApplier() { if (Thread.currentThread().getName().contains(CLUSTER_UPDATE_THREAD_NAME)) { for (StackTraceElement element : Thread.currentThread().getStackTrace()) { final String className = element.getClassName(); @@ -359,87 +372,95 @@ private static boolean assertNotCalledFromClusterStateApplier(String reason) { return true; } else if (className.equals(ClusterApplierService.class.getName()) && methodName.equals("callClusterStateAppliers")) { - throw new AssertionError("should not be called by a cluster state applier. reason [" + reason + "]"); + throw new AssertionError("should not be called by a cluster state applier: the applied state is not yet available"); } } } return true; } - private void runTask(UpdateTask task) { + private void runTask(String source, Function updateFunction, ActionListener clusterApplyListener) { if (lifecycle.started() == false) { - logger.debug("processing [{}]: ignoring, cluster applier service not started", task.source); + logger.debug("processing [{}]: ignoring, cluster applier service not started", source); return; } - logger.debug("processing [{}]: execute", task.source); + logger.debug("processing [{}]: execute", source); final ClusterState previousClusterState = state.get(); - long startTimeMS = currentTimeInMillis(); + final long startTimeMillis = threadPool.relativeTimeInMillis(); final StopWatch stopWatch = new StopWatch(); final ClusterState newClusterState; try { - try (Releasable ignored = stopWatch.timing("running task [" + task.source + ']')) { - newClusterState = task.apply(previousClusterState); + try (Releasable ignored = stopWatch.timing("running task [" + source + ']')) { + newClusterState = updateFunction.apply(previousClusterState); } } catch (Exception e) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, currentTimeInMillis() - startTimeMS)); + TimeValue executionTime = getTimeSince(startTimeMillis); logger.trace(() -> new ParameterizedMessage( "failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}", - executionTime, previousClusterState.version(), task.source, previousClusterState), e); - warnAboutSlowTaskIfNeeded(executionTime, task.source, stopWatch); - task.listener.onFailure(task.source, e); + executionTime, previousClusterState.version(), source, previousClusterState), e); + warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch); + clusterApplyListener.onFailure(e); return; } if (previousClusterState == newClusterState) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, currentTimeInMillis() - startTimeMS)); - logger.debug("processing [{}]: took [{}] no change in cluster state", task.source, executionTime); - warnAboutSlowTaskIfNeeded(executionTime, task.source, stopWatch); - task.listener.onSuccess(task.source); + TimeValue executionTime = getTimeSince(startTimeMillis); + logger.debug("processing [{}]: took [{}] no change in cluster state", source, executionTime); + warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch); + clusterApplyListener.onResponse(null); } else { if (logger.isTraceEnabled()) { - logger.debug("cluster state updated, version [{}], source [{}]\n{}", 
newClusterState.version(), task.source, + logger.debug("cluster state updated, version [{}], source [{}]\n{}", newClusterState.version(), source, newClusterState); } else { - logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), task.source); + logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source); } try { - applyChanges(task, previousClusterState, newClusterState, stopWatch); - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, currentTimeInMillis() - startTimeMS)); - logger.debug("processing [{}]: took [{}] done applying updated cluster state (version: {}, uuid: {})", task.source, + applyChanges(previousClusterState, newClusterState, source, stopWatch); + TimeValue executionTime = getTimeSince(startTimeMillis); + logger.debug("processing [{}]: took [{}] done applying updated cluster state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID()); - warnAboutSlowTaskIfNeeded(executionTime, task.source, stopWatch); - task.listener.onSuccess(task.source); + warnAboutSlowTaskIfNeeded(executionTime, source, stopWatch); + clusterApplyListener.onResponse(null); } catch (Exception e) { - TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, currentTimeInMillis() - startTimeMS)); + TimeValue executionTime = getTimeSince(startTimeMillis); if (logger.isTraceEnabled()) { logger.warn(new ParameterizedMessage( - "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", - executionTime, newClusterState.version(), newClusterState.stateUUID(), task.source, newClusterState), e); + "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}", + executionTime, newClusterState.version(), newClusterState.stateUUID(), source, newClusterState), e); } else { logger.warn(new ParameterizedMessage( - "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]", - executionTime, newClusterState.version(), newClusterState.stateUUID(), task.source), e); + "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]", + executionTime, newClusterState.version(), newClusterState.stateUUID(), source), e); } // failing to apply a cluster state with an exception indicates a bug in validation or in one of the appliers; if we // continue we will retry with the same cluster state but that might not help. 
assert applicationMayFail(); - task.listener.onFailure(task.source, e); + clusterApplyListener.onFailure(e); } } } - private void applyChanges(UpdateTask task, ClusterState previousClusterState, ClusterState newClusterState, StopWatch stopWatch) { - ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(task.source, newClusterState, previousClusterState); + private TimeValue getTimeSince(long startTimeMillis) { + return TimeValue.timeValueMillis(Math.max(0, threadPool.relativeTimeInMillis() - startTimeMillis)); + } + + private void applyChanges(ClusterState previousClusterState, ClusterState newClusterState, String source, StopWatch stopWatch) { + ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState); // new cluster state, notify all listeners final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { String summary = nodesDelta.shortSummary(); if (summary.length() > 0) { - logger.info("{}, term: {}, version: {}, reason: {}", - summary, newClusterState.term(), newClusterState.version(), task.source); + logger.info( + "{}, term: {}, version: {}, reason: {}", + summary, + newClusterState.term(), + newClusterState.version(), + source); } } @@ -515,33 +536,39 @@ private void callClusterStateListener(ClusterChangedEvent clusterChangedEvent, S } } - private static class SafeClusterApplyListener implements ClusterApplyListener { + private static class ClusterApplyActionListener implements ActionListener { + private final String source; private final ClusterApplyListener listener; - protected final Supplier context; - private final Logger logger; - - SafeClusterApplyListener(ClusterApplyListener listener, Supplier context, Logger logger) { + private final Supplier storedContextSupplier; + + ClusterApplyActionListener( + String source, + ClusterApplyListener listener, + Supplier storedContextSupplier + ) { + this.source = source; this.listener = listener; - this.context = context; - this.logger = logger; + this.storedContextSupplier = storedContextSupplier; } @Override - public void onFailure(String source, Exception e) { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.onFailure(source, e); + public void onFailure(Exception e) { + try (ThreadContext.StoredContext ignored = storedContextSupplier.get()) { + listener.onFailure(e); } catch (Exception inner) { inner.addSuppressed(e); + assert false : inner; logger.error(new ParameterizedMessage( "exception thrown by listener notifying of failure from [{}]", source), inner); } } @Override - public void onSuccess(String source) { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.onSuccess(source); + public void onResponse(Void unused) { + try (ThreadContext.StoredContext ignored = storedContextSupplier.get()) { + listener.onSuccess(); } catch (Exception e) { + assert false : e; logger.error(new ParameterizedMessage( "exception thrown by listener while notifying of cluster state processed from [{}]", source), e); } @@ -588,12 +615,7 @@ public void run() { } } - // this one is overridden in tests so we can control time - protected long currentTimeInMillis() { - return threadPool.relativeTimeInMillis(); - } - - // overridden by tests that need to check behaviour in the event of an application failure + // overridden by tests that need to check behaviour in the event of an application failure without tripping assertions protected boolean applicationMayFail() { return false; 
} diff --git a/server/src/main/java/org/elasticsearch/common/LocalTimeOffset.java b/server/src/main/java/org/elasticsearch/common/LocalTimeOffset.java index ebf777b6f3b2f..2cee860a1e790 100644 --- a/server/src/main/java/org/elasticsearch/common/LocalTimeOffset.java +++ b/server/src/main/java/org/elasticsearch/common/LocalTimeOffset.java @@ -29,8 +29,9 @@ * utc. So converting from utc is as simple as adding the offset. *
* Getting from local time back to utc is harder. Most local times happen once. - * But some local times happen twice. And some don't happen at all. Take, for - * example, the time in my house. Most days I don't touch my clocks and I'm a + * But some local times happen twice (DST overlap). + * And some don't happen at all (DST gap). Take, for example, + * the time in my house. Most days I don't touch my clocks and I'm a * constant offset from UTC. But once in the fall at 2am I roll my clock back. * So at 5am utc my clocks say 1am. Then at 6am utc my clocks say 1am AGAIN. * I do similarly terrifying things again in the spring when I skip my clocks @@ -38,6 +39,8 @@ *
* So there are two methods to convert from local time back to utc, * {@link #localToUtc(long, Strategy)} and {@link #localToUtcInThisOffset(long)}. + @see ZoneOffsetTransition#isGap() + @see ZoneOffsetTransition#isOverlap() */ public abstract class LocalTimeOffset { /** diff --git a/server/src/main/java/org/elasticsearch/common/Rounding.java b/server/src/main/java/org/elasticsearch/common/Rounding.java index 55b720a682ef7..c62fabd8a58c8 100644 --- a/server/src/main/java/org/elasticsearch/common/Rounding.java +++ b/server/src/main/java/org/elasticsearch/common/Rounding.java @@ -515,7 +515,14 @@ public Prepared prepare(long minUtcMillis, long maxUtcMillis) { } private TimeUnitPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) { - long minLookup = minUtcMillis - unit.extraLocalOffsetLookup(); + /* + minUtcMillis has to be decreased by 2 units. + This is because minUtcMillis may be rounded down by up to unit.extraLocalOffsetLookup, + and that rounded-down value might still fall within a DST gap or overlap. + So minUtcMillis has to be decreased by one additional unit + to ensure the transition just before minUtcMillis is taken into account. + */ + long minLookup = minUtcMillis - 2 * unit.extraLocalOffsetLookup(); long maxLookup = maxUtcMillis; long unitMillis = 0; diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java index 472b9080f33b8..db075f1d8e8f5 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -216,7 +216,6 @@ private static final class RefCountedReleasable extends AbstractRefCounted { private final Releasable releasable; RefCountedReleasable(Releasable releasable) { - super("bytes-reference"); this.releasable = releasable; } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java new file mode 100644 index 0000000000000..bfd727eb03b0d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.io.stream; + +import org.apache.lucene.util.BytesRef; + +import java.io.EOFException; +import java.io.IOException; + +/** + * Resettable {@link StreamInput} that wraps a byte array. It is heavily inspired by Lucene's + * {@link org.apache.lucene.store.ByteArrayDataInput}.
+ */ +public class ByteArrayStreamInput extends StreamInput { + + private byte[] bytes; + private int pos; + private int limit; + + public ByteArrayStreamInput() { + reset(BytesRef.EMPTY_BYTES); + } + + public ByteArrayStreamInput(byte[] bytes) { + reset(bytes); + } + + @Override + public int read() throws IOException { + return readByte() & 0xFF; + } + + public void reset(byte[] bytes) { + reset(bytes, 0, bytes.length); + } + + public int getPosition() { + return pos; + } + + public void setPosition(int pos) { + this.pos = pos; + } + + public void reset(byte[] bytes, int offset, int len) { + this.bytes = bytes; + pos = offset; + limit = offset + len; + } + + public int length() { + return limit; + } + + public void skipBytes(long count) { + pos += count; + } + + @Override + public void close() { + // No-op + } + + @Override + public int available() { + return limit - pos; + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + if (pos + length > limit) { + throw new EOFException("tried to read: " + length + " bytes but only " + available() + " remaining"); + } + } + + @Override + public byte readByte() { + return bytes[pos++]; + } + + @Override + public void readBytes(byte[] b, int offset, int len) { + System.arraycopy(bytes, pos, b, offset, len); + pos += len; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/util/ByteUtils.java b/server/src/main/java/org/elasticsearch/common/util/ByteUtils.java index 5a4a16ddd885e..401bbd022dc78 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ByteUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/ByteUtils.java @@ -8,9 +8,6 @@ package org.elasticsearch.common.util; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteArrayDataOutput; - /** Utility methods to do byte-level encoding. These methods are biased towards little-endian byte order because it is the most * common byte order and reading several bytes at once may be optimizable in the future with the help of sun.mist.Unsafe. */ @@ -85,45 +82,4 @@ public static float readFloatLE(byte[] arr, int offset) { return Float.intBitsToFloat(readIntLE(arr, offset)); } - /** Same as DataOutput#writeVLong but accepts negative values (written on 9 bytes). */ - public static void writeVLong(ByteArrayDataOutput out, long i) { - for (int k = 0; k < 8 && (i & ~0x7FL) != 0L; ++k) { - out.writeByte((byte)((i & 0x7FL) | 0x80L)); - i >>>= 7; - } - out.writeByte((byte)i); - } - - /** Same as DataOutput#readVLong but can read negative values (read on 9 bytes). 
*/ - public static long readVLong(ByteArrayDataInput in) { - // unwinded because of hotspot bugs, see Lucene's impl - byte b = in.readByte(); - if (b >= 0) return b; - long i = b & 0x7FL; - b = in.readByte(); - i |= (b & 0x7FL) << 7; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 14; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 21; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 28; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 35; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 42; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0x7FL) << 49; - if (b >= 0) return i; - b = in.readByte(); - i |= (b & 0xFFL) << 56; - return i; - } - } diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java index 6d6895867f888..48c287951ed2f 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableSingleObjectCache.java @@ -178,7 +178,6 @@ private final class CachedItem extends AbstractRefCounted { private final CancellationChecks cancellationChecks = new CancellationChecks(); CachedItem(Key key) { - super("cached item"); this.key = key; incRef(); // start with a refcount of 2 so we're not closed while adding the first listener this.future.addListener(new ActionListener<>() { diff --git a/server/src/main/java/org/elasticsearch/discovery/ConfiguredHostsResolver.java b/server/src/main/java/org/elasticsearch/discovery/ConfiguredHostsResolver.java new file mode 100644 index 0000000000000..26e2e18de79fd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/ConfiguredHostsResolver.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.discovery; + +import org.elasticsearch.common.transport.TransportAddress; + +import java.util.List; +import java.util.function.Consumer; + +public interface ConfiguredHostsResolver { + /** + * Attempt to resolve the configured hosts list to a list of transport addresses. + * + * @param consumer Consumer for the resolved list. May not be called if an error occurs or if another resolution attempt is in progress. 
+ */ + void resolveConfiguredHosts(Consumer> consumer); +} diff --git a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java index c7a4b51c5a9ec..110bea359a079 100644 --- a/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/elasticsearch/discovery/HandshakingTransportAddressConnector.java @@ -24,7 +24,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.discovery.PeerFinder.TransportAddressConnector; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TransportRequestOptions.Type; diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index 0f29161a49c12..be2a2a60f0ea3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -36,7 +36,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.function.Consumer; import java.util.stream.Collectors; import static java.util.Collections.emptyList; @@ -196,23 +195,6 @@ public List getLastResolvedAddresses() { return lastResolvedAddresses; } - public interface TransportAddressConnector { - /** - * Identify the node at the given address and, if it is a master node and not the local node then establish a full connection to it. - */ - void connectToRemoteMasterNode(TransportAddress transportAddress, ActionListener listener); - } - - public interface ConfiguredHostsResolver { - /** - * Attempt to resolve the configured hosts list to a list of transport addresses. - * - * @param consumer Consumer for the resolved list. May not be called if an error occurs or if another resolution attempt is in - * progress. - */ - void resolveConfiguredHosts(Consumer> consumer); - } - public Iterable getFoundPeers() { synchronized (mutex) { return getFoundPeersUnderLock(); diff --git a/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java b/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java index 2ec1bb06b5ba0..6e4b1544c9902 100644 --- a/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java +++ b/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.discovery.PeerFinder.ConfiguredHostsResolver; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; diff --git a/server/src/main/java/org/elasticsearch/discovery/TransportAddressConnector.java b/server/src/main/java/org/elasticsearch/discovery/TransportAddressConnector.java new file mode 100644 index 0000000000000..b830ad2c1facf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/TransportAddressConnector.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.discovery; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.transport.TransportAddress; + +public interface TransportAddressConnector { + /** + * Identify the node at the given address and, if it is a master node and not the local node then establish a full connection to it. + */ + void connectToRemoteMasterNode(TransportAddress transportAddress, ActionListener listener); +} diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 20e10a8b7a104..cfdbe4c12a9cc 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -121,7 +121,7 @@ public void beforeAllocation(final RoutingAllocation allocation) { @Override public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { assert replicaShardAllocator != null; - if (allocation.routingNodes().hasInactiveShards()) { + if (allocation.routingNodes().hasInactiveReplicas()) { // cancel existing recoveries if we have a better match replicaShardAllocator.processExistingRecoveries(allocation); } diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index dc15232b184a5..bf5524b7dc8ef 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -81,12 +81,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final AtomicLong totalChannelsAccepted = new AtomicLong(); private final Set httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final PlainActionFuture allClientsClosedListener = PlainActionFuture.newFuture(); - private final RefCounted refCounted = new AbstractRefCounted("abstract-http-server-transport") { - @Override - protected void closeInternal() { - allClientsClosedListener.onResponse(null); - } - }; + private final RefCounted refCounted = AbstractRefCounted.of(() -> allClientsClosedListener.onResponse(null)); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final HttpClientStatsTracker httpClientStatsTracker; diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java index 37a249f2f3067..d55f2d41e064f 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -8,7 +8,6 @@ package org.elasticsearch.index.codec; -import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene87.Lucene87Codec; import org.apache.lucene.codecs.lucene87.Lucene87Codec.Mode; @@ -33,16 +32,16 @@ public class CodecService { /** the raw unfiltered lucene default. 
useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; - public CodecService(@Nullable MapperService mapperService, Logger logger) { + public CodecService(@Nullable MapperService mapperService) { final var codecs = new HashMap(); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene87Codec()); codecs.put(BEST_COMPRESSION_CODEC, new Lucene87Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, - new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); + new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService)); codecs.put(BEST_COMPRESSION_CODEC, - new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); + new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index a08a05ee5f223..d9a6590bdc80e 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -8,15 +8,12 @@ package org.elasticsearch.index.codec; -import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat; import org.apache.lucene.codecs.lucene87.Lucene87Codec; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.index.mapper.CompletionFieldMapper; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; /** @@ -28,7 +25,7 @@ * configured for a specific field the default postings format is used. 
*/ public class PerFieldMappingPostingFormatCodec extends Lucene87Codec { - private final Logger logger; + private final MapperService mapperService; // Always enable compression on binary doc values private final DocValuesFormat docValuesFormat = new Lucene80DocValuesFormat(Lucene80DocValuesFormat.Mode.BEST_COMPRESSION); @@ -38,21 +35,18 @@ public class PerFieldMappingPostingFormatCodec extends Lucene87Codec { "PerFieldMappingPostingFormatCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC; } - public PerFieldMappingPostingFormatCodec(Mode compressionMode, MapperService mapperService, Logger logger) { + public PerFieldMappingPostingFormatCodec(Mode compressionMode, MapperService mapperService) { super(compressionMode); this.mapperService = mapperService; - this.logger = logger; } @Override public PostingsFormat getPostingsFormatForField(String field) { - final MappedFieldType fieldType = mapperService.fieldType(field); - if (fieldType == null) { - logger.warn("no index mapper found for field: [{}] returning default postings format", field); - } else if (fieldType instanceof CompletionFieldMapper.CompletionFieldType) { - return CompletionFieldMapper.CompletionFieldType.postingsFormat(); + PostingsFormat format = mapperService.mappingLookup().getPostingsFormat(field); + if (format == null) { + return super.getPostingsFormatForField(field); } - return super.getPostingsFormatForField(field); + return format; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 9b7ba7986167b..1714dad668e2c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -9,6 +9,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; @@ -32,6 +33,7 @@ import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.threadpool.ThreadPool; +import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.function.LongSupplier; @@ -70,6 +72,7 @@ public final class EngineConfig { private final CircuitBreakerService circuitBreakerService; private final LongSupplier globalCheckpointSupplier; private final Supplier retentionLeasesSupplier; + private final Comparator leafSorter; /** * A supplier of the outstanding retention leases. 
This is used during merged operations to determine which operations that have been @@ -131,7 +134,8 @@ public EngineConfig( LongSupplier globalCheckpointSupplier, Supplier retentionLeasesSupplier, LongSupplier primaryTermSupplier, - IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier) { + IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier, + Comparator leafSorter) { this.shardId = shardId; this.indexSettings = indexSettings; this.threadPool = threadPool; @@ -169,6 +173,7 @@ public EngineConfig( this.retentionLeasesSupplier = Objects.requireNonNull(retentionLeasesSupplier); this.primaryTermSupplier = primaryTermSupplier; this.snapshotCommitSupplier = snapshotCommitSupplier; + this.leafSorter = leafSorter; } /** @@ -353,4 +358,12 @@ public LongSupplier getPrimaryTermSupplier() { public IndexStorePlugin.SnapshotCommitSupplier getSnapshotCommitSupplier() { return snapshotCommitSupplier; } + + /** + * Returns how segments should be sorted for reading or @null if no sorting should be applied. + */ + @Nullable + public Comparator getLeafSorter() { + return leafSorter; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 629e6bd68a9b5..67014b0ca824c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -832,7 +832,11 @@ protected long generateSeqNoForOperationOnPrimary(final Operation operation) { return doGenerateSeqNoForOperation(operation); } - protected void advanceMaxSeqNoOfUpdatesOrDeletesOnPrimary(long seqNo) { + protected void advanceMaxSeqNoOfUpdatesOnPrimary(long seqNo) { + advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); + } + + protected void advanceMaxSeqNoOfDeletesOnPrimary(long seqNo) { advanceMaxSeqNoOfUpdatesOrDeletes(seqNo); } @@ -900,7 +904,7 @@ public IndexResult index(Index index) throws IOException { final boolean toAppend = plan.indexIntoLucene && plan.useLuceneUpdateDocument == false; if (toAppend == false) { - advanceMaxSeqNoOfUpdatesOrDeletesOnPrimary(index.seqNo()); + advanceMaxSeqNoOfUpdatesOnPrimary(index.seqNo()); } } else { markSeqNoAsSeen(index.seqNo()); @@ -1276,7 +1280,7 @@ public DeleteResult delete(Delete delete) throws IOException { delete.primaryTerm(), delete.version(), delete.versionType(), delete.origin(), delete.startTime(), delete.getIfSeqNo(), delete.getIfPrimaryTerm()); - advanceMaxSeqNoOfUpdatesOrDeletesOnPrimary(delete.seqNo()); + advanceMaxSeqNoOfDeletesOnPrimary(delete.seqNo()); } else { markSeqNoAsSeen(delete.seqNo()); } @@ -2189,6 +2193,11 @@ private IndexWriterConfig getIndexWriterConfig() { if (config().getIndexSort() != null) { iwc.setIndexSort(config().getIndexSort()); } + // Provide a custom leaf sorter, so that index readers opened from this writer + // will have their leaves sorted according to the given leaf sorter.
+ if (engineConfig.getLeafSorter() != null) { + iwc.setLeafSorter(engineConfig.getLeafSorter()); + } return iwc; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 54c10dbef9449..0f2441dcbc621 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -199,6 +199,8 @@ protected final ElasticsearchDirectoryReader wrapReader(DirectoryReader reader, } protected DirectoryReader open(IndexCommit commit) throws IOException { + // TODO: provide engineConfig.getLeafSorter() when opening a DirectoryReader from a commit + // should be available from Lucene v 8.10 assert Transports.assertNotTransportThread("opening index commit of a read-only engine"); if (lazilyLoadSoftDeletes) { return new LazySoftDeletesDirectoryReaderWrapper(DirectoryReader.open(commit), Lucene.SOFT_DELETES_FIELD); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index dff93fa777402..37b1dad95ffaa 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.geometry.utils.Geohash; +import org.elasticsearch.script.Converters; import org.elasticsearch.script.Field; import org.elasticsearch.script.FieldValues; import org.elasticsearch.script.InvalidConversion; @@ -25,7 +26,6 @@ import java.io.IOException; import java.time.Instant; import java.time.ZoneOffset; -import java.time.temporal.ChronoUnit; import java.util.AbstractList; import java.util.Arrays; import java.util.Comparator; @@ -240,11 +240,10 @@ void refreshArray() throws IOException { @Override public long getLongValue() { throwIfEmpty(); - Instant dt = dates[0].toInstant(); if (isNanos) { - return ChronoUnit.NANOS.between(java.time.Instant.EPOCH, dt); + return Converters.convertDateNanosToLong(dates[0]); } - return dt.toEpochMilli(); + return Converters.convertDateMillisToLong(dates[0]); } @Override @@ -587,13 +586,13 @@ private static boolean[] grow(boolean[] array, int minSize) { @Override public long getLongValue() { throwIfEmpty(); - return values[0] ? 1L : 0L; + return Converters.convertBooleanToLong(values[0]); } @Override public double getDoubleValue() { throwIfEmpty(); - return values[0] ? 
1.0D : 0.0D; + return Converters.convertBooleanToDouble(values[0]); } @Override @@ -675,12 +674,12 @@ public final String getValue() { @Override public long getLongValue() { - return Long.parseLong(get(0)); + return Converters.convertStringToLong(get(0)); } @Override public double getDoubleValue() { - return Double.parseDouble(get(0)); + return Converters.convertStringToDouble(get(0)); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java index 3283af734a0a2..6a976fa0d47a3 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractBinaryDVLeafFieldData.java @@ -9,9 +9,9 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.BinaryDocValues; -import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; import org.elasticsearch.index.fielddata.LeafFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -42,7 +42,7 @@ public SortedBinaryDocValues getBytesValues() { return new SortedBinaryDocValues() { int count; - final ByteArrayDataInput in = new ByteArrayDataInput(); + final ByteArrayStreamInput in = new ByteArrayStreamInput(); final BytesRef scratch = new BytesRef(); @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index de3b9e80a3c03..779d49d28de7c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -12,11 +12,11 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.search.Query; -import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -204,8 +204,7 @@ public BytesRef binaryValue() { try { CollectionUtils.sortAndDedup(bytesList); int size = bytesList.size(); - final byte[] bytes = new byte[totalSize + (size + 1) * 5]; - ByteArrayDataOutput out = new ByteArrayDataOutput(bytes); + BytesStreamOutput out = new BytesStreamOutput(totalSize + (size + 1) * 5); out.writeVInt(size); // write total number of values for (int i = 0; i < size; i ++) { final byte[] value = bytesList.get(i); @@ -213,7 +212,7 @@ public BytesRef binaryValue() { out.writeVInt(valueLength); out.writeBytes(value, 0, valueLength); } - return new BytesRef(bytes, 0, out.getPosition()); + return out.bytes().toBytesRef(); } catch (IOException e) { throw new ElasticsearchException("Failed to get binary value", e); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java index c92bccb4d56bd..68748623e46d3 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryRangeUtil.java @@ -9,11 +9,11 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.InetAddressPoint; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import java.io.IOException; import java.net.InetAddress; @@ -28,8 +28,7 @@ enum BinaryRangeUtil { ; static BytesRef encodeIPRanges(Set ranges) throws IOException { - final byte[] encoded = new byte[5 + (16 * 2) * ranges.size()]; - ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); + BytesStreamOutput out = new BytesStreamOutput(5 + (16 * 2) * ranges.size()); out.writeVInt(ranges.size()); for (RangeFieldMapper.Range range : ranges) { InetAddress fromValue = (InetAddress) range.from; @@ -40,10 +39,10 @@ static BytesRef encodeIPRanges(Set ranges) throws IOExce byte[] encodedToValue = InetAddressPoint.encode(toValue); out.writeBytes(encodedToValue, 0, encodedToValue.length); } - return new BytesRef(encoded, 0, out.getPosition()); + return out.bytes().toBytesRef(); } - static List decodeIPRanges(BytesRef encodedRanges) { + static List decodeIPRanges(BytesRef encodedRanges) throws IOException { return decodeRanges(encodedRanges, RangeType.IP, BinaryRangeUtil::decodeIP); } @@ -59,8 +58,7 @@ static BytesRef encodeLongRanges(Set ranges) throws IOEx Comparator toComparator = Comparator.comparingLong(range -> ((Number) range.to).longValue()); sortedRanges.sort(fromComparator.thenComparing(toComparator)); - final byte[] encoded = new byte[5 + (9 * 2) * sortedRanges.size()]; - ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); + BytesStreamOutput out = new BytesStreamOutput(5 + (9 * 2) * sortedRanges.size()); out.writeVInt(sortedRanges.size()); for (RangeFieldMapper.Range range : sortedRanges) { byte[] encodedFrom = encodeLong(((Number) range.from).longValue()); @@ -68,10 +66,10 @@ static BytesRef encodeLongRanges(Set ranges) throws IOEx byte[] encodedTo = encodeLong(((Number) range.to).longValue()); out.writeBytes(encodedTo, encodedTo.length); } - return new BytesRef(encoded, 0, out.getPosition()); + return out.bytes().toBytesRef(); } - static List decodeLongRanges(BytesRef encodedRanges) { + static List decodeLongRanges(BytesRef encodedRanges) throws IOException { return decodeRanges(encodedRanges, RangeType.LONG, BinaryRangeUtil::decodeLong); } @@ -82,8 +80,7 @@ static BytesRef encodeDoubleRanges(Set ranges) throws IO Comparator toComparator = Comparator.comparingDouble(range -> ((Number) range.to).doubleValue()); sortedRanges.sort(fromComparator.thenComparing(toComparator)); - final byte[] encoded = new byte[5 + (8 * 2) * sortedRanges.size()]; - ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); + BytesStreamOutput out = new BytesStreamOutput(5 + (8 * 2) * sortedRanges.size()); out.writeVInt(sortedRanges.size()); for (RangeFieldMapper.Range range : sortedRanges) { byte[] encodedFrom = encodeDouble(((Number) range.from).doubleValue()); @@ -91,27 +88,27 @@ static BytesRef encodeDoubleRanges(Set ranges) throws IO byte[] encodedTo = encodeDouble(((Number) range.to).doubleValue()); out.writeBytes(encodedTo, encodedTo.length); } - return new BytesRef(encoded, 0, 
out.getPosition()); + return out.bytes().toBytesRef(); } - static List decodeDoubleRanges(BytesRef encodedRanges) { + static List decodeDoubleRanges(BytesRef encodedRanges) throws IOException { return decodeRanges(encodedRanges, RangeType.DOUBLE, BinaryRangeUtil::decodeDouble); } - static List decodeFloatRanges(BytesRef encodedRanges) { + static List decodeFloatRanges(BytesRef encodedRanges) throws IOException { return decodeRanges(encodedRanges, RangeType.FLOAT, BinaryRangeUtil::decodeFloat); } static List decodeRanges(BytesRef encodedRanges, RangeType rangeType, - TriFunction decodeBytes) { + TriFunction decodeBytes) throws IOException { RangeType.LengthType lengthType = rangeType.lengthType; - ByteArrayDataInput in = new ByteArrayDataInput(); + ByteArrayStreamInput in = new ByteArrayStreamInput(); in.reset(encodedRanges.bytes, encodedRanges.offset, encodedRanges.length); - int numRanges = in.readVInt(); + int numRanges = in.readVInt(); List ranges = new ArrayList<>(numRanges); final byte[] bytes = encodedRanges.bytes; @@ -137,8 +134,7 @@ static BytesRef encodeFloatRanges(Set ranges) throws IOE Comparator toComparator = Comparator.comparingDouble(range -> ((Number) range.to).floatValue()); sortedRanges.sort(fromComparator.thenComparing(toComparator)); - final byte[] encoded = new byte[5 + (4 * 2) * sortedRanges.size()]; - ByteArrayDataOutput out = new ByteArrayDataOutput(encoded); + BytesStreamOutput out = new BytesStreamOutput(5 + (4 * 2) * sortedRanges.size()); out.writeVInt(sortedRanges.size()); for (RangeFieldMapper.Range range : sortedRanges) { byte[] encodedFrom = encodeFloat(((Number) range.from).floatValue()); @@ -146,7 +142,7 @@ static BytesRef encodeFloatRanges(Set ranges) throws IOE byte[] encodedTo = encodeFloat(((Number) range.to).floatValue()); out.writeBytes(encodedTo, encodedTo.length); } - return new BytesRef(encoded, 0, out.getPosition()); + return out.bytes().toBytesRef(); } static byte[] encodeDouble(double number) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 062265ad9da7e..8fcd20cd58aa2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -213,13 +213,8 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S @Override public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { - if (format != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); - } - if (timeZone != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() - + "] does not support custom time zones"); - } + checkNoFormat(format); + checkNoTimeZone(timeZone); return DocValueFormat.BOOLEAN; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java index 8d26a2b382070..e349bbb9fd20d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanScriptFieldType.java @@ -95,12 +95,8 @@ public Object valueForDisplay(Object value) { @Override public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - if (format != null) { - throw new IllegalArgumentException("Field [" + name() 
+ "] of type [" + typeName() + "] does not support custom formats"); - } - if (timeZone != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); - } + checkNoFormat(format); + checkNoTimeZone(timeZone); return DocValueFormat.BOOLEAN; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java index 3052d860a3a92..b6bd39476a5bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompletionFieldMapper.java @@ -11,7 +11,6 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.suggest.document.Completion84PostingsFormat; import org.apache.lucene.search.suggest.document.CompletionAnalyzer; import org.apache.lucene.search.suggest.document.CompletionQuery; import org.apache.lucene.search.suggest.document.FuzzyCompletionQuery; @@ -133,7 +132,7 @@ public static class Builder extends FieldMapper.Builder { private final Parameter maxInputLength = Parameter.intParam("max_input_length", true, m -> builder(m).maxInputLength.get(), Defaults.DEFAULT_MAX_INPUT_LENGTH) .addDeprecatedName("max_input_len") - .setValidator(Builder::validateInputLength) + .addValidator(Builder::validateInputLength) .alwaysSerialize(); private final Parameter> meta = Parameter.metaParam(); @@ -209,8 +208,6 @@ private void checkCompletionContextsLimit() { public static final class CompletionFieldType extends TermBasedFieldType { - private static PostingsFormat postingsFormat; - private ContextMappings contextMappings = null; public CompletionFieldType(String name, NamedAnalyzer searchAnalyzer, Map meta) { @@ -236,16 +233,6 @@ public ContextMappings getContextMappings() { return contextMappings; } - /** - * @return postings format to use for this field-type - */ - public static synchronized PostingsFormat postingsFormat() { - if (postingsFormat == null) { - postingsFormat = new Completion84PostingsFormat(); - } - return postingsFormat; - } - /** * Completion prefix query */ @@ -313,6 +300,10 @@ public CompletionFieldType fieldType() { return (CompletionFieldType) super.fieldType(); } + static PostingsFormat postingsFormat() { + return PostingsFormat.forName("Completion84"); + } + @Override public boolean parsesArrayValue() { return true; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java b/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java index c841eef7768c8..78ab0f9d121a5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/CompositeRuntimeField.java @@ -38,7 +38,7 @@ public class CompositeRuntimeField implements RuntimeField { () -> null, RuntimeField::parseScript, RuntimeField.initializerNotSupported() - ).setValidator(s -> { + ).addValidator(s -> { if (s == null) { throw new IllegalArgumentException("composite runtime field [" + name + "] must declare a [script]"); } @@ -50,7 +50,7 @@ public class CompositeRuntimeField implements RuntimeField { Collections::emptyMap, (f, p, o) -> parseFields(f, o), RuntimeField.initializerNotSupported() - ).setValidator(objectMap -> { + ).addValidator(objectMap -> { if (objectMap == null || objectMap.isEmpty()) { throw new 
IllegalArgumentException("composite runtime field [" + name + "] must declare its [fields]"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java index 6f80de3002f0e..b4be07e53825d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java @@ -34,11 +34,18 @@ public class DataStreamTimestampFieldMapper extends MetadataFieldMapper { public static final String NAME = "_data_stream_timestamp"; private static final String DEFAULT_PATH = "@timestamp"; + private static final DataStreamTimestampFieldMapper ENABLED_INSTANCE = + new DataStreamTimestampFieldMapper(TimestampFieldType.INSTANCE, true); + private static final DataStreamTimestampFieldMapper DISABLED_INSTANCE = + new DataStreamTimestampFieldMapper(TimestampFieldType.INSTANCE, false); + // For now the field shouldn't be useable in searches. // In the future it should act as an alias to the actual data stream timestamp field. public static final class TimestampFieldType extends MappedFieldType { - public TimestampFieldType() { + static final TimestampFieldType INSTANCE = new TimestampFieldType(); + + private TimestampFieldType() { super(NAME, false, false, false, TextSearchInfo.NONE, Map.of()); } @@ -84,16 +91,15 @@ protected List> getParameters() { @Override public MetadataFieldMapper build() { - return new DataStreamTimestampFieldMapper(new TimestampFieldType(), enabled.getValue()); + return enabled.getValue() ? ENABLED_INSTANCE : DISABLED_INSTANCE; } } public static final TypeParser PARSER = new ConfigurableTypeParser( - c -> new DataStreamTimestampFieldMapper(new TimestampFieldType(), false), + c -> DISABLED_INSTANCE, c -> new Builder() ); - private final String path = DEFAULT_PATH; private final boolean enabled; private DataStreamTimestampFieldMapper(MappedFieldType mappedFieldType, boolean enabled) { @@ -112,16 +118,16 @@ public void doValidate(MappingLookup lookup) { return; } - Mapper mapper = lookup.getMapper(path); + Mapper mapper = lookup.getMapper(DEFAULT_PATH); if (mapper == null) { - throw new IllegalArgumentException("data stream timestamp field [" + path + "] does not exist"); + throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] does not exist"); } if (DateFieldMapper.CONTENT_TYPE.equals(mapper.typeName()) == false && DateFieldMapper.DATE_NANOS_CONTENT_TYPE.equals(mapper.typeName()) == false) { throw new IllegalArgumentException( "data stream timestamp field [" - + path + + DEFAULT_PATH + "] is of type [" + mapper.typeName() + "], but [" @@ -134,19 +140,19 @@ public void doValidate(MappingLookup lookup) { DateFieldMapper dateFieldMapper = (DateFieldMapper) mapper; if (dateFieldMapper.fieldType().isSearchable() == false) { - throw new IllegalArgumentException("data stream timestamp field [" + path + "] is not indexed"); + throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] is not indexed"); } if (dateFieldMapper.fieldType().hasDocValues() == false) { - throw new IllegalArgumentException("data stream timestamp field [" + path + "] doesn't have doc values"); + throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] doesn't have doc values"); } if (dateFieldMapper.getNullValue() != null) { throw new IllegalArgumentException( - "data stream timestamp field 
[" + path + "] has disallowed [null_value] attribute specified" + "data stream timestamp field [" + DEFAULT_PATH + "] has disallowed [null_value] attribute specified" ); } if (dateFieldMapper.getIgnoreMalformed()) { throw new IllegalArgumentException( - "data stream timestamp field [" + path + "] has disallowed [ignore_malformed] attribute specified" + "data stream timestamp field [" + DEFAULT_PATH + "] has disallowed [ignore_malformed] attribute specified" ); } @@ -192,16 +198,16 @@ public void postParse(DocumentParserContext context) throws IOException { return; } - IndexableField[] fields = context.rootDoc().getFields(path); + IndexableField[] fields = context.rootDoc().getFields(DEFAULT_PATH); if (fields.length == 0) { - throw new IllegalArgumentException("data stream timestamp field [" + path + "] is missing"); + throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] is missing"); } long numberOfValues = Arrays.stream(fields) .filter(indexableField -> indexableField.fieldType().docValuesType() == DocValuesType.SORTED_NUMERIC) .count(); if (numberOfValues > 1) { - throw new IllegalArgumentException("data stream timestamp field [" + path + "] encountered multiple values"); + throw new IllegalArgumentException("data stream timestamp field [" + DEFAULT_PATH + "] encountered multiple values"); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java index 14d1a0ee9336b..b379729d50012 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocCountFieldMapper.java @@ -22,7 +22,9 @@ public class DocCountFieldMapper extends MetadataFieldMapper { public static final String NAME = "_doc_count"; public static final String CONTENT_TYPE = "_doc_count"; - public static final TypeParser PARSER = new FixedTypeParser(c -> new DocCountFieldMapper()); + private static final DocCountFieldMapper INSTANCE = new DocCountFieldMapper(); + + public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); public static final class DocCountFieldType extends MappedFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java index af9ca71d1deb2..66a90e0fae321 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java @@ -86,9 +86,7 @@ public Object valueForDisplay(Object value) { @Override public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - if (timeZone != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); - } + checkNoTimeZone(timeZone); if (format == null) { return DocValueFormat.RAW; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 57d42c43d0c70..86dc9ec80e099 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -385,8 +385,10 @@ public final Map indexAnalyzers() { public static class MultiFields implements Iterable, ToXContent { + private static final MultiFields EMPTY = new 
MultiFields(Collections.emptyMap()); + public static MultiFields empty() { - return new MultiFields(Collections.emptyMap()); + return EMPTY; } public static class Builder { @@ -586,7 +588,7 @@ public static final class Parameter implements Supplier { private final TriFunction parser; private final Function initializer; private boolean acceptsNull = false; - private Consumer validator = null; + private List> validators = new ArrayList<>(); private Serializer serializer = XContentBuilder::field; private SerializerCheck serializerCheck = (includeDefaults, isConfigured, value) -> includeDefaults || isConfigured; private Function conflictSerializer = Objects::toString; @@ -681,10 +683,11 @@ public Parameter deprecated() { } /** - * Adds validation to a parameter, called after parsing and merging + * Adds validation to a parameter, called after parsing and merging. Multiple + * validators can be added and all of them will be executed. */ - public Parameter setValidator(Consumer validator) { - this.validator = validator; + public Parameter addValidator(Consumer validator) { + this.validators.add(validator); return this; } @@ -741,8 +744,9 @@ public Parameter precludesParameters(Parameter... ps) { } void validate() { - if (validator != null) { - validator.accept(getValue()); + // Iterate over the list of validators and execute them one by one. + for (Consumer v : validators) { + v.accept(getValue()); } if (this.isConfigured()) { for (Parameter p : requires) { @@ -893,7 +897,7 @@ public static Parameter restrictedStringParam(String name, boolean updat assert values.length > 0; Set acceptedValues = new LinkedHashSet<>(Arrays.asList(values)); return stringParam(name, updateable, initializer, values[0]) - .setValidator(v -> { + .addValidator(v -> { if (acceptedValues.contains(v)) { return; } @@ -1077,7 +1081,7 @@ protected void addScriptValidation( Parameter indexParam, Parameter docValuesParam ) { - scriptParam.setValidator(s -> { + scriptParam.addValidator(s -> { if (s != null && indexParam.get() == false && docValuesParam.get() == false) { throw new MapperParsingException("Cannot define script on field with index:false and doc_values:false"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index bde79e6e9fc60..4a41f16aa12b6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -93,21 +93,28 @@ public FieldNamesFieldMapper build() { "field_names_enabled_parameter", ENABLED_DEPRECATION_MESSAGE); } } - FieldNamesFieldType fieldNamesFieldType = new FieldNamesFieldType(enabled.getValue().value()); - return new FieldNamesFieldMapper(enabled.getValue(), indexVersionCreated, fieldNamesFieldType); + return new FieldNamesFieldMapper(enabled.getValue(), indexVersionCreated); } } public static final TypeParser PARSER = new ConfigurableTypeParser( - c -> new FieldNamesFieldMapper(Defaults.ENABLED, c.indexVersionCreated(), new FieldNamesFieldType(Defaults.ENABLED.value())), + c -> new FieldNamesFieldMapper(Defaults.ENABLED, c.indexVersionCreated()), c -> new Builder(c.indexVersionCreated()) ); public static final class FieldNamesFieldType extends TermBasedFieldType { + private static final FieldNamesFieldType ENABLED = new FieldNamesFieldType(true); + + private static final FieldNamesFieldType DISABLED = new FieldNamesFieldType(false); + private final boolean 
enabled; - public FieldNamesFieldType(boolean enabled) { + public static FieldNamesFieldType get(boolean enabled) { + return enabled ? ENABLED : DISABLED; + } + + private FieldNamesFieldType(boolean enabled) { super(Defaults.NAME, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); this.enabled = enabled; } @@ -145,8 +152,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { private final Explicit enabled; private final Version indexVersionCreated; - private FieldNamesFieldMapper(Explicit enabled, Version indexVersionCreated, FieldNamesFieldType mappedFieldType) { - super(mappedFieldType); + private FieldNamesFieldMapper(Explicit enabled, Version indexVersionCreated) { + super(FieldNamesFieldType.get(enabled.value())); this.enabled = enabled; this.indexVersionCreated = indexVersionCreated; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java index 7ae84c27a76e3..f4c44d4227f67 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java @@ -40,7 +40,9 @@ public static class Defaults { } } - public static final TypeParser PARSER = new FixedTypeParser(c -> new IgnoredFieldMapper()); + private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(); + + public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); public static final class IgnoredFieldType extends StringFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 001313f4db545..70382856c2e80 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -26,7 +26,9 @@ public class IndexFieldMapper extends MetadataFieldMapper { public static final String CONTENT_TYPE = "_index"; - public static final TypeParser PARSER = new FixedTypeParser(c -> new IndexFieldMapper()); + private static final IndexFieldMapper INSTANCE = new IndexFieldMapper(); + + public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); static final class IndexFieldType extends ConstantFieldType { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index a95556a13bf87..2631834a495fb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -90,7 +90,7 @@ public Builder(String name, ScriptCompiler scriptCompiler, boolean ignoreMalform this.script.precludesParameters(nullValue, ignoreMalformed); addScriptValidation(script, indexed, hasDocValues); this.dimension = Parameter.boolParam("dimension", false, m -> toType(m).dimension, false) - .setValidator(v -> { + .addValidator(v -> { if (v && (indexed.getValue() == false || hasDocValues.getValue() == false)) { throw new IllegalArgumentException( "Field [dimension] requires that [" + indexed.name + "] and [" + hasDocValues.name + "] are true" @@ -378,13 +378,8 @@ public Object valueForDisplay(Object value) { @Override public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { - if (format != null) { - throw new IllegalArgumentException("Field [" + name() 
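The dimension parameter now uses addValidator for a check that inspects sibling parameters (index and doc_values) rather than only its own value. A hedged sketch of that cross-parameter validation, with the settings reduced to plain booleans; DimensionSettings is illustrative and not part of the mapper API.

```
// Illustrative only: validating one setting ("dimension") against the values of
// other settings, as the addValidator lambda on the dimension parameter does above.
final class DimensionSettings {
    private final boolean indexed;
    private final boolean hasDocValues;
    private final boolean dimension;

    DimensionSettings(boolean indexed, boolean hasDocValues, boolean dimension) {
        this.indexed = indexed;
        this.hasDocValues = hasDocValues;
        this.dimension = dimension;
    }

    void validate() {
        if (dimension && (indexed == false || hasDocValues == false)) {
            throw new IllegalArgumentException(
                "Field [dimension] requires that [index] and [doc_values] are true");
        }
    }

    public static void main(String[] args) {
        new DimensionSettings(true, true, true).validate();  // accepted
        new DimensionSettings(true, false, true).validate(); // throws
    }
}
```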
+ "] of type [" + typeName() + "] does not support custom formats"); - } - if (timeZone != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() - + "] does not support custom time zones"); - } + checkNoFormat(format); + checkNoTimeZone(timeZone); return DocValueFormat.IP; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java index 1f81b690a8572..dc70b14a86518 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpScriptFieldType.java @@ -21,8 +21,8 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.fielddata.IpScriptFieldData; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.script.IpFieldScript; import org.elasticsearch.script.CompositeFieldScript; +import org.elasticsearch.script.IpFieldScript; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.lookup.SearchLookup; @@ -36,7 +36,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.function.Function; import java.util.function.Supplier; @@ -88,14 +87,8 @@ public Object valueForDisplay(Object value) { @Override public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - if (format != null) { - String message = "Runtime field [%s] of type [%s] does not support custom formats"; - throw new IllegalArgumentException(String.format(Locale.ROOT, message, name(), typeName())); - } - if (timeZone != null) { - String message = "Runtime field [%s] of type [%s] does not support custom time zones"; - throw new IllegalArgumentException(String.format(Locale.ROOT, message, name(), typeName())); - } + checkNoFormat(format); + checkNoTimeZone(timeZone); return DocValueFormat.IP; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 1a3db9d4f96d5..e08ad09eb875a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -124,7 +124,7 @@ public Builder(String name, IndexAnalyzers indexAnalyzers, ScriptCompiler script this.script.precludesParameters(nullValue); addScriptValidation(script, indexed, hasDocValues); - this.dimension = Parameter.boolParam("dimension", false, m -> toType(m).dimension, false).setValidator(v -> { + this.dimension = Parameter.boolParam("dimension", false, m -> toType(m).dimension, false).addValidator(v -> { if (v && (indexed.getValue() == false || hasDocValues.getValue() == false)) { throw new IllegalArgumentException( "Field [dimension] requires that [" + indexed.name + "] and [" + hasDocValues.name + "] are true" @@ -452,8 +452,8 @@ public boolean isDimension() { private final IndexAnalyzers indexAnalyzers; - protected KeywordFieldMapper(String simpleName, FieldType fieldType, KeywordFieldType mappedFieldType, - MultiFields multiFields, CopyTo copyTo, Builder builder) { + private KeywordFieldMapper(String simpleName, FieldType fieldType, KeywordFieldType mappedFieldType, + MultiFields multiFields, CopyTo copyTo, Builder builder) { super(simpleName, mappedFieldType, mappedFieldType.normalizer, multiFields, copyTo, 
builder.script.get() != null, builder.onScriptError.getValue()); assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java index edcb1f5cae382..13726b6a67666 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordScriptFieldType.java @@ -215,4 +215,10 @@ public Query wildcardQuery(String value, RewriteMethod method, boolean caseInsen checkAllowExpensiveQueries(context); return new StringScriptFieldWildcardQuery(script, leafFactory(context), name(), value, caseInsensitive); } + + @Override + public Query normalizedWildcardQuery(String value, RewriteMethod method, SearchExecutionContext context) { + checkAllowExpensiveQueries(context); + return new StringScriptFieldWildcardQuery(script, leafFactory(context), name(), value, false); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java index 08cf9308533d8..edf921fd5b72f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -179,7 +179,7 @@ public Builder(String name, Version version, boolean ignoreMalformedByDefault, b this.ignoreMalformed = ignoreMalformedParam(m -> builder(m).ignoreMalformed.get(), ignoreMalformedByDefault); this.coerce = coerceParam(m -> builder(m).coerce.get(), coerceByDefault); - this.pointsOnly.setValidator(v -> { + this.pointsOnly.addValidator(v -> { if (v == null) { return; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java index ecd50a3477dc8..8c14a660a32c3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java @@ -83,9 +83,7 @@ public Object valueForDisplay(Object value) { @Override public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - if (timeZone != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); - } + checkNoTimeZone(timeZone); if (format == null) { return DocValueFormat.RAW; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 0d5edfa025980..e7f2349984121 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -373,17 +373,32 @@ public boolean eagerGlobalOrdinals() { return false; } - /** Return a {@link DocValueFormat} that can be used to display and parse - * values as returned by the fielddata API. - * The default implementation returns a {@link DocValueFormat#RAW}. */ + /** + * Pick a {@link DocValueFormat} that can be used to display and parse + * values of fields of this type. 
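The new normalizedWildcardQuery override on KeywordScriptFieldType, like the existing wildcardQuery, first calls checkAllowExpensiveQueries(context) and then delegates to the same script-backed query with case sensitivity enabled. A rough, standalone sketch of that guard-then-delegate shape; QueryContext and its message text are illustrative assumptions, not Elasticsearch API.

```
// Illustrative only: a context flag that rejects costly operations up front,
// in the spirit of the checkAllowExpensiveQueries(context) calls above.
final class QueryContext {
    private final boolean allowExpensiveQueries;

    QueryContext(boolean allowExpensiveQueries) {
        this.allowExpensiveQueries = allowExpensiveQueries;
    }

    void checkAllowExpensiveQueries(String fieldName) {
        if (allowExpensiveQueries == false) {
            throw new IllegalStateException(
                "wildcard query on field [" + fieldName + "] is not allowed because expensive queries are disabled");
        }
    }

    String wildcardQuery(String fieldName, String pattern, boolean caseInsensitive) {
        checkAllowExpensiveQueries(fieldName); // fail fast before doing any work
        return "wildcard(" + fieldName + ", " + pattern + ", ci=" + caseInsensitive + ")";
    }

    // A "normalized" variant can reuse the plain path when no normalizer exists,
    // which is what the KeywordScriptFieldType override does (caseInsensitive = false).
    String normalizedWildcardQuery(String fieldName, String pattern) {
        return wildcardQuery(fieldName, pattern, false);
    }

    public static void main(String[] args) {
        System.out.println(new QueryContext(true).normalizedWildcardQuery("message", "foo*bar"));
        new QueryContext(false).normalizedWildcardQuery("message", "foo*bar"); // throws
    }
}
```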
+ */ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { + checkNoFormat(format); + checkNoTimeZone(timeZone); + return DocValueFormat.RAW; + } + + /** + * Validate the provided {@code format} is null. + */ + protected void checkNoFormat(@Nullable String format) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); } + } + + /** + * Validate the provided {@code timeZone} is null. + */ + protected void checkNoTimeZone(@Nullable ZoneId timeZone) { if (timeZone != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); } - return DocValueFormat.RAW; } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 5d8fc7138ec50..8f4d259abb140 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.codecs.PostingsFormat; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -53,6 +54,7 @@ private CacheKey() {} private final List indexTimeScriptMappers = new ArrayList<>(); private final Mapping mapping; private final Set shadowedFields; + private final Set completionFields = new HashSet<>(); /** * Creates a new {@link MappingLookup} instance by parsing the provided mapping and extracting its field definitions. @@ -148,6 +150,9 @@ private MappingLookup(Mapping mapping, if (mapper.hasScript()) { indexTimeScriptMappers.add(mapper); } + if (mapper instanceof CompletionFieldMapper) { + completionFields.add(mapper.name()); + } } for (FieldAliasMapper aliasMapper : aliasMappers) { @@ -213,6 +218,15 @@ public boolean isShadowed(String field) { return shadowedFields.contains(field); } + /** + * Gets the postings format for a particular field + * @param field the field to retrieve a postings format for + * @return the postings format for the field, or {@code null} if the default format should be used + */ + public PostingsFormat getPostingsFormat(String field) { + return completionFields.contains(field) ? 
CompletionFieldMapper.postingsFormat() : null; + } + void checkLimits(IndexSettings settings) { checkFieldLimit(settings.getMappingTotalFieldsLimit()); checkObjectDepthLimit(settings.getMappingDepthLimit()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NestedPathFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NestedPathFieldMapper.java index d097d3de2309f..249fbae5ca36d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NestedPathFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NestedPathFieldMapper.java @@ -23,8 +23,12 @@ public class NestedPathFieldMapper extends MetadataFieldMapper { public static final String NAME_PRE_V8 = "_type"; + public static final String NAME = "_nested_path"; + private static final NestedPathFieldMapper INSTANCE = new NestedPathFieldMapper(NAME); + private static final NestedPathFieldMapper INSTANCE_PRE_V8 = new NestedPathFieldMapper(NAME_PRE_V8); + public static String name(Version version) { if (version.before(Version.V_8_0_0)) { return NAME_PRE_V8; @@ -53,12 +57,13 @@ public static class Defaults { } } - public static final TypeParser PARSER = new FixedTypeParser(c -> new NestedPathFieldMapper(c.indexVersionCreated())); + public static final TypeParser PARSER = + new FixedTypeParser(c -> c.indexVersionCreated().before(Version.V_8_0_0) ? INSTANCE_PRE_V8 : INSTANCE); public static final class NestedPathFieldType extends StringFieldType { - private NestedPathFieldType(Version version) { - super(NestedPathFieldMapper.name(version), true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + private NestedPathFieldType(String name) { + super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); } @Override @@ -77,8 +82,8 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format) } } - private NestedPathFieldMapper(Version version) { - super(new NestedPathFieldType(version)); + private NestedPathFieldMapper(String name) { + super(new NestedPathFieldType(name)); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index e66f829c9cfc9..0ded3c7bf98fd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -81,7 +81,7 @@ public static class Builder extends FieldMapper.Builder { private final Parameter nullValue; - private final Parameter
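MappingLookup now records which mappers are completion fields while the mapping is being walked, so getPostingsFormat can answer per-field lookups from a precomputed set instead of re-inspecting mappers on every call. A standalone sketch of that precompute-then-lookup idea; FieldLookup and its string-based "types" are illustrative, not the MappingLookup API.

```
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative only: collect the names of fields that need a special postings format
// once, at construction time, then serve per-field queries from the precomputed set,
// as MappingLookup does for completion fields in the diff above.
final class FieldLookup {
    private final Set<String> completionFields = new HashSet<>();

    FieldLookup(Map<String, String> fieldTypes) { // field name -> type name
        for (Map.Entry<String, String> entry : fieldTypes.entrySet()) {
            if ("completion".equals(entry.getValue())) {
                completionFields.add(entry.getKey());
            }
        }
    }

    /** Returns a format name for the field, or null if the default format should be used. */
    String postingsFormat(String field) {
        return completionFields.contains(field) ? "completion" : null;
    }

    public static void main(String[] args) {
        FieldLookup lookup = new FieldLookup(Map.of("title", "text", "suggest", "completion"));
        System.out.println(lookup.postingsFormat("suggest")); // completion
        System.out.println(lookup.postingsFormat("title"));   // null
    }
}
```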