diff --git a/.ci/init.gradle b/.ci/init.gradle new file mode 100644 index 000000000000..62e6c48b8121 --- /dev/null +++ b/.ci/init.gradle @@ -0,0 +1,43 @@ +if (System.env.ELASTIC_ARTIFACTORY_USERNAME == null || System.env.ELASTIC_ARTIFACTORY_TOKEN == null) { + throw new GradleException("Using init script without configuration") +} else { + logger.info("Using elastic artifactory repos") + settingsEvaluated { settings -> + settings.pluginManagement { + repositories { + maven { + url "https://artifactory.elstc.co/artifactory/gradle-plugins" + credentials { + username System.env.ELASTIC_ARTIFACTORY_USERNAME + password System.env.ELASTIC_ARTIFACTORY_TOKEN + } + } + gradlePluginPortal() + } + } + } + projectsLoaded { + allprojects { + buildscript { + repositories { + maven { + url "https://artifactory.elstc.co/artifactory/gradle-release/" + credentials { + username System.env.ELASTIC_ARTIFACTORY_USERNAME + password System.env.ELASTIC_ARTIFACTORY_TOKEN + } + } + } + } + repositories { + maven { + url "https://artifactory.elstc.co/artifactory/gradle-release/" + credentials { + username System.env.ELASTIC_ARTIFACTORY_USERNAME + password System.env.ELASTIC_ARTIFACTORY_TOKEN + } + } + } + } + } +} diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index 0cea9c939e14..3545103f191c 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -9,6 +9,8 @@ ES_RUNTIME_JAVA: - java8 - java8fips - java11 + - java12 - openjdk12 - zulu8 - zulu11 + - zulu12 diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 04511f81281b..4533213920c3 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -20,5 +20,5 @@ export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} # We are caching BWC versions too, need these so we can build those export JAVA8_HOME="${HOME}"/.java/java8 export JAVA11_HOME="${HOME}"/.java/java11 -export JAVA12_HOME="${HOME}"/.java/java12 +export JAVA12_HOME="${HOME}"/.java/openjdk12 ./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies diff --git a/build.gradle b/build.gradle index 80ed642369ac..f6c3222a4074 100644 --- a/build.gradle +++ b/build.gradle @@ -32,7 +32,7 @@ plugins { id 'com.gradle.build-scan' version '2.0.2' id 'base' } -if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") { +if (Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) { buildScan { termsOfServiceUrl = 'https://gradle.com/terms-of-service' termsOfServiceAgree = 'yes' @@ -162,8 +162,8 @@ task verifyVersions { * after the backport of the backcompat code is complete. 
*/ -boolean bwc_tests_enabled = true -final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */ +boolean bwc_tests_enabled = false +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/40319" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 9f658c91ab39..be54b2c68f63 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -232,6 +232,11 @@ if (project != rootProject) { if (isLuceneSnapshot) { systemProperty 'test.lucene-snapshot-revision', isLuceneSnapshot[0][1] } + String defaultParallel = System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel) + if (defaultParallel == "auto") { + defaultParallel = Math.max(Runtime.getRuntime().availableProcessors(), 4) + } + maxParallelForks defaultParallel as Integer } check.dependsOn(integTest) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index fb0cd1a41eca..75230e27c16c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -44,6 +44,7 @@ public class PluginBuildPlugin extends BuildPlugin { public void apply(Project project) { super.apply(project) configureDependencies(project) + // this afterEvaluate must happen before the afterEvaluate added by integTest creation, // so that the file name resolution for installing the plugin will be setup project.afterEvaluate { @@ -69,7 +70,7 @@ public class PluginBuildPlugin extends BuildPlugin { if (isModule) { throw new RuntimeException("Testclusters does not support modules yet"); } else { - project.testClusters.integTestCluster.plugin( + project.testClusters.integTest.plugin( project.file(project.tasks.bundlePlugin.archiveFile) ) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 01f8d229e67e..fccb190bcc38 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -693,10 +693,10 @@ class ClusterFormationTasks { node.args.each { arg(value: it) } if (Os.isFamily(Os.FAMILY_WINDOWS)) { // Having no TMP on Windows defaults to C:\Windows and permission errors - // Since we configure ant to run with a new environment above, we need to set this to a dir we have access to - File tmpDir = new File(node.baseDir, "tmp") - tmpDir.mkdirs() - env(key: "TMP", value: tmpDir.absolutePath) + // Since we configure ant to run with a new environment above, we need to explicitly pass this + String tmp = System.getenv("TMP") + assert tmp != null + env(key: "TMP", value: tmp) } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index c2fca819ef3e..8e7dbafb2c2f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -62,13 +62,13 @@ public class RestIntegTestTask extends DefaultTask { clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) } else { project.testClusters { - integTestCluster { + "$name" { distribution = 'INTEG_TEST' version = project.version javaHome = project.file(project.ext.runtimeJavaHome) } } - runner.useCluster project.testClusters.integTestCluster + runner.useCluster project.testClusters."$name" } // override/add more for rest tests @@ -81,7 +81,7 @@ public class RestIntegTestTask extends DefaultTask { throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") } if (usesTestclusters == true) { - ElasticsearchNode node = project.testClusters.integTestCluster + ElasticsearchNode node = project.testClusters."${name}" runner.systemProperty('tests.rest.cluster', {node.allHttpSocketURI.join(",") }) runner.systemProperty('tests.config.dir', {node.getConfigDir()}) runner.systemProperty('tests.cluster', {node.transportPortURI}) @@ -187,6 +187,10 @@ public class RestIntegTestTask extends DefaultTask { clusterInit.mustRunAfter(tasks) } + public void runner(Closure configure) { + project.tasks.getByName("${name}Runner").configure(configure) + } + /** Print out an excerpt of the log from the given node. */ protected static void printLogExcerpt(NodeInfo nodeInfo) { File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index c43b5f62b7fc..4bdef1ff6fd3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -78,7 +78,6 @@ class VagrantTestPlugin implements Plugin { private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest' private static final BATS = 'bats' private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" - private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest" /** Boxes that have been supplied and are available for testing **/ List availableBoxes = [] @@ -388,15 +387,6 @@ class VagrantTestPlugin implements Plugin { } } - private static void createPlatformTestTask(Project project) { - project.tasks.create('platformTest') { - group 'Verification' - description "Test unit and integ tests on different platforms using vagrant. See TESTING.asciidoc for details. This test " + - "is unmaintained." 
- dependsOn 'vagrantCheckVersion' - } - } - private void createBoxListTasks(Project project) { project.tasks.create('listAllBoxes') { group 'Verification' @@ -429,7 +419,6 @@ class VagrantTestPlugin implements Plugin { createSmokeTestTask(project) createPrepareVagrantTestEnvTask(project) createPackagingTestTask(project) - createPlatformTestTask(project) createBoxListTasks(project) } @@ -454,9 +443,6 @@ class VagrantTestPlugin implements Plugin { assert project.tasks.packagingTest != null Task packagingTest = project.tasks.packagingTest - assert project.tasks.platformTest != null - Task platformTest = project.tasks.platformTest - /* * We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD) * so that boxes are not duplicated for every Gradle project that use this VagrantTestPlugin. @@ -610,31 +596,6 @@ class VagrantTestPlugin implements Plugin { packagingTest.dependsOn(javaPackagingTest) } } - - /* - * This test is unmaintained and was created to run on Linux. We won't allow it to run on Windows - * until it's been brought back into maintenance - */ - if (LINUX_BOXES.contains(box)) { - Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { - command 'ssh' - boxName box - environmentVars vagrantEnvVars - dependsOn up - finalizedBy halt - args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" - } - TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) - platform.doFirst { - project.gradle.addListener(platformReproListener) - } - platform.doLast { - project.gradle.removeListener(platformReproListener) - } - if (project.extensions.esvagrant.boxes.contains(box)) { - platformTest.dependsOn(platform) - } - } } } diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java index b5327ed6322b..763edb80f5ac 100644 --- a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java +++ b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java @@ -30,6 +30,7 @@ import org.gradle.process.JavaExecSpec; import java.io.File; +import java.nio.file.Path; /** * Bridge a gap until Gradle offers service injection for plugins. 
@@ -76,4 +77,8 @@ public FileCollection fileTree(File dir) { public void loggedExec(Action action) { LoggedExec.exec(project, action); } + + public void delete(Path path) { + project.delete(path.toFile()); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index f718b181c5e4..4138131d7a15 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -26,8 +26,10 @@ import org.gradle.api.logging.Logging; import java.io.BufferedReader; +import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.UncheckedIOException; import java.net.HttpURLConnection; @@ -39,15 +41,18 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -71,6 +76,10 @@ public class ElasticsearchNode { private final LinkedHashMap> waitConditions; private final List plugins = new ArrayList<>(); + private final Map> settings = new LinkedHashMap<>(); + private final Map> keystoreSettings = new LinkedHashMap<>(); + private final Map> systemProperties = new LinkedHashMap<>(); + private final Map> environment = new LinkedHashMap<>(); private final Path confPathRepo; private final Path configFile; @@ -143,6 +152,55 @@ public void plugin(File plugin) { plugin(plugin.toURI()); } + public void keystore(String key, String value) { + addSupplier("Keystore", keystoreSettings, key, value); + } + + public void keystore(String key, Supplier valueSupplier) { + addSupplier("Keystore", keystoreSettings, key, valueSupplier); + } + + public void setting(String key, String value) { + addSupplier("Settings", settings, key, value); + } + + public void setting(String key, Supplier valueSupplier) { + addSupplier("Setting", settings, key, valueSupplier); + } + + public void systemProperty(String key, String value) { + addSupplier("Java System property", systemProperties, key, value); + } + + public void systemProperty(String key, Supplier valueSupplier) { + addSupplier("Java System property", systemProperties, key, valueSupplier); + } + + public void environment(String key, String value) { + addSupplier("Environment variable", environment, key, value); + } + + public void environment(String key, Supplier valueSupplier) { + addSupplier("Environment variable", environment, key, valueSupplier); + } + + private void addSupplier(String name, Map> collector, String key, Supplier valueSupplier) { + requireNonNull(key, name + " key was null when configuring test cluster `" + this + "`"); + requireNonNull(valueSupplier, name + " value supplier was null when configuring test cluster `" + this + "`"); + collector.put(key, valueSupplier); + } + + private void addSupplier(String name, Map> collector, String key, String actualValue) { + requireNonNull(actualValue, name + " value was null when configuring 
test cluster `" + this + "`"); + addSupplier(name, collector, key, () -> actualValue); + } + + private void checkSuppliers(String name, Map> collector) { + collector.forEach((key, value) -> { + requireNonNull(value.get().toString(), name + " supplied value was null when configuring test cluster `" + this + "`"); + }); + } + public Path getConfigDir() { return configFile.getParent(); } @@ -168,6 +226,8 @@ public File getJavaHome() { return javaHome; } + + private void waitForUri(String description, String uri) { waitConditions.put(description, (node) -> { try { @@ -209,10 +269,6 @@ synchronized void start() { if (Files.isDirectory(distroArtifact) == false) { throw new TestClustersException("Can not start " + this + ", is not a directory: " + distroArtifact); } - services.sync(spec -> { - spec.from(distroArtifact); - spec.into(workingDir); - }); try { createWorkingDir(distroArtifact); @@ -226,46 +282,79 @@ synchronized void start() { "install", "--batch", plugin.toString()) ); + if (keystoreSettings.isEmpty() == false) { + checkSuppliers("Keystore", keystoreSettings); + runElaticsearchBinScript("elasticsearch-keystore", "create"); + keystoreSettings.forEach((key, value) -> { + runElaticsearchBinScriptWithInput(value.get().toString(), "elasticsearch-keystore", "add", "-x", key); + }); + } + startElasticsearchProcess(); } + private void runElaticsearchBinScriptWithInput(String input, String tool, String... args) { + try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) { + services.loggedExec(spec -> { + spec.setEnvironment(getESEnvironment()); + spec.workingDir(workingDir); + spec.executable( + OS.conditionalString() + .onUnix(() -> "./bin/" + tool) + .onWindows(() -> "cmd") + .supply() + ); + spec.args( + OS.>conditional() + .onWindows(() -> { + ArrayList result = new ArrayList<>(); + result.add("/c"); + result.add("bin\\" + tool + ".bat"); + for (String arg : args) { + result.add(arg); + } + return result; + }) + .onUnix(() -> Arrays.asList(args)) + .supply() + ); + spec.setStandardInput(byteArrayInputStream); + + }); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + private void runElaticsearchBinScript(String tool, String... 
args) { - services.loggedExec(spec -> { - spec.setEnvironment(getESEnvironment()); - spec.workingDir(workingDir); - spec.executable( - OS.conditionalString() - .onUnix(() -> "./bin/" + tool) - .onWindows(() -> "cmd") - .supply() - ); - spec.args( - OS.>conditional() - .onWindows(() -> { - ArrayList result = new ArrayList<>(); - result.add("/c"); - result.add("bin\\" + tool + ".bat"); - for (String arg : args) { - result.add(arg); - } - return result; - }) - .onUnix(() -> Arrays.asList(args)) - .supply() - ); - }); + runElaticsearchBinScriptWithInput("", tool, args); } private Map getESEnvironment() { - Map environment= new HashMap<>(); - environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); - environment.put("ES_PATH_CONF", configFile.getParent().toString()); - environment.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m"); - environment.put("ES_TMPDIR", tmpDir.toString()); + Map defaultEnv = new HashMap<>(); + defaultEnv.put("JAVA_HOME", getJavaHome().getAbsolutePath()); + defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString()); + String systemPropertiesString = ""; + if (systemProperties.isEmpty() == false) { + checkSuppliers("Java System property", systemProperties); + systemPropertiesString = " " + systemProperties.entrySet().stream() + .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue().get()) + .collect(Collectors.joining(" ")); + } + defaultEnv.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m -ea -esa" + systemPropertiesString); + defaultEnv.put("ES_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite ES_TMPDIR + defaultEnv.put("TMP", tmpDir.toString()); + + Set commonKeys = new HashSet<>(environment.keySet()); + commonKeys.retainAll(defaultEnv.keySet()); + if (commonKeys.isEmpty() == false) { + throw new IllegalStateException("testcluster does not allow setting the following env vars " + commonKeys); + } - environment.put("TMP", tmpDir.toString()); - return environment; + checkSuppliers("Environment variable", environment); + environment.forEach((key, value) -> defaultEnv.put(key, value.get().toString())); + return defaultEnv; } private void startElasticsearchProcess() { @@ -397,10 +486,7 @@ private void waitForProcessToExit(ProcessHandle processHandle) { } private void createWorkingDir(Path distroExtractDir) throws IOException { - services.sync(spec -> { - spec.from(distroExtractDir.toFile()); - spec.into(workingDir.toFile()); - }); + syncWithLinks(distroExtractDir, workingDir); Files.createDirectories(configFile.getParent()); Files.createDirectories(confPathRepo); Files.createDirectories(confPathData); @@ -408,35 +494,93 @@ private void createWorkingDir(Path distroExtractDir) throws IOException { Files.createDirectories(tmpDir); } + /** + * Does the equivalent of `cp -lr` and `chmod -r a-w` to save space and improve speed. + * We remove write permissions to make sure files are note mistakenly edited ( e.x. the config file ) and changes + * reflected across all copies. Permissions are retained to be able to replace the links. 
+ * + * @param sourceRoot where to copy from + * @param destinationRoot destination to link to + */ + private void syncWithLinks(Path sourceRoot, Path destinationRoot) { + if (Files.exists(destinationRoot)) { + services.delete(destinationRoot); + } + + try (Stream stream = Files.walk(sourceRoot)) { + stream.forEach(source -> { + Path destination = destinationRoot.resolve(sourceRoot.relativize(source)); + if (Files.isDirectory(source)) { + try { + Files.createDirectories(destination); + } catch (IOException e) { + throw new UncheckedIOException("Can't create directory " + destination.getParent(), e); + } + } else { + try { + Files.createDirectories(destination.getParent()); + } catch (IOException e) { + throw new UncheckedIOException("Can't create directory " + destination.getParent(), e); + } + try { + Files.createLink(destination, source); + } catch (IOException e) { + // Note does not work for network drives, e.g. Vagrant + throw new UncheckedIOException( + "Failed to create hard link " + destination + " pointing to " + source, e + ); + } + } + }); + } catch (IOException e) { + throw new UncheckedIOException("Can't walk source " + sourceRoot, e); + } + } + private void createConfiguration() { - LinkedHashMap config = new LinkedHashMap<>(); + LinkedHashMap defaultConfig = new LinkedHashMap<>(); String nodeName = safeName(name); - config.put("cluster.name",nodeName); - config.put("node.name", nodeName); - config.put("path.repo", confPathRepo.toAbsolutePath().toString()); - config.put("path.data", confPathData.toAbsolutePath().toString()); - config.put("path.logs", confPathLogs.toAbsolutePath().toString()); - config.put("path.shared_data", workingDir.resolve("sharedData").toString()); - config.put("node.attr.testattr", "test"); - config.put("node.portsfile", "true"); - config.put("http.port", "0"); - config.put("transport.tcp.port", "0"); + defaultConfig.put("cluster.name",nodeName); + defaultConfig.put("node.name", nodeName); + defaultConfig.put("path.repo", confPathRepo.toAbsolutePath().toString()); + defaultConfig.put("path.data", confPathData.toAbsolutePath().toString()); + defaultConfig.put("path.logs", confPathLogs.toAbsolutePath().toString()); + defaultConfig.put("path.shared_data", workingDir.resolve("sharedData").toString()); + defaultConfig.put("node.attr.testattr", "test"); + defaultConfig.put("node.portsfile", "true"); + defaultConfig.put("http.port", "0"); + defaultConfig.put("transport.tcp.port", "0"); // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space - config.put("cluster.routing.allocation.disk.watermark.low", "1b"); - config.put("cluster.routing.allocation.disk.watermark.high", "1b"); + defaultConfig.put("cluster.routing.allocation.disk.watermark.low", "1b"); + defaultConfig.put("cluster.routing.allocation.disk.watermark.high", "1b"); // increase script compilation limit since tests can rapid-fire script compilations - config.put("script.max_compilations_rate", "2048/1m"); + defaultConfig.put("script.max_compilations_rate", "2048/1m"); if (Version.fromString(version).getMajor() >= 6) { - config.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); + defaultConfig.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); } if (Version.fromString(version).getMajor() >= 7) { - config.put("cluster.initial_master_nodes", "[" + nodeName + "]"); + defaultConfig.put("cluster.initial_master_nodes", "[" + nodeName + "]"); + } + checkSuppliers("Settings", settings); + Map userConfig = 
settings.entrySet().stream() + .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue().get().toString())); + HashSet overriden = new HashSet<>(defaultConfig.keySet()); + overriden.retainAll(userConfig.keySet()); + if (overriden.isEmpty() ==false) { + throw new IllegalArgumentException("Testclusters does not allow the following settings to be changed:" + overriden); } + try { + // We create hard links for the distribution, so we need to remove the config file before writing it + // to prevent the changes to reflect across all copies. + Files.delete(configFile); Files.write( configFile, - config.entrySet().stream() + Stream.concat( + userConfig.entrySet().stream(), + defaultConfig.entrySet().stream() + ) .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) .getBytes(StandardCharsets.UTF_8) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 59cb851974cb..57f77d6d1a25 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -31,7 +31,6 @@ import org.gradle.api.Task; import org.gradle.api.plugins.BasePlugin; import org.gradle.api.plugins.ExtraPropertiesExtension; -import org.gradle.api.tasks.Input; import org.gradle.api.tasks.TaskContainer; import java.lang.reflect.InvocationTargetException; @@ -104,6 +103,7 @@ public void apply(Project project) { "but none could be found so these will be skipped", project.getPath() ); disableTaskByType(tasks, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask")); + disableTaskByType(tasks, getTaskClass("org.elasticsearch.gradle.test.RestIntegTestTask")); // conventions are not honored when the tasks are disabled disableTaskByType(tasks, TestingConventionsTasks.class); disableTaskByType(tasks, ComposeUp.class); @@ -122,6 +122,7 @@ public void apply(Project project) { fixtureProject, (name, port) -> setSystemProperty(task, name, port) ); + task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); }) ); @@ -155,7 +156,6 @@ private void configureServiceInfoForTask(Task task, Project fixtureProject, BiCo ); } - @Input public boolean dockerComposeSupported(Project project) { if (OS.current().equals(OS.WINDOWS)) { return false; diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 804440660c71..11aa145248e6 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.2.1 \ No newline at end of file +5.3 \ No newline at end of file diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java index 1dbe0d39587c..b758968f0a98 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java @@ -22,6 +22,10 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; +import 
org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformResponse; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; @@ -82,6 +86,46 @@ public void putDataFrameTransformAsync(PutDataFrameTransformRequest request, Req Collections.emptySet()); } + /** + * Get the running statistics of a Data Frame Transform + *

+ * For additional info + * see Get Data Frame transform stats documentation + * + * @param request Specifies which transforms to get the stats for + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return The Data Frame Transform stats + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetDataFrameTransformStatsResponse getDataFrameTransformStats(GetDataFrameTransformStatsRequest request, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + DataFrameRequestConverters::getDataFrameTransformStats, + options, + GetDataFrameTransformStatsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Get the running statistics of a Data Frame Transform asynchronously and notifies the listener on completion + *

+ * For additional info + * see Get Data Frame transform stats documentation + * + * @param request Specifies which transforms to get the stats for + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getDataFrameTransformStatsAsync(GetDataFrameTransformStatsRequest request, RequestOptions options, + ActionListener<GetDataFrameTransformStatsResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + DataFrameRequestConverters::getDataFrameTransformStats, + options, + GetDataFrameTransformStatsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Delete a data frame transform + *
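The hunk above wires the new stats lookup into the high-level REST client. A minimal usage sketch follows (not part of this diff; the "my-transform" id, the local HttpHost, and reaching DataFrameClient through client.dataFrame() are illustrative assumptions):

import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest;
import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse;

public class GetTransformStatsExample {
    public static void main(String[] args) throws Exception {
        // Connect to a locally running cluster (illustrative address, not from this diff).
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // Synchronous variant added in this hunk; the async variant additionally takes an ActionListener.
            GetDataFrameTransformStatsResponse stats = client.dataFrame().getDataFrameTransformStats(
                    new GetDataFrameTransformStatsRequest("my-transform"), RequestOptions.DEFAULT);
            stats.getTransformsStateAndStats().forEach(s -> System.out.println(s));
        }
    }
}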

@@ -233,7 +277,7 @@ public StopDataFrameTransformResponse stopDataFrameTransform(StopDataFrameTransf * @param listener Listener to be notified upon request completion */ public void stopDataFrameTransformAsync(StopDataFrameTransformRequest request, RequestOptions options, - ActionListener<StopDataFrameTransformResponse> listener) { + ActionListener<StopDataFrameTransformResponse> listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::stopDataFrameTransform, options, @@ -241,4 +285,44 @@ public void stopDataFrameTransformAsync(StopDataFrameTransformRequest request, R listener, Collections.emptySet()); } + + /** + * Get one or more data frame transform configurations + *

+ * For additional info + * see Get Data Frame transform documentation + * + * @param request The get data frame transform request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return A GetDataFrameTransformResponse containing the requested transforms + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetDataFrameTransformResponse getDataFrameTransform(GetDataFrameTransformRequest request, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + DataFrameRequestConverters::getDataFrameTransform, + options, + GetDataFrameTransformResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Get one or more data frame transform configurations asynchronously and notifies the listener on completion + *

+ * For additional info + * see Get Data Frame transform documentation + * + * @param request The get data frame transform request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getDataFrameTransformAsync(GetDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + DataFrameRequestConverters::getDataFrameTransform, + options, + GetDataFrameTransformResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index d5b1f14090a5..309a37fedf8c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -20,13 +20,17 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; +import org.elasticsearch.common.Strings; import java.io.IOException; @@ -47,6 +51,21 @@ static Request putDataFrameTransform(PutDataFrameTransformRequest putRequest) th return request; } + static Request getDataFrameTransform(GetDataFrameTransformRequest getRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_data_frame", "transforms") + .addPathPart(Strings.collectionToCommaDelimitedString(getRequest.getId())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + if (getRequest.getFrom() != null) { + request.addParameter("from", getRequest.getFrom().toString()); + } + if (getRequest.getSize() != null) { + request.addParameter("size", getRequest.getSize().toString()); + } + return request; + } + static Request deleteDataFrameTransform(DeleteDataFrameTransformRequest request) { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_data_frame", "transforms") @@ -94,4 +113,13 @@ static Request previewDataFrameTransform(PreviewDataFrameTransformRequest previe request.setEntity(createEntity(previewRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } + + static Request getDataFrameTransformStats(GetDataFrameTransformStatsRequest statsRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_data_frame", "transforms") + .addPathPart(statsRequest.getId()) + .addPathPartAsIs("_stats") + .build(); + return new Request(HttpGet.METHOD_NAME, endpoint); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index d0917b8d4546..77eac4a6e2a8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -511,7 +511,7 @@ public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, Act */ public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( - reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, emptySet() + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, singleton(409) ); } @@ -537,7 +537,7 @@ public final TaskSubmissionResponse submitReindexTask(ReindexRequest reindexRequ */ public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity( - reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, emptySet() + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -551,7 +551,7 @@ public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions opt */ public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( - updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, singleton(409) ); } @@ -566,7 +566,7 @@ public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQue public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity( - updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -580,7 +580,7 @@ public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, */ public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( - deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, singleton(409) ); } @@ -595,7 +595,7 @@ public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQue public final void deleteByQueryAsync(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity( - deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerJobStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerJobStats.java new file mode 
100644 index 000000000000..71aea2e81e96 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerJobStats.java @@ -0,0 +1,177 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.common.ParseField; + +import java.util.Objects; + +public abstract class IndexerJobStats { + public static final String NAME = "data_frame_indexer_transform_stats"; + public static ParseField NUM_PAGES = new ParseField("pages_processed"); + public static ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed"); + public static ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("documents_indexed"); + public static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); + public static ParseField INDEX_TIME_IN_MS = new ParseField("index_time_in_ms"); + public static ParseField SEARCH_TIME_IN_MS = new ParseField("search_time_in_ms"); + public static ParseField INDEX_TOTAL = new ParseField("index_total"); + public static ParseField SEARCH_TOTAL = new ParseField("search_total"); + public static ParseField SEARCH_FAILURES = new ParseField("search_failures"); + public static ParseField INDEX_FAILURES = new ParseField("index_failures"); + + private final long numPages; + private final long numInputDocuments; + private final long numOuputDocuments; + private final long numInvocations; + private final long indexTime; + private final long indexTotal; + private final long searchTime; + private final long searchTotal; + private final long indexFailures; + private final long searchFailures; + + public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations, + long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) { + this.numPages = numPages; + this.numInputDocuments = numInputDocuments; + this.numOuputDocuments = numOuputDocuments; + this.numInvocations = numInvocations; + this.indexTime = indexTime; + this.indexTotal = indexTotal; + this.searchTime = searchTime; + this.searchTotal = searchTotal; + this.indexFailures = indexFailures; + this.searchFailures = searchFailures; + } + + /** + * The number of pages read from the input indices + */ + public long getNumPages() { + return numPages; + } + + /** + * The number of documents read from the input indices + */ + public long getNumDocuments() { + return numInputDocuments; + } + + /** + * Number of times that the job woke up to write documents + */ + public long getNumInvocations() { + return numInvocations; + } + + /** + * Number of documents written + */ + public long getOutputDocuments() { + return numOuputDocuments; + } + + /** + * Number of index failures that have occurred + */ + public long getIndexFailures() { + 
return indexFailures; + } + + /** + * Number of failures that have occurred + */ + public long getSearchFailures() { + return searchFailures; + } + + /** + * Returns the time spent indexing (cumulative) in milliseconds + */ + public long getIndexTime() { + return indexTime; + } + + /** + * Returns the time spent searching (cumulative) in milliseconds + */ + public long getSearchTime() { + return searchTime; + } + + /** + * Returns the total number of indexing requests that have been processed + * (Note: this is not the number of _documents_ that have been indexed) + */ + public long getIndexTotal() { + return indexTotal; + } + + /** + * Returns the total number of search requests that have been made + */ + public long getSearchTotal() { + return searchTotal; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other instanceof IndexerJobStats == false) { + return false; + } + + IndexerJobStats that = (IndexerJobStats) other; + return Objects.equals(this.numPages, that.numPages) + && Objects.equals(this.numInputDocuments, that.numInputDocuments) + && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) + && Objects.equals(this.numInvocations, that.numInvocations) + && Objects.equals(this.indexTime, that.indexTime) + && Objects.equals(this.searchTime, that.searchTime) + && Objects.equals(this.indexFailures, that.indexFailures) + && Objects.equals(this.searchFailures, that.searchFailures) + && Objects.equals(this.searchTotal, that.searchTotal) + && Objects.equals(this.indexTotal, that.indexTotal); + } + + @Override + public int hashCode() { + return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations, + indexTime, searchTime, indexFailures, searchFailures, searchTotal, indexTotal); + } + + @Override + public final String toString() { + return "{pages=" + numPages + + ", input_docs=" + numInputDocuments + + ", output_docs=" + numOuputDocuments + + ", invocations=" + numInvocations + + ", index_failures=" + indexFailures + + ", search_failures=" + searchFailures + + ", index_time_in_ms=" + indexTime + + ", index_total=" + indexTotal + + ", search_time_in_ms=" + searchTime + + ", search_total=" + searchTotal+ "}"; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerState.java new file mode 100644 index 000000000000..8e5f1c4a85b6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerState.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + + +import java.util.Locale; + +/** + * IndexerState represents the internal state of the indexer. 
It + * is also persistent when changing from started/stopped in case the allocated + * task is restarted elsewhere. + */ +public enum IndexerState { + /** Indexer is running, but not actively indexing data (e.g. it's idle). */ + STARTED, + + /** Indexer is actively indexing data. */ + INDEXING, + + /** + * Transition state to where an indexer has acknowledged the stop + * but is still in process of halting. + */ + STOPPING, + + /** Indexer is "paused" and ignoring scheduled triggers. */ + STOPPED, + + /** + * Something (internal or external) has requested the indexer abort + * and shutdown. + */ + ABORTING; + + public static IndexerState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} \ No newline at end of file diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedTasksResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponse.java similarity index 90% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedTasksResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponse.java index b94c3454f863..bccaf609d5f3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedTasksResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponse.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.core; +package org.elasticsearch.client.dataframe; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; @@ -26,7 +26,6 @@ import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Objects; @@ -36,8 +35,8 @@ public class AcknowledgedTasksResponse { - protected static final ParseField TASK_FAILURES = new ParseField("task_failures"); - protected static final ParseField NODE_FAILURES = new ParseField("node_failures"); + public static final ParseField TASK_FAILURES = new ParseField("task_failures"); + public static final ParseField NODE_FAILURES = new ParseField("node_failures"); @SuppressWarnings("unchecked") protected static ConstructingObjectParser generateParser( @@ -60,8 +59,8 @@ protected static ConstructingObjectParser< public AcknowledgedTasksResponse(boolean acknowledged, @Nullable List taskFailures, @Nullable List nodeFailures) { this.acknowledged = acknowledged; - this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures)); - this.nodeFailures = nodeFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures)); + this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(taskFailures); + this.nodeFailures = nodeFailures == null ? 
Collections.emptyList() : Collections.unmodifiableList(nodeFailures); } public boolean isAcknowledged() { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java new file mode 100644 index 000000000000..9577a0f5c72b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +public class GetDataFrameTransformRequest implements Validatable { + + private final List ids; + private Integer from; + private Integer size; + + /** + * Helper method to create a request that will get ALL Data Frame Transforms + * @return new {@link GetDataFrameTransformRequest} object for the id "_all" + */ + public static GetDataFrameTransformRequest getAllDataFrameTransformsRequest() { + return new GetDataFrameTransformRequest("_all"); + } + + public GetDataFrameTransformRequest(String... 
ids) { + this.ids = Arrays.asList(ids); + } + + public List getId() { + return ids; + } + + public Integer getFrom() { + return from; + } + + public void setFrom(Integer from) { + this.from = from; + } + + public Integer getSize() { + return size; + } + + public void setSize(Integer size) { + this.size = size; + } + + @Override + public Optional validate() { + if (ids == null || ids.isEmpty()) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError("data frame transform id must not be null"); + return Optional.of(validationException); + } else { + return Optional.empty(); + } + } + + @Override + public int hashCode() { + return Objects.hash(ids); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GetDataFrameTransformRequest other = (GetDataFrameTransformRequest) obj; + return Objects.equals(ids, other.ids); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java new file mode 100644 index 000000000000..93fc91f08cee --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class GetDataFrameTransformResponse { + + public static final ParseField TRANSFORMS = new ParseField("transforms"); + public static final ParseField INVALID_TRANSFORMS = new ParseField("invalid_transforms"); + public static final ParseField COUNT = new ParseField("count"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser INVALID_TRANSFORMS_PARSER = + new ConstructingObjectParser<>("invalid_transforms", true, args -> new InvalidTransforms((List) args[0])); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_data_frame_transform", true, args -> new GetDataFrameTransformResponse( + (List) args[0], (int) args[1], (InvalidTransforms) args[2])); + static { + // Discard the count field which is the size of the transforms array + INVALID_TRANSFORMS_PARSER.declareInt((a, b) -> {}, COUNT); + INVALID_TRANSFORMS_PARSER.declareStringArray(constructorArg(), TRANSFORMS); + + PARSER.declareObjectArray(constructorArg(), DataFrameTransformConfig.PARSER::apply, TRANSFORMS); + PARSER.declareInt(constructorArg(), COUNT); + PARSER.declareObject(optionalConstructorArg(), INVALID_TRANSFORMS_PARSER::apply, INVALID_TRANSFORMS); + } + + public static GetDataFrameTransformResponse fromXContent(final XContentParser parser) { + return GetDataFrameTransformResponse.PARSER.apply(parser, null); + } + + private List transformConfigurations; + private int count; + private InvalidTransforms invalidTransforms; + + public GetDataFrameTransformResponse(List transformConfigurations, + int count, + @Nullable InvalidTransforms invalidTransforms) { + this.transformConfigurations = transformConfigurations; + this.count = count; + this.invalidTransforms = invalidTransforms; + } + + @Nullable + public InvalidTransforms getInvalidTransforms() { + return invalidTransforms; + } + + public int getCount() { + return count; + } + + public List getTransformConfigurations() { + return transformConfigurations; + } + + @Override + public int hashCode() { + return Objects.hash(transformConfigurations, count, invalidTransforms); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final GetDataFrameTransformResponse that = (GetDataFrameTransformResponse) other; + return Objects.equals(this.transformConfigurations, that.transformConfigurations) + && Objects.equals(this.count, that.count) + && Objects.equals(this.invalidTransforms, that.invalidTransforms); + } + + static class InvalidTransforms { + private final List transformIds; + + InvalidTransforms(List transformIds) { + this.transformIds = transformIds; + } + + public int getCount() { + return transformIds.size(); + } + + public List getTransformIds() { + return transformIds; + } + + @Override + public int hashCode() { + return Objects.hash(transformIds); + } + + @Override + public 
boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final InvalidTransforms that = (InvalidTransforms) other; + return Objects.equals(this.transformIds, that.transformIds); + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java new file mode 100644 index 000000000000..e90c8a1e276d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; + +import java.util.Objects; +import java.util.Optional; + +public class GetDataFrameTransformStatsRequest implements Validatable { + private final String id; + + public GetDataFrameTransformStatsRequest(String id) { + this.id = id; + } + + public String getId() { + return id; + } + + @Override + public Optional validate() { + if (id == null) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError("data frame transform id must not be null"); + return Optional.of(validationException); + } else { + return Optional.empty(); + } + } + + @Override + public int hashCode() { + return Objects.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GetDataFrameTransformStatsRequest other = (GetDataFrameTransformStatsRequest) obj; + return Objects.equals(id, other.id); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponse.java new file mode 100644 index 000000000000..d3f854719a70 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponse.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class GetDataFrameTransformStatsResponse { + + public static final ParseField TRANSFORMS = new ParseField("transforms"); + public static final ParseField COUNT = new ParseField("count"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser<GetDataFrameTransformStatsResponse, Void> PARSER = new ConstructingObjectParser<>( + "get_data_frame_transform_stats_response", true, + args -> new GetDataFrameTransformStatsResponse((List<DataFrameTransformStateAndStats>) args[0], + (List<TaskOperationFailure>) args[1], (List<ElasticsearchException>) args[2])); + + static { + PARSER.declareObjectArray(constructorArg(), DataFrameTransformStateAndStats.PARSER::apply, TRANSFORMS); + // Discard the count field which is the size of the transforms array + PARSER.declareInt((a, b) -> {}, COUNT); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), + AcknowledgedTasksResponse.TASK_FAILURES); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), + AcknowledgedTasksResponse.NODE_FAILURES); + } + + public static GetDataFrameTransformStatsResponse fromXContent(final XContentParser parser) { + return GetDataFrameTransformStatsResponse.PARSER.apply(parser, null); + } + + private final List<DataFrameTransformStateAndStats> transformsStateAndStats; + private final List<TaskOperationFailure> taskFailures; + private final List<ElasticsearchException> nodeFailures; + + public GetDataFrameTransformStatsResponse(List<DataFrameTransformStateAndStats> transformsStateAndStats, + @Nullable List<TaskOperationFailure> taskFailures, + @Nullable List<ElasticsearchException> nodeFailures) { + this.transformsStateAndStats = transformsStateAndStats; + this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(taskFailures); + this.nodeFailures = nodeFailures == null ? 
Collections.emptyList() : Collections.unmodifiableList(nodeFailures); + } + + public List getTransformsStateAndStats() { + return transformsStateAndStats; + } + + public List getNodeFailures() { + return nodeFailures; + } + + public List getTaskFailures() { + return taskFailures; + } + + @Override + public int hashCode() { + return Objects.hash(transformsStateAndStats, nodeFailures, taskFailures); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final GetDataFrameTransformStatsResponse that = (GetDataFrameTransformStatsResponse) other; + return Objects.equals(this.transformsStateAndStats, that.transformsStateAndStats) + && Objects.equals(this.nodeFailures, that.nodeFailures) + && Objects.equals(this.taskFailures, that.taskFailures); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java index 3878824b91f4..f11ecd096c16 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java @@ -21,7 +21,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.client.core.AcknowledgedTasksResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java index 5020ba270d99..3224dfb4703e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java @@ -21,7 +21,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.client.core.AcknowledgedTasksResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStats.java new file mode 100644 index 000000000000..aa40b7cb86c9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStats.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.client.core.IndexerJobStats; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class DataFrameIndexerTransformStats extends IndexerJobStats { + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, true, args -> new DataFrameIndexerTransformStats((long) args[0], (long) args[1], (long) args[2], + (long) args[3], (long) args[4], (long) args[5], (long) args[6], (long) args[7], (long) args[8], (long) args[9])); + + static { + PARSER.declareLong(constructorArg(), NUM_PAGES); + PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); + PARSER.declareLong(constructorArg(), INDEX_TIME_IN_MS); + PARSER.declareLong(constructorArg(), SEARCH_TIME_IN_MS); + PARSER.declareLong(constructorArg(), INDEX_TOTAL); + PARSER.declareLong(constructorArg(), SEARCH_TOTAL); + PARSER.declareLong(constructorArg(), INDEX_FAILURES); + PARSER.declareLong(constructorArg(), SEARCH_FAILURES); + } + + public static DataFrameIndexerTransformStats fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public DataFrameIndexerTransformStats(long numPages, long numInputDocuments, long numOuputDocuments, + long numInvocations, long indexTime, long searchTime, + long indexTotal, long searchTotal, long indexFailures, long searchFailures) { + super(numPages, numInputDocuments, numOuputDocuments, numInvocations, indexTime, searchTime, + indexTotal, searchTotal, indexFailures, searchFailures); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java index 08ef3a7be84a..88670a7b36d1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java @@ -38,32 +38,28 @@ public class DataFrameTransformConfig implements ToXContentObject { public static final ParseField ID = new ParseField("id"); public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DEST = new ParseField("dest"); - public static final ParseField QUERY = new ParseField("query"); // types of transforms public static final ParseField PIVOT_TRANSFORM = new ParseField("pivot"); private final String id; - private final String source; - private final String dest; - private final QueryConfig queryConfig; + private final SourceConfig source; + private final DestConfig dest; private final PivotConfig pivotConfig; - public static final ConstructingObjectParser 
PARSER = + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_frame_transform", true, (args) -> { String id = (String) args[0]; - String source = (String) args[1]; - String dest = (String) args[2]; - QueryConfig queryConfig = (QueryConfig) args[3]; - PivotConfig pivotConfig = (PivotConfig) args[4]; - return new DataFrameTransformConfig(id, source, dest, queryConfig, pivotConfig); + SourceConfig source = (SourceConfig) args[1]; + DestConfig dest = (DestConfig) args[2]; + PivotConfig pivotConfig = (PivotConfig) args[3]; + return new DataFrameTransformConfig(id, source, dest, pivotConfig); }); static { PARSER.declareString(constructorArg(), ID); - PARSER.declareString(constructorArg(), SOURCE); - PARSER.declareString(constructorArg(), DEST); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p), QUERY); + PARSER.declareObject(constructorArg(), (p, c) -> SourceConfig.PARSER.apply(p, null), SOURCE); + PARSER.declareObject(constructorArg(), (p, c) -> DestConfig.PARSER.apply(p, null), DEST); PARSER.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p), PIVOT_TRANSFORM); } @@ -73,14 +69,12 @@ public static DataFrameTransformConfig fromXContent(final XContentParser parser) public DataFrameTransformConfig(final String id, - final String source, - final String dest, - final QueryConfig queryConfig, + final SourceConfig source, + final DestConfig dest, final PivotConfig pivotConfig) { this.id = id; this.source = source; this.dest = dest; - this.queryConfig = queryConfig; this.pivotConfig = pivotConfig; } @@ -88,11 +82,11 @@ public String getId() { return id; } - public String getSource() { + public SourceConfig getSource() { return source; } - public String getDestination() { + public DestConfig getDestination() { return dest; } @@ -100,23 +94,18 @@ public PivotConfig getPivotConfig() { return pivotConfig; } - public QueryConfig getQueryConfig() { - return queryConfig; - } - @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); if (id != null) { builder.field(ID.getPreferredName(), id); } - builder.field(SOURCE.getPreferredName(), source); + if (source != null) { + builder.field(SOURCE.getPreferredName(), source); + } if (dest != null) { builder.field(DEST.getPreferredName(), dest); } - if (queryConfig != null) { - builder.field(QUERY.getPreferredName(), queryConfig); - } if (pivotConfig != null) { builder.field(PIVOT_TRANSFORM.getPreferredName(), pivotConfig); } @@ -139,13 +128,12 @@ public boolean equals(Object other) { return Objects.equals(this.id, that.id) && Objects.equals(this.source, that.source) && Objects.equals(this.dest, that.dest) - && Objects.equals(this.queryConfig, that.queryConfig) && Objects.equals(this.pivotConfig, that.pivotConfig); } @Override public int hashCode() { - return Objects.hash(id, source, dest, queryConfig, pivotConfig); + return Objects.hash(id, source, dest, pivotConfig); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java new file mode 100644 index 000000000000..fd191bb600ca --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformState.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.SortedMap; +import java.util.TreeMap; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class DataFrameTransformState { + + private static final ParseField INDEXER_STATE = new ParseField("indexer_state"); + private static final ParseField TASK_STATE = new ParseField("task_state"); + private static final ParseField CURRENT_POSITION = new ParseField("current_position"); + private static final ParseField GENERATION = new ParseField("generation"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<DataFrameTransformState, Void> PARSER = + new ConstructingObjectParser<>("data_frame_transform_state", + args -> new DataFrameTransformState((DataFrameTransformTaskState) args[0], + (IndexerState) args[1], + (HashMap<String, Object>) args[2], + (long) args[3])); + + static { + PARSER.declareField(constructorArg(), + p -> DataFrameTransformTaskState.fromString(p.text()), + TASK_STATE, + ObjectParser.ValueType.STRING); + PARSER.declareField(constructorArg(), p -> IndexerState.fromString(p.text()), INDEXER_STATE, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.START_OBJECT) { + return p.map(); + } + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return null; + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), GENERATION); + } + + public static DataFrameTransformState fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final DataFrameTransformTaskState taskState; + private final IndexerState indexerState; + private final long generation; + private final SortedMap<String, Object> currentPosition; + + public DataFrameTransformState(DataFrameTransformTaskState taskState, + IndexerState indexerState, + @Nullable Map<String, Object> position, + long generation) { + this.taskState = taskState; + this.indexerState = indexerState; + this.currentPosition = position == null ? 
null : Collections.unmodifiableSortedMap(new TreeMap<>(position)); + this.generation = generation; + } + + public IndexerState getIndexerState() { + return indexerState; + } + + public DataFrameTransformTaskState getTaskState() { + return taskState; + } + + @Nullable + public Map getPosition() { + return currentPosition; + } + + public long getGeneration() { + return generation; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformState that = (DataFrameTransformState) other; + + return Objects.equals(this.taskState, that.taskState) && + Objects.equals(this.indexerState, that.indexerState) && + Objects.equals(this.currentPosition, that.currentPosition) && + this.generation == that.generation; + } + + @Override + public int hashCode() { + return Objects.hash(taskState, indexerState, currentPosition, generation); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java new file mode 100644 index 000000000000..9c45bfb1325c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStats.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class DataFrameTransformStateAndStats { + + public static final ParseField ID = new ParseField("id"); + public static final ParseField STATE_FIELD = new ParseField("state"); + public static final ParseField STATS_FIELD = new ParseField("stats"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "data_frame_transform_state_and_stats", true, + a -> new DataFrameTransformStateAndStats((String) a[0], (DataFrameTransformState) a[1], (DataFrameIndexerTransformStats) a[2])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataFrameTransformState.PARSER::apply, STATE_FIELD); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p), + STATS_FIELD); + } + + public static DataFrameTransformStateAndStats fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final String id; + private final DataFrameTransformState transformState; + private final DataFrameIndexerTransformStats transformStats; + + public DataFrameTransformStateAndStats(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats) { + this.id = id; + this.transformState = state; + this.transformStats = stats; + } + + public String getId() { + return id; + } + + public DataFrameIndexerTransformStats getTransformStats() { + return transformStats; + } + + public DataFrameTransformState getTransformState() { + return transformState; + } + + @Override + public int hashCode() { + return Objects.hash(id, transformState, transformStats); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DataFrameTransformStateAndStats that = (DataFrameTransformStateAndStats) other; + + return Objects.equals(this.id, that.id) && Objects.equals(this.transformState, that.transformState) + && Objects.equals(this.transformStats, that.transformStats); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformTaskState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformTaskState.java new file mode 100644 index 000000000000..7235a0aed28e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformTaskState.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import java.util.Locale; + +public enum DataFrameTransformTaskState { + STOPPED, STARTED, FAILED; + + public static DataFrameTransformTaskState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java new file mode 100644 index 000000000000..5e81a368f66b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Configuration containing the destination index for the {@link DataFrameTransformConfig} + */ +public class DestConfig implements ToXContentObject { + + public static final ParseField INDEX = new ParseField("index"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_frame_config_dest", + true, + args -> new DestConfig((String)args[0])); + + static { + PARSER.declareString(constructorArg(), INDEX); + } + + private final String index; + + public DestConfig(String index) { + this.index = Objects.requireNonNull(index, INDEX.getPreferredName()); + } + + public String getIndex() { + return index; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX.getPreferredName(), index); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || other.getClass() != getClass()) { + return false; + } + + DestConfig that = (DestConfig) other; + return Objects.equals(index, that.index); + } + + @Override + public int hashCode(){ + return Objects.hash(index); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java index 9ab67ad0ae0a..2adc2953c591 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java @@ -28,6 +28,9 @@ import java.io.IOException; import java.util.Objects; +/** + * Object for encapsulating the desired Query for a DataFrameTransform + */ public class QueryConfig implements ToXContentObject { private final QueryBuilder query; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SourceConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SourceConfig.java new file mode 100644 index 000000000000..19c099ba601d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SourceConfig.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + + +/** + * Class encapsulating all options for a {@link DataFrameTransformConfig} gathering data + */ +public class SourceConfig implements ToXContentObject { + + public static final ParseField QUERY = new ParseField("query"); + public static final ParseField INDEX = new ParseField("index"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_frame_config_source", + true, + args -> { + @SuppressWarnings("unchecked") + String[] index = ((List)args[0]).toArray(new String[0]); + // default handling: if the user does not specify a query, we default to match_all + QueryConfig queryConfig = (QueryConfig) args[1]; + return new SourceConfig(index, queryConfig); + }); + static { + PARSER.declareStringArray(constructorArg(), INDEX); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p), QUERY); + } + + private final String[] index; + private final QueryConfig queryConfig; + + /** + * Create a new SourceConfig for the provided indices. + * + * {@link QueryConfig} defaults to a MatchAll query. + * + * @param index Any number of indices. At least one non-null, non-empty, index should be provided + */ + public SourceConfig(String... index) { + this.index = index; + this.queryConfig = null; + } + + /** + * Create a new SourceConfig for the provided indices, from which data is gathered with the provided {@link QueryConfig} + * + * @param index Any number of indices. At least one non-null, non-empty, index should be provided + * @param queryConfig A QueryConfig object that contains the desired query. Defaults to MatchAll query. + */ + public SourceConfig(String[] index, QueryConfig queryConfig) { + this.index = index; + this.queryConfig = queryConfig; + } + + public String[] getIndex() { + return index; + } + + public QueryConfig getQueryConfig() { + return queryConfig; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (index != null) { + builder.array(INDEX.getPreferredName(), index); + } + if (queryConfig != null) { + builder.field(QUERY.getPreferredName(), queryConfig); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || other.getClass() != getClass()) { + return false; + } + + SourceConfig that = (SourceConfig) other; + return Arrays.equals(index, that.index) && Objects.equals(queryConfig, that.queryConfig); + } + + @Override + public int hashCode(){ + // Using Arrays.hashCode as Objects.hash does not deeply hash nested arrays. Since we are doing Array.equals, this is necessary + int hash = Arrays.hashCode(index); + return 31 * hash + (queryConfig == null ? 
0 : queryConfig.hashCode()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java index 4d2a000a00c8..3040b8a121cf 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java @@ -108,10 +108,26 @@ public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { return this; } + /** + * The document types to execute the explore against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public String[] types() { return this.types; } + /** + * The document types to execute the explore request against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public GraphExploreRequest types(String... types) { this.types = types; return this; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java index c01914ed2fd9..125bd6dab6c2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupJobResponse.java @@ -19,6 +19,8 @@ package org.elasticsearch.client.rollup; +import org.elasticsearch.client.core.IndexerJobStats; +import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.rollup.job.config.RollupJobConfig; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -27,7 +29,6 @@ import java.io.IOException; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -44,19 +45,10 @@ public class GetRollupJobResponse { static final ParseField CONFIG = new ParseField("config"); static final ParseField STATS = new ParseField("stats"); static final ParseField STATUS = new ParseField("status"); - static final ParseField NUM_PAGES = new ParseField("pages_processed"); - static final ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed"); - static final ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("rollups_indexed"); - static final ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); static final ParseField STATE = new ParseField("job_state"); static final ParseField CURRENT_POSITION = new ParseField("current_position"); + static final ParseField ROLLUPS_INDEXED = new ParseField("rollups_indexed"); static final ParseField UPGRADED_DOC_ID = new ParseField("upgraded_doc_id"); - static final ParseField INDEX_TIME_IN_MS = new ParseField("index_time_in_ms"); - static final ParseField SEARCH_TIME_IN_MS = new ParseField("search_time_in_ms"); - static final ParseField INDEX_TOTAL = new ParseField("index_total"); - static final ParseField SEARCH_TOTAL = new ParseField("search_total"); - static final ParseField SEARCH_FAILURES = new ParseField("search_failures"); - static final ParseField INDEX_FAILURES = new ParseField("index_failures"); private List jobs; @@ -182,101 +174,12 @@ public 
final String toString() { * The Rollup specialization of stats for the AsyncTwoPhaseIndexer. * Note: instead of `documents_indexed`, this XContent show `rollups_indexed` */ - public static class RollupIndexerJobStats { - private final long numPages; - private final long numInputDocuments; - private final long numOuputDocuments; - private final long numInvocations; - private long indexTime; - private long indexTotal; - private long searchTime; - private long searchTotal; - private long indexFailures; - private long searchFailures; + public static class RollupIndexerJobStats extends IndexerJobStats { RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations, long indexTime, long indexTotal, long searchTime, long searchTotal, long indexFailures, long searchFailures) { - this.numPages = numPages; - this.numInputDocuments = numInputDocuments; - this.numOuputDocuments = numOuputDocuments; - this.numInvocations = numInvocations; - this.indexTime = indexTime; - this.indexTotal = indexTotal; - this.searchTime = searchTime; - this.searchTotal = searchTotal; - this.indexFailures = indexFailures; - this.searchFailures = searchFailures; - } - - /** - * The number of pages read from the input indices. - */ - public long getNumPages() { - return numPages; - } - - /** - * The number of documents read from the input indices. - */ - public long getNumDocuments() { - return numInputDocuments; - } - - /** - * Number of times that the job woke up to write documents. - */ - public long getNumInvocations() { - return numInvocations; - } - - /** - * Number of documents written to the result indices. - */ - public long getOutputDocuments() { - return numOuputDocuments; - } - - /** - * Number of failures that have occurred during the bulk indexing phase of Rollup - */ - public long getIndexFailures() { - return indexFailures; - } - - /** - * Number of failures that have occurred during the search phase of Rollup - */ - public long getSearchFailures() { - return searchFailures; - } - - /** - * Returns the time spent indexing (cumulative) in milliseconds - */ - public long getIndexTime() { - return indexTime; - } - - /** - * Returns the time spent searching (cumulative) in milliseconds - */ - public long getSearchTime() { - return searchTime; - } - - /** - * Returns the total number of indexing requests that have been sent by the rollup job - * (Note: this is not the number of _documents_ that have been indexed) - */ - public long getIndexTotal() { - return indexTotal; - } - - /** - * Returns the total number of search requests that have been sent by the rollup job - */ - public long getSearchTotal() { - return searchTotal; + super(numPages, numInputDocuments, numOuputDocuments, numInvocations, + indexTime, searchTime, indexTotal, searchTotal, indexFailures, searchFailures); } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -287,7 +190,7 @@ public long getSearchTotal() { static { PARSER.declareLong(constructorArg(), NUM_PAGES); PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS); - PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS); + PARSER.declareLong(constructorArg(), ROLLUPS_INDEXED); PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); PARSER.declareLong(constructorArg(), INDEX_TIME_IN_MS); PARSER.declareLong(constructorArg(), INDEX_TOTAL); @@ -296,43 +199,6 @@ public long getSearchTotal() { PARSER.declareLong(constructorArg(), INDEX_FAILURES); PARSER.declareLong(constructorArg(), SEARCH_FAILURES); } - - 
@Override - public boolean equals(Object other) { - if (this == other) return true; - if (other == null || getClass() != other.getClass()) return false; - RollupIndexerJobStats that = (RollupIndexerJobStats) other; - return Objects.equals(this.numPages, that.numPages) - && Objects.equals(this.numInputDocuments, that.numInputDocuments) - && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) - && Objects.equals(this.numInvocations, that.numInvocations) - && Objects.equals(this.indexTime, that.indexTime) - && Objects.equals(this.searchTime, that.searchTime) - && Objects.equals(this.indexFailures, that.indexFailures) - && Objects.equals(this.searchFailures, that.searchFailures) - && Objects.equals(this.searchTotal, that.searchTotal) - && Objects.equals(this.indexTotal, that.indexTotal); - } - - @Override - public int hashCode() { - return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations, - indexTime, searchTime, indexFailures, searchFailures, searchTotal, indexTotal); - } - - @Override - public final String toString() { - return "{pages=" + numPages - + ", input_docs=" + numInputDocuments - + ", output_docs=" + numOuputDocuments - + ", invocations=" + numInvocations - + ", index_failures=" + indexFailures - + ", search_failures=" + searchFailures - + ", index_time_in_ms=" + indexTime - + ", index_total=" + indexTotal - + ", search_time_in_ms=" + searchTime - + ", search_total=" + searchTotal+ "}"; - } } /** @@ -417,40 +283,4 @@ public final String toString() { + ", upgradedDocumentId=" + upgradedDocumentId + "}"; } } - - /** - * IndexerState represents the internal state of the indexer. It - * is also persistent when changing from started/stopped in case the allocated - * task is restarted elsewhere. - */ - public enum IndexerState { - /** Indexer is running, but not actively indexing data (e.g. it's idle). */ - STARTED, - - /** Indexer is actively indexing data. */ - INDEXING, - - /** - * Transition state to where an indexer has acknowledged the stop - * but is still in process of halting. - */ - STOPPING, - - /** Indexer is "paused" and ignoring scheduled triggers. */ - STOPPED, - - /** - * Something (internal or external) has requested the indexer abort - * and shutdown. - */ - ABORTING; - - static IndexerState fromString(String name) { - return valueOf(name.trim().toUpperCase(Locale.ROOT)); - } - - String value() { - return name().toLowerCase(Locale.ROOT); - } - } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java index 9cb78dd9c83e..447c67abe329 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java @@ -29,8 +29,10 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * A representation of a single role-mapping. 
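For context, a minimal sketch of how a mapping built on the new role_templates support could be created through the request API changed further down in this patch; the mapping name, group DN, and mustache source are illustrative placeholders, and FieldRoleMapperExpression (with its ofGroups factory) is assumed unchanged from the existing security expression DSL:

import java.util.Collections;
import org.elasticsearch.client.security.PutRoleMappingRequest;
import org.elasticsearch.client.security.TemplateRoleName;
import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression;
import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression;

PutRoleMappingRequest templatedMappingRequest() {
    // Match users in an (example) LDAP group and derive the role name from the username.
    RoleMapperExpression rules = FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com");
    TemplateRoleName template = new TemplateRoleName("{\"source\":\"_user_{{username}}\"}", TemplateRoleName.Format.STRING);
    return new PutRoleMappingRequest("templated-mapping", true,
        Collections.emptyList(),              // no fixed role names ...
        Collections.singletonList(template),  // ... exactly one of roles / role_templates may be non-empty
        rules, Collections.emptyMap(), null); // null refresh policy falls back to the client default
}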
@@ -42,13 +44,14 @@ public final class ExpressionRoleMapping { @SuppressWarnings("unchecked") static final ConstructingObjectParser<ExpressionRoleMapping, String> PARSER = new ConstructingObjectParser<>("role-mapping", true, - (args, name) -> new ExpressionRoleMapping(name, (RoleMapperExpression) args[0], (List<String>) args[1], - (Map<String, Object>) args[2], (boolean) args[3])); + (args, name) -> new ExpressionRoleMapping(name, (RoleMapperExpression) args[0], (List<String>) args[1], + (List<TemplateRoleName>) args[2], (Map<String, Object>) args[3], (boolean) args[4])); static { PARSER.declareField(constructorArg(), (parser, context) -> RoleMapperExpressionParser.fromXContent(parser), Fields.RULES, ObjectParser.ValueType.OBJECT); - PARSER.declareStringArray(constructorArg(), Fields.ROLES); + PARSER.declareStringArray(optionalConstructorArg(), Fields.ROLES); + PARSER.declareObjectArray(optionalConstructorArg(), (parser, ctx) -> TemplateRoleName.fromXContent(parser), Fields.ROLE_TEMPLATES); PARSER.declareField(constructorArg(), XContentParser::map, Fields.METADATA, ObjectParser.ValueType.OBJECT); PARSER.declareBoolean(constructorArg(), Fields.ENABLED); } @@ -56,6 +59,7 @@ public final class ExpressionRoleMapping { private final String name; private final RoleMapperExpression expression; private final List<String> roles; + private final List<TemplateRoleName> roleTemplates; private final Map<String, Object> metadata; private final boolean enabled; @@ -70,10 +74,11 @@ public final class ExpressionRoleMapping { * @param enabled a flag when {@code true} signifies the role mapping is active */ public ExpressionRoleMapping(final String name, final RoleMapperExpression expr, final List<String> roles, - final Map<String, Object> metadata, boolean enabled) { + final List<TemplateRoleName> templates, final Map<String, Object> metadata, boolean enabled) { this.name = name; this.expression = expr; - this.roles = Collections.unmodifiableList(roles); + this.roles = roles == null ? Collections.emptyList() : Collections.unmodifiableList(roles); + this.roleTemplates = templates == null ? Collections.emptyList() : Collections.unmodifiableList(templates); this.metadata = (metadata == null) ? Collections.emptyMap() : Collections.unmodifiableMap(metadata); this.enabled = enabled; } @@ -90,6 +95,10 @@ public List<String> getRoles() { return roles; } + public List<TemplateRoleName> getRoleTemplates() { + return roleTemplates; + } + public Map<String, Object> getMetadata() { return metadata; } @@ -99,53 +108,26 @@ public boolean isEnabled() { } @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (enabled ? 1231 : 1237); - result = prime * result + ((expression == null) ? 0 : expression.hashCode()); - result = prime * result + ((metadata == null) ? 0 : metadata.hashCode()); - result = prime * result + ((name == null) ? 0 : name.hashCode()); - result = prime * result + ((roles == null) ? 
0 : roles.hashCode()); - return result; + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final ExpressionRoleMapping that = (ExpressionRoleMapping) o; + return this.enabled == that.enabled && + Objects.equals(this.name, that.name) && + Objects.equals(this.expression, that.expression) && + Objects.equals(this.roles, that.roles) && + Objects.equals(this.roleTemplates, that.roleTemplates) && + Objects.equals(this.metadata, that.metadata); } @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - final ExpressionRoleMapping other = (ExpressionRoleMapping) obj; - if (enabled != other.enabled) - return false; - if (expression == null) { - if (other.expression != null) - return false; - } else if (!expression.equals(other.expression)) - return false; - if (metadata == null) { - if (other.metadata != null) - return false; - } else if (!metadata.equals(other.metadata)) - return false; - if (name == null) { - if (other.name != null) - return false; - } else if (!name.equals(other.name)) - return false; - if (roles == null) { - if (other.roles != null) - return false; - } else if (!roles.equals(other.roles)) - return false; - return true; + public int hashCode() { + return Objects.hash(name, expression, roles, roleTemplates, metadata, enabled); } public interface Fields { ParseField ROLES = new ParseField("roles"); + ParseField ROLE_TEMPLATES = new ParseField("role_templates"); ParseField ENABLED = new ParseField("enabled"); ParseField RULES = new ParseField("rules"); ParseField METADATA = new ParseField("metadata"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java index b8da17da72da..9a9e0fa62f96 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java @@ -40,22 +40,34 @@ public final class PutRoleMappingRequest implements Validatable, ToXContentObjec private final String name; private final boolean enabled; private final List roles; + private final List roleTemplates; private final RoleMapperExpression rules; private final Map metadata; private final RefreshPolicy refreshPolicy; + @Deprecated public PutRoleMappingRequest(final String name, final boolean enabled, final List roles, final RoleMapperExpression rules, - @Nullable final Map metadata, @Nullable final RefreshPolicy refreshPolicy) { + @Nullable final Map metadata, @Nullable final RefreshPolicy refreshPolicy) { + this(name, enabled, roles, Collections.emptyList(), rules, metadata, refreshPolicy); + } + + public PutRoleMappingRequest(final String name, final boolean enabled, final List roles, final List templates, + final RoleMapperExpression rules, @Nullable final Map metadata, + @Nullable final RefreshPolicy refreshPolicy) { if (Strings.hasText(name) == false) { throw new IllegalArgumentException("role-mapping name is missing"); } this.name = name; this.enabled = enabled; - if (roles == null || roles.isEmpty()) { - throw new IllegalArgumentException("role-mapping roles are missing"); + this.roles = Collections.unmodifiableList(Objects.requireNonNull(roles, "role-mapping roles cannot be null")); + this.roleTemplates = 
Collections.unmodifiableList(Objects.requireNonNull(templates, "role-mapping role_templates cannot be null")); + if (this.roles.isEmpty() && this.roleTemplates.isEmpty()) { + throw new IllegalArgumentException("in a role-mapping, one of roles or role_templates is required"); + } + if (this.roles.isEmpty() == false && this.roleTemplates.isEmpty() == false) { + throw new IllegalArgumentException("in a role-mapping, cannot specify both roles and role_templates"); } - this.roles = Collections.unmodifiableList(roles); this.rules = Objects.requireNonNull(rules, "role-mapping rules are missing"); this.metadata = (metadata == null) ? Collections.emptyMap() : metadata; this.refreshPolicy = (refreshPolicy == null) ? RefreshPolicy.getDefault() : refreshPolicy; @@ -73,6 +85,10 @@ public List getRoles() { return roles; } + public List getRoleTemplates() { + return roleTemplates; + } + public RoleMapperExpression getRules() { return rules; } @@ -87,7 +103,7 @@ public RefreshPolicy getRefreshPolicy() { @Override public int hashCode() { - return Objects.hash(name, enabled, refreshPolicy, roles, rules, metadata); + return Objects.hash(name, enabled, refreshPolicy, roles, roleTemplates, rules, metadata); } @Override @@ -104,11 +120,12 @@ public boolean equals(Object obj) { final PutRoleMappingRequest other = (PutRoleMappingRequest) obj; return (enabled == other.enabled) && - (refreshPolicy == other.refreshPolicy) && - Objects.equals(name, other.name) && - Objects.equals(roles, other.roles) && - Objects.equals(rules, other.rules) && - Objects.equals(metadata, other.metadata); + (refreshPolicy == other.refreshPolicy) && + Objects.equals(name, other.name) && + Objects.equals(roles, other.roles) && + Objects.equals(roleTemplates, other.roleTemplates) && + Objects.equals(rules, other.rules) && + Objects.equals(metadata, other.metadata); } @Override @@ -116,9 +133,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("enabled", enabled); builder.field("roles", roles); + builder.field("role_templates", roleTemplates); builder.field("rules", rules); builder.field("metadata", metadata); return builder.endObject(); } - } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/TemplateRoleName.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/TemplateRoleName.java new file mode 100644 index 000000000000..a6263cee69d1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/TemplateRoleName.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A role name that uses a dynamic template. + */ +public class TemplateRoleName implements ToXContentObject { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("template-role-name", + true, args -> new TemplateRoleName((String) args[0], (Format) args[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Fields.TEMPLATE); + PARSER.declareField(optionalConstructorArg(), Format::fromXContent, Fields.FORMAT, ObjectParser.ValueType.STRING); + } + private final String template; + private final Format format; + + public TemplateRoleName(String template, Format format) { + this.template = Objects.requireNonNull(template); + this.format = Objects.requireNonNull(format); + } + + public TemplateRoleName(Map template, Format format) throws IOException { + this(Strings.toString(XContentBuilder.builder(XContentType.JSON.xContent()).map(template)), format); + } + + public String getTemplate() { + return template; + } + + public Format getFormat() { + return format; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TemplateRoleName that = (TemplateRoleName) o; + return Objects.equals(this.template, that.template) && + this.format == that.format; + } + + @Override + public int hashCode() { + return Objects.hash(template, format); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(Fields.TEMPLATE.getPreferredName(), template) + .field(Fields.FORMAT.getPreferredName(), format.name().toLowerCase(Locale.ROOT)) + .endObject(); + } + + static TemplateRoleName fromXContent(XContentParser parser) throws IOException { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + return PARSER.parse(parser, null); + } + + + public enum Format { + STRING, JSON; + + private static Format fromXContent(XContentParser parser) throws IOException { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.currentToken(), parser::getTokenLocation); + return Format.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } + } + + public interface Fields { + ParseField TEMPLATE = new ParseField("template"); + ParseField FORMAT = new ParseField("format"); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e2102236cc42..6c161444e247 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -21,12 +21,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.action.bulk.BulkRequest; @@ -39,7 +35,6 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -58,12 +53,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.query.IdsQueryBuilder; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.DeleteByQueryAction; -import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.index.reindex.UpdateByQueryAction; -import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestDeleteAction; @@ -74,8 +63,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.tasks.RawTaskStatus; -import org.elasticsearch.tasks.TaskId; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -85,18 +72,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.lessThan; public class CrudIT extends ESRestHighLevelClientTestCase { @@ -137,7 +118,7 @@ public void testDelete() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][" + docId + "]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[" + docId + "]: " + "version conflict, required seqNo [2], primary term [2]. 
current document has seqNo [3] and primary term [1]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -166,7 +147,7 @@ public void testDelete() throws IOException { execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][" + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[" + docId + "]: version conflict, current version [12] is higher or equal to the one provided [10]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } @@ -301,7 +282,7 @@ public void testGet() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[_doc][id]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[id]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } @@ -527,7 +508,7 @@ public void testIndex() throws IOException { execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][id]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[id]: " + "version conflict, required seqNo [1], primary term [5]. current document has seqNo [2] and primary term [1]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -574,7 +555,7 @@ public void testIndex() throws IOException { }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][with_create_op_type]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[with_create_op_type]: " + "version conflict, document already exists (current version [1])]", exception.getMessage()); } } @@ -857,230 +838,6 @@ public void testBulk() throws IOException { validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest); } - private TaskId findTaskToRethrottle(String actionName) throws IOException { - long start = System.nanoTime(); - ListTasksRequest request = new ListTasksRequest(); - request.setActions(actionName); - request.setDetailed(true); - do { - ListTasksResponse list = highLevelClient().tasks().list(request, RequestOptions.DEFAULT); - list.rethrowFailures("Finding tasks to rethrottle"); - assertThat("tasks are left over from the last execution of this test", - list.getTaskGroups(), hasSize(lessThan(2))); - if (0 == list.getTaskGroups().size()) { - // The parent task hasn't started yet - continue; - } - TaskGroup taskGroup = list.getTaskGroups().get(0); - assertThat(taskGroup.getChildTasks(), empty()); - return taskGroup.getTaskInfo().getTaskId(); - } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); - throw new AssertionError("Couldn't find tasks to rethrottle. 
Here are the running tasks " + - highLevelClient().tasks().list(request, RequestOptions.DEFAULT)); - } - - public void testUpdateByQuery() throws Exception { - final String sourceIndex = "source1"; - { - // Prepare - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(sourceIndex, settings); - assertEquals( - RestStatus.OK, - highLevelClient().bulk( - new BulkRequest() - .add(new IndexRequest(sourceIndex).id("1") - .source(Collections.singletonMap("foo", 1), XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("2") - .source(Collections.singletonMap("foo", 2), XContentType.JSON)) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE), - RequestOptions.DEFAULT - ).status() - ); - } - { - // test1: create one doc in dest - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); - updateByQueryRequest.indices(sourceIndex); - updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); - updateByQueryRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = - execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); - assertEquals(1, bulkResponse.getTotal()); - assertEquals(1, bulkResponse.getUpdated()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - } - { - // test2: update using script - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); - updateByQueryRequest.indices(sourceIndex); - updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); - updateByQueryRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = - execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); - assertEquals(2, bulkResponse.getTotal()); - assertEquals(2, bulkResponse.getUpdated()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertEquals( - 3, - (int) (highLevelClient().get(new GetRequest(sourceIndex, "2"), RequestOptions.DEFAULT) - .getSourceAsMap().get("foo")) - ); - } - { - // test update-by-query rethrottling - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); - updateByQueryRequest.indices(sourceIndex); - updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); - updateByQueryRequest.setRefresh(true); - - // this following settings are supposed to halt reindexing after first document - updateByQueryRequest.setBatchSize(1); - updateByQueryRequest.setRequestsPerSecond(0.00001f); - final CountDownLatch taskFinished = new CountDownLatch(1); - highLevelClient().updateByQueryAsync(updateByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { - - @Override - public void onResponse(BulkByScrollResponse response) { - taskFinished.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail(e.toString()); - } - }); - - TaskId taskIdToRethrottle = 
findTaskToRethrottle(UpdateByQueryAction.NAME); - float requestsPerSecond = 1000f; - ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); - assertThat(response.getTasks(), hasSize(1)); - assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); - assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); - assertEquals(Float.toString(requestsPerSecond), - ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - taskFinished.await(2, TimeUnit.SECONDS); - - // any rethrottling after the update-by-query is done performed with the same taskId should result in a failure - response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); - assertTrue(response.getTasks().isEmpty()); - assertFalse(response.getNodeFailures().isEmpty()); - assertEquals(1, response.getNodeFailures().size()); - assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", - response.getNodeFailures().get(0).getCause().getMessage()); - } - } - - public void testDeleteByQuery() throws Exception { - final String sourceIndex = "source1"; - { - // Prepare - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(sourceIndex, settings); - assertEquals( - RestStatus.OK, - highLevelClient().bulk( - new BulkRequest() - .add(new IndexRequest(sourceIndex).id("1") - .source(Collections.singletonMap("foo", 1), XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("2") - .source(Collections.singletonMap("foo", 2), XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("3") - .source(Collections.singletonMap("foo", 3), XContentType.JSON)) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE), - RequestOptions.DEFAULT - ).status() - ); - } - { - // test1: delete one doc - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); - deleteByQueryRequest.indices(sourceIndex); - deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); - deleteByQueryRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = - execute(deleteByQueryRequest, highLevelClient()::deleteByQuery, highLevelClient()::deleteByQueryAsync); - assertEquals(1, bulkResponse.getTotal()); - assertEquals(1, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertEquals( - 2, - highLevelClient().search(new SearchRequest(sourceIndex), RequestOptions.DEFAULT).getHits().getTotalHits().value - ); - } - { - // test delete-by-query rethrottling - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); - deleteByQueryRequest.indices(sourceIndex); - deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("2", "3")); - deleteByQueryRequest.setRefresh(true); - - // this following settings are supposed to halt reindexing after first document - deleteByQueryRequest.setBatchSize(1); - 
deleteByQueryRequest.setRequestsPerSecond(0.00001f); - final CountDownLatch taskFinished = new CountDownLatch(1); - highLevelClient().deleteByQueryAsync(deleteByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { - - @Override - public void onResponse(BulkByScrollResponse response) { - taskFinished.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail(e.toString()); - } - }); - - TaskId taskIdToRethrottle = findTaskToRethrottle(DeleteByQueryAction.NAME); - float requestsPerSecond = 1000f; - ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); - assertThat(response.getTasks(), hasSize(1)); - assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); - assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); - assertEquals(Float.toString(requestsPerSecond), - ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - taskFinished.await(2, TimeUnit.SECONDS); - - // any rethrottling after the delete-by-query is done performed with the same taskId should result in a failure - response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); - assertTrue(response.getTasks().isEmpty()); - assertFalse(response.getNodeFailures().isEmpty()); - assertEquals(1, response.getNodeFailures().size()); - assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", - response.getNodeFailures().get(0).getCause().getMessage()); - } - } - public void testBulkProcessorIntegration() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java index 30560b0e31fd..8c6b1c604585 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java @@ -20,9 +20,12 @@ package org.elasticsearch.client; import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; @@ -137,4 +140,37 @@ public void testPreviewDataFrameTransform() throws IOException { assertThat(parsedConfig, equalTo(previewRequest.getConfig())); } } + + public void testGetDataFrameTransformStats() { + GetDataFrameTransformStatsRequest getStatsRequest = new GetDataFrameTransformStatsRequest("foo"); + Request request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + 
assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo/_stats")); + } + + public void testGetDataFrameTransform() { + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("bar"); + Request request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/bar")); + + assertFalse(request.getParameters().containsKey("from")); + assertFalse(request.getParameters().containsKey("size")); + + getRequest.setFrom(0); + getRequest.setSize(10); + request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + assertEquals("0", request.getParameters().get("from")); + assertEquals("10", request.getParameters().get("size")); + } + + public void testGetDataFrameTransform_givenMulitpleIds() { + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("foo", "bar", "baz"); + Request request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo,bar,baz")); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 736c3e373caa..e8724cc071da 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -25,7 +25,12 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformResponse; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; @@ -33,8 +38,13 @@ import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.client.dataframe.transforms.DestConfig; import org.elasticsearch.client.dataframe.transforms.QueryConfig; +import org.elasticsearch.client.dataframe.transforms.SourceConfig; import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; @@ -44,6 +54,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.junit.After; @@ -59,7 +70,9 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { @@ -144,15 +157,8 @@ public void testCreateDelete() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); - aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); - AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); - String id = "test-crud"; - DataFrameTransformConfig transform = new DataFrameTransformConfig(id, sourceIndex, "pivot-dest", queryConfig, pivotConfig); + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); DataFrameClient client = highLevelClient().dataFrame(); AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, @@ -170,19 +176,78 @@ public void testCreateDelete() throws IOException { assertThat(deleteError.getMessage(), containsString("Transform with id [test-crud] could not be found")); } - public void testStartStop() throws IOException { + public void testGetTransform() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); - aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); - AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + String id = "test-get"; + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); + + DataFrameClient client = highLevelClient().dataFrame(); + AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest(id); + GetDataFrameTransformResponse getResponse = execute(getRequest, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(1)); + assertEquals(transform, getResponse.getTransformConfigurations().get(0)); + } + + public void testGetAllAndPageTransforms() throws IOException { + String sourceIndex = "transform-source"; + createIndex(sourceIndex); + + DataFrameClient client = 
highLevelClient().dataFrame(); + + DataFrameTransformConfig transform = validDataFrameTransformConfig("test-get-all-1", sourceIndex, "pivot-dest-1"); + AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + transform = validDataFrameTransformConfig("test-get-all-2", sourceIndex, "pivot-dest-2"); + ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("_all"); + GetDataFrameTransformResponse getResponse = execute(getRequest, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(2)); + assertEquals(transform, getResponse.getTransformConfigurations().get(1)); + + getRequest.setFrom(0); + getRequest.setSize(1); + getResponse = execute(getRequest, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(1)); + + GetDataFrameTransformRequest getMulitple = new GetDataFrameTransformRequest("test-get-all-1", "test-get-all-2"); + getResponse = execute(getMulitple, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(2)); + } + + public void testGetMissingTransform() { + DataFrameClient client = highLevelClient().dataFrame(); + + ElasticsearchStatusException missingError = expectThrows(ElasticsearchStatusException.class, + () -> execute(new GetDataFrameTransformRequest("unknown"), client::getDataFrameTransform, + client::getDataFrameTransformAsync)); + assertThat(missingError.status(), equalTo(RestStatus.NOT_FOUND)); + } + + public void testStartStop() throws IOException { + String sourceIndex = "transform-source"; + createIndex(sourceIndex); String id = "test-stop-start"; - DataFrameTransformConfig transform = new DataFrameTransformConfig(id, sourceIndex, "pivot-dest", queryConfig, pivotConfig); + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); DataFrameClient client = highLevelClient().dataFrame(); AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, @@ -197,7 +262,10 @@ public void testStartStop() throws IOException { assertThat(startResponse.getNodeFailures(), empty()); assertThat(startResponse.getTaskFailures(), empty()); - // TODO once get df stats is implemented assert the df has started + GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), + client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); + assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); + assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState()); StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id); StopDataFrameTransformResponse stopResponse = @@ -212,14 +280,7 @@ public void testPreview() throws IOException { createIndex(sourceIndex); indexData(sourceIndex); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - GroupConfig 
groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); - aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); - AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); - - DataFrameTransformConfig transform = new DataFrameTransformConfig("test-preview", sourceIndex, null, queryConfig, pivotConfig); + DataFrameTransformConfig transform = validDataFrameTransformConfig("test-preview", sourceIndex, null); DataFrameClient client = highLevelClient().dataFrame(); PreviewDataFrameTransformResponse preview = execute(new PreviewDataFrameTransformRequest(transform), @@ -230,11 +291,75 @@ public void testPreview() throws IOException { assertThat(docs, hasSize(2)); Optional<Map<String, Object>> theresa = docs.stream().filter(doc -> "theresa".equals(doc.get("reviewer"))).findFirst(); assertTrue(theresa.isPresent()); - assertEquals(2.5d, (double)theresa.get().get("avg_rating"), 0.01d); + assertEquals(2.5d, (double) theresa.get().get("avg_rating"), 0.01d); Optional<Map<String, Object>> michel = docs.stream().filter(doc -> "michel".equals(doc.get("reviewer"))).findFirst(); assertTrue(michel.isPresent()); - assertEquals(3.6d, (double)michel.get().get("avg_rating"), 0.1d); + assertEquals(3.6d, (double) michel.get().get("avg_rating"), 0.1d); + } + + private DataFrameTransformConfig validDataFrameTransformConfig(String id, String source, String destination) { + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); + GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); + AggregationConfig aggConfig = new AggregationConfig(aggBuilder); + PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + + DestConfig destConfig = (destination != null) ? 
new DestConfig(destination) : null; + + return new DataFrameTransformConfig(id, + new SourceConfig(new String[]{source}, queryConfig), + destConfig, + pivotConfig); + } + + public void testGetStats() throws Exception { + String sourceIndex = "transform-source"; + createIndex(sourceIndex); + indexData(sourceIndex); + + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); + GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); + AggregationConfig aggConfig = new AggregationConfig(aggBuilder); + PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + + String id = "test-get-stats"; + DataFrameTransformConfig transform = new DataFrameTransformConfig(id, + new SourceConfig(new String[]{sourceIndex}, queryConfig), new DestConfig("pivot-dest"), pivotConfig); + + DataFrameClient client = highLevelClient().dataFrame(); + AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + transformsToClean.add(id); + + GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), + client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); + + assertEquals(1, statsResponse.getTransformsStateAndStats().size()); + DataFrameTransformStateAndStats stats = statsResponse.getTransformsStateAndStats().get(0); + assertEquals(DataFrameTransformTaskState.STOPPED, stats.getTransformState().getTaskState()); + assertEquals(IndexerState.STOPPED, stats.getTransformState().getIndexerState()); + + DataFrameIndexerTransformStats zeroIndexerStats = new DataFrameIndexerTransformStats(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L); + assertEquals(zeroIndexerStats, stats.getTransformStats()); + + // start the transform + StartDataFrameTransformResponse startTransformResponse = execute(new StartDataFrameTransformRequest(id), + client::startDataFrameTransform, + client::startDataFrameTransformAsync); + assertThat(startTransformResponse.isStarted(), is(true)); + assertBusy(() -> { + GetDataFrameTransformStatsResponse response = execute(new GetDataFrameTransformStatsRequest(id), + client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); + DataFrameTransformStateAndStats stateAndStats = response.getTransformsStateAndStats().get(0); + assertEquals(IndexerState.STARTED, stateAndStats.getTransformState().getIndexerState()); + assertEquals(DataFrameTransformTaskState.STARTED, stateAndStats.getTransformState().getTaskState()); + assertNotEquals(zeroIndexerStats, stateAndStats.getTransformStats()); + }); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 976ae754d335..f758156c222a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchModule; import 
org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; import org.junit.Before; @@ -130,7 +131,7 @@ protected interface AsyncMethodNoRequest { private static class HighLevelClient extends RestHighLevelClient { private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); + super(restClient, (client) -> {}, new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java index cfdd29cdfbfb..73cca7827e73 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -19,23 +19,54 @@ package org.elasticsearch.client; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.tasks.TaskSubmissionResponse; import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.index.reindex.UpdateByQueryAction; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.Script; +import org.elasticsearch.tasks.RawTaskStatus; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; public class ReindexIT extends ESRestHighLevelClientTestCase { + private static final String CONFLICT_PIPELINE_ID = "conflict_pipeline"; + public void testReindex() throws IOException { final String sourceIndex = "source1"; final String destinationIndex = "dest"; @@ -122,10 +153,338 @@ public void testReindexTask() throws Exception { } } + public void testReindexConflict() throws IOException { + final String sourceIndex = "testreindexconflict_source"; + final String destIndex = 
"testreindexconflict_dest"; + + final Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + createIndex(destIndex, settings); + final BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + assertThat(highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status(), equalTo(RestStatus.OK)); + + putConflictPipeline(); + + final ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destIndex); + reindexRequest.setRefresh(true); + reindexRequest.setDestPipeline(CONFLICT_PIPELINE_ID); + final BulkByScrollResponse response = highLevelClient().reindex(reindexRequest, RequestOptions.DEFAULT); + + assertThat(response.getVersionConflicts(), equalTo(2L)); + assertThat(response.getBulkFailures(), empty()); + assertThat(response.getSearchFailures(), hasSize(2)); + assertThat( + response.getSearchFailures().stream().map(ScrollableHitSource.SearchFailure::toString).collect(Collectors.toSet()), + everyItem(containsString("version conflict")) + ); + + assertThat(response.getTotal(), equalTo(2L)); + assertThat(response.getCreated(), equalTo(0L)); + assertThat(response.getUpdated(), equalTo(0L)); + assertThat(response.getDeleted(), equalTo(0L)); + assertThat(response.getNoops(), equalTo(0L)); + assertThat(response.getBatches(), equalTo(1)); + assertTrue(response.getTook().getMillis() > 0); + } + + public void testUpdateByQuery() throws Exception { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex).id("1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: update using script + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); + 
updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(2, bulkResponse.getTotal()); + assertEquals(2, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 3, + (int) (highLevelClient().get(new GetRequest(sourceIndex, "2"), RequestOptions.DEFAULT) + .getSourceAsMap().get("foo")) + ); + } + { + // test update-by-query rethrottling + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); + updateByQueryRequest.setRefresh(true); + + // this following settings are supposed to halt reindexing after first document + updateByQueryRequest.setBatchSize(1); + updateByQueryRequest.setRequestsPerSecond(0.00001f); + final CountDownLatch taskFinished = new CountDownLatch(1); + highLevelClient().updateByQueryAsync(updateByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { + + @Override + public void onResponse(BulkByScrollResponse response) { + taskFinished.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e.toString()); + } + }); + + TaskId taskIdToRethrottle = findTaskToRethrottle(UpdateByQueryAction.NAME); + float requestsPerSecond = 1000f; + ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); + assertThat(response.getTasks(), hasSize(1)); + assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); + assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); + assertEquals(Float.toString(requestsPerSecond), + ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); + taskFinished.await(2, TimeUnit.SECONDS); + + // any rethrottling after the update-by-query is done performed with the same taskId should result in a failure + response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); + assertTrue(response.getTasks().isEmpty()); + assertFalse(response.getNodeFailures().isEmpty()); + assertEquals(1, response.getNodeFailures().size()); + assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", + response.getNodeFailures().get(0).getCause().getMessage()); + } + } + + public void testUpdateByQueryConflict() throws IOException { + final String index = "testupdatebyqueryconflict"; + + final Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(index, settings); + final BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(index).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(index).id("2").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + 
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + assertThat(highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status(), equalTo(RestStatus.OK)); + + putConflictPipeline(); + + final UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(index); + updateByQueryRequest.setRefresh(true); + updateByQueryRequest.setPipeline(CONFLICT_PIPELINE_ID); + final BulkByScrollResponse response = highLevelClient().updateByQuery(updateByQueryRequest, RequestOptions.DEFAULT); + + assertThat(response.getVersionConflicts(), equalTo(1L)); + assertThat(response.getBulkFailures(), empty()); + assertThat(response.getSearchFailures(), hasSize(1)); + assertThat( + response.getSearchFailures().stream().map(ScrollableHitSource.SearchFailure::toString).collect(Collectors.toSet()), + everyItem(containsString("version conflict")) + ); + + assertThat(response.getTotal(), equalTo(2L)); + assertThat(response.getCreated(), equalTo(0L)); + assertThat(response.getUpdated(), equalTo(1L)); + assertThat(response.getDeleted(), equalTo(0L)); + assertThat(response.getNoops(), equalTo(0L)); + assertThat(response.getBatches(), equalTo(1)); + assertTrue(response.getTook().getMillis() > 0); + } + + public void testDeleteByQuery() throws Exception { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex).id("1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("3") + .source(Collections.singletonMap("foo", 3), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: delete one doc + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); + deleteByQueryRequest.indices(sourceIndex); + deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); + deleteByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(deleteByQueryRequest, highLevelClient()::deleteByQuery, highLevelClient()::deleteByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 2, + highLevelClient().search(new SearchRequest(sourceIndex), RequestOptions.DEFAULT).getHits().getTotalHits().value + ); + } + { + // test delete-by-query rethrottling + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); + deleteByQueryRequest.indices(sourceIndex); + deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("2", "3")); + deleteByQueryRequest.setRefresh(true); + + // this following settings are supposed to halt reindexing after first document + deleteByQueryRequest.setBatchSize(1); + deleteByQueryRequest.setRequestsPerSecond(0.00001f); + final CountDownLatch taskFinished = new CountDownLatch(1); + 
highLevelClient().deleteByQueryAsync(deleteByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { + + @Override + public void onResponse(BulkByScrollResponse response) { + taskFinished.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e.toString()); + } + }); + + TaskId taskIdToRethrottle = findTaskToRethrottle(DeleteByQueryAction.NAME); + float requestsPerSecond = 1000f; + ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); + assertThat(response.getTasks(), hasSize(1)); + assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); + assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); + assertEquals(Float.toString(requestsPerSecond), + ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); + taskFinished.await(2, TimeUnit.SECONDS); + + // any rethrottling after the delete-by-query is done performed with the same taskId should result in a failure + response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); + assertTrue(response.getTasks().isEmpty()); + assertFalse(response.getNodeFailures().isEmpty()); + assertEquals(1, response.getNodeFailures().size()); + assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", + response.getNodeFailures().get(0).getCause().getMessage()); + } + } + + private static TaskId findTaskToRethrottle(String actionName) throws IOException { + long start = System.nanoTime(); + ListTasksRequest request = new ListTasksRequest(); + request.setActions(actionName); + request.setDetailed(true); + do { + ListTasksResponse list = highLevelClient().tasks().list(request, RequestOptions.DEFAULT); + list.rethrowFailures("Finding tasks to rethrottle"); + assertThat("tasks are left over from the last execution of this test", + list.getTaskGroups(), hasSize(lessThan(2))); + if (0 == list.getTaskGroups().size()) { + // The parent task hasn't started yet + continue; + } + TaskGroup taskGroup = list.getTaskGroups().get(0); + assertThat(taskGroup.getChildTasks(), empty()); + return taskGroup.getTaskInfo().getTaskId(); + } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); + throw new AssertionError("Couldn't find tasks to rethrottle. 
Here are the running tasks " + + highLevelClient().tasks().list(request, RequestOptions.DEFAULT)); + } + static CheckedRunnable checkCompletionStatus(RestClient client, String taskId) { return () -> { Response response = client.performRequest(new Request("GET", "/_tasks/" + taskId)); assertTrue((boolean) entityAsMap(response).get("completed")); }; } + + private void putConflictPipeline() throws IOException { + final XContentBuilder pipelineBuilder = jsonBuilder() + .startObject() + .startArray("processors") + .startObject() + .startObject("set") + .field("field", "_version") + .field("value", 1) + .endObject() + .endObject() + .startObject() + .startObject("set") + .field("field", "_id") + .field("value", "1") + .endObject() + .endObject() + .endArray() + .endObject(); + final PutPipelineRequest putPipelineRequest = new PutPipelineRequest(CONFLICT_PIPELINE_ID, BytesReference.bytes(pipelineBuilder), + pipelineBuilder.contentType()); + assertTrue(highLevelClient().ingest().putPipeline(putPipelineRequest, RequestOptions.DEFAULT).isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index e57493acdf30..d876ce6ed5fb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupCapsRequest; @@ -37,7 +38,6 @@ import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobResponse; -import org.elasticsearch.client.rollup.GetRollupJobResponse.IndexerState; import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index b2c2028d0fbb..1176cabcc3d9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -141,8 +141,8 @@ public void testPutRoleMapping() throws IOException { .addExpression(FieldRoleMapperExpression.ofUsername(username)) .addExpression(FieldRoleMapperExpression.ofGroups(groupname)) .build(); - final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(roleMappingName, true, Collections.singletonList( - rolename), rules, null, refreshPolicy); + final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(roleMappingName, true, + Collections.singletonList(rolename), Collections.emptyList(), rules, null, refreshPolicy); final Request request = SecurityRequestConverters.putRoleMapping(putRoleMappingRequest); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedTasksResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponseTests.java similarity index 66% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedTasksResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponseTests.java index 584c2f4f8772..df7fcc14b95f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponseTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.core; +package org.elasticsearch.client.dataframe; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; @@ -42,7 +42,7 @@ public void testFromXContent() throws IOException { this::createTestInstance, AcknowledgedTasksResponseTests::toXContent, AcknowledgedTasksResponseTests::fromXContent) - .assertEqualsConsumer(this::assertEqualInstances) + .assertEqualsConsumer(AcknowledgedTasksResponseTests::assertEqualInstances) .assertToXContentEquivalence(false) .supportsUnknownFields(false) .test(); @@ -50,32 +50,15 @@ public void testFromXContent() throws IOException { // Serialisation of TaskOperationFailure and ElasticsearchException changes // the object so use a custom compare method rather than Object.equals - private void assertEqualInstances(AcknowledgedTasksResponse expected, AcknowledgedTasksResponse actual) { + private static void assertEqualInstances(AcknowledgedTasksResponse expected, AcknowledgedTasksResponse actual) { assertNotSame(expected, actual); assertEquals(expected.isAcknowledged(), actual.isAcknowledged()); - List expectedTaskFailures = expected.getTaskFailures(); - List actualTaskFailures = actual.getTaskFailures(); - - assertListEquals(expectedTaskFailures, actualTaskFailures, (a, b) -> - Objects.equals(a.getNodeId(), b.getNodeId()) - && Objects.equals(a.getTaskId(), b.getTaskId()) - && Objects.equals(a.getStatus(), b.getStatus()) - ); - - List expectedExceptions = expected.getNodeFailures(); - List actualExceptions = actual.getNodeFailures(); - - // actualException is a wrapped copy of expectedException so the - // error messages won't be the same but actualException should contain - // the error message from expectedException - assertListEquals(expectedExceptions, actualExceptions, (expectedException, actualException) -> { - assertThat(actualException.getDetailedMessage(), containsString(expectedException.getMessage())); - return true; - }); + assertTaskOperationFailuresEqual(expected.getTaskFailures(), actual.getTaskFailures()); + assertNodeFailuresEqual(expected.getNodeFailures(), actual.getNodeFailures()); } - private void assertListEquals(List expected, List actual, BiPredicate comparator) { + private static void assertListEquals(List expected, List actual, BiPredicate comparator) { if (expected == null) { assertNull(actual); return; @@ -89,6 +72,26 @@ private void assertListEquals(List expected, List actual, BiPredicate< } } + public static void assertTaskOperationFailuresEqual(List expected, + List actual) { + assertListEquals(expected, actual, (a, b) -> + Objects.equals(a.getNodeId(), b.getNodeId()) + && Objects.equals(a.getTaskId(), b.getTaskId()) + && Objects.equals(a.getStatus(), b.getStatus()) + ); + } + + public static 
void assertNodeFailuresEqual(List expected, + List actual) { + // actualException is a wrapped copy of expectedException so the + // error messages won't be the same but actualException should contain + // the error message from expectedException + assertListEquals(expected, actual, (expectedException, actualException) -> { + assertThat(actualException.getDetailedMessage(), containsString(expectedException.getMessage())); + return true; + }); + } + private static AcknowledgedTasksResponse fromXContent(XContentParser parser) { return AcknowledgedTasksResponse.generateParser("ack_tasks_response", AcknowledgedTasksResponse::new, "acknowleged") @@ -120,32 +123,35 @@ public static void toXContent(AcknowledgedTasksResponse response, XContentBuilde builder.startObject(); { builder.field("acknowleged", response.isAcknowledged()); + taskFailuresToXContent(response.getTaskFailures(), builder); + nodeFailuresToXContent(response.getNodeFailures(), builder); + } + builder.endObject(); + } - List taskFailures = response.getTaskFailures(); - if (taskFailures != null && taskFailures.isEmpty() == false) { - builder.startArray(AcknowledgedTasksResponse.TASK_FAILURES.getPreferredName()); - for (TaskOperationFailure failure : taskFailures) { - builder.startObject(); - failure.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - } - builder.endArray(); + public static void taskFailuresToXContent(List taskFailures, XContentBuilder builder) throws IOException { + if (taskFailures != null && taskFailures.isEmpty() == false) { + builder.startArray(AcknowledgedTasksResponse.TASK_FAILURES.getPreferredName()); + for (TaskOperationFailure failure : taskFailures) { + builder.startObject(); + failure.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); } + builder.endArray(); + } + } - List nodeFailures = response.getNodeFailures(); - if (nodeFailures != null && nodeFailures.isEmpty() == false) { - builder.startArray(AcknowledgedTasksResponse.NODE_FAILURES.getPreferredName()); - for (ElasticsearchException failure : nodeFailures) { - builder.startObject(); - failure.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - } - builder.endArray(); + public static void nodeFailuresToXContent(List nodeFailures, XContentBuilder builder) throws IOException { + if (nodeFailures != null && nodeFailures.isEmpty() == false) { + builder.startArray(AcknowledgedTasksResponse.NODE_FAILURES.getPreferredName()); + for (ElasticsearchException failure : nodeFailures) { + builder.startObject(); + failure.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); } + builder.endArray(); } - builder.endObject(); } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java new file mode 100644 index 000000000000..818eea4520ac --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class GetDataFrameTransformRequestTests extends ESTestCase { + public void testValidate() { + assertFalse(new GetDataFrameTransformRequest("valid-id").validate().isPresent()); + assertThat(new GetDataFrameTransformRequest(new String[0]).validate().get().getMessage(), + containsString("data frame transform id must not be null")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java new file mode 100644 index 000000000000..f7386e936301 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
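For orientation, the request-validation test above and the response test that follows exercise the new get-transform API of the high-level REST client. A minimal usage sketch, assuming an already-initialized RestHighLevelClient named client and a placeholder transform id (it mirrors the documentation test later in this diff):

    // Fetch transform configurations by id, with optional paging
    GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("my-transform"); // placeholder id
    request.setFrom(0);   // optional: page through the results
    request.setSize(100);
    GetDataFrameTransformResponse response =
        client.dataFrame().getDataFrameTransform(request, RequestOptions.DEFAULT);
    List<DataFrameTransformConfig> configs = response.getTransformConfigurations();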
+ */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + + +public class GetDataFrameTransformResponseTests extends ESTestCase { + + public void testXContentParser() throws IOException { + xContentTester(this::createParser, + GetDataFrameTransformResponseTests::createTestInstance, + GetDataFrameTransformResponseTests::toXContent, + GetDataFrameTransformResponse::fromXContent) + .supportsUnknownFields(false) + .test(); + } + + private static GetDataFrameTransformResponse createTestInstance() { + int numTransforms = randomIntBetween(0, 3); + List<DataFrameTransformConfig> transforms = new ArrayList<>(); + for (int i=0; i<numTransforms; i++) { + transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); + } + GetDataFrameTransformResponse.InvalidTransforms invalidTransforms = null; + if (randomBoolean()) { + List<String> invalidIds = Arrays.asList(generateRandomStringArray(5, 6, false, false)); + invalidTransforms = new GetDataFrameTransformResponse.InvalidTransforms(invalidIds); + } + return new GetDataFrameTransformResponse(transforms, transforms.size() + 10, invalidTransforms); + } + + private static void toXContent(GetDataFrameTransformResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.field("count", response.getCount()); + builder.field("transforms", response.getTransformConfigurations()); + if (response.getInvalidTransforms() != null) { + builder.startObject("invalid_transforms"); + builder.field("count", response.getInvalidTransforms().getCount()); + builder.field("transforms", response.getInvalidTransforms().getTransformIds()); + builder.endObject(); + } + } + builder.endObject(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequestTests.java new file mode 100644 index 000000000000..300a8180168f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequestTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class GetDataFrameTransformStatsRequestTests extends ESTestCase { + public void testValidate() { + assertFalse(new GetDataFrameTransformStatsRequest("valid-id").validate().isPresent()); + assertThat(new GetDataFrameTransformStatsRequest(null).validate().get().getMessage(), + containsString("data frame transform id must not be null")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponseTests.java new file mode 100644 index 000000000000..89c0813eff8c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponseTests.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
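The stats request test above and the stats response test that follows cover the new get-transform-stats API. A minimal usage sketch, assuming an initialized RestHighLevelClient named client and a placeholder transform id (it mirrors the documentation test later in this diff):

    // Fetch state and stats for a transform and unpack the pieces the documentation test asserts on
    GetDataFrameTransformStatsRequest request = new GetDataFrameTransformStatsRequest("my-transform"); // placeholder id
    GetDataFrameTransformStatsResponse response =
        client.dataFrame().getDataFrameTransformStats(request, RequestOptions.DEFAULT);
    DataFrameTransformStateAndStats stateAndStats = response.getTransformsStateAndStats().get(0);
    DataFrameTransformTaskState taskState = stateAndStats.getTransformState().getTaskState();
    IndexerState indexerState = stateAndStats.getTransformState().getIndexerState();
    DataFrameIndexerTransformStats indexerStats = stateAndStats.getTransformStats();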
+ */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStatsTests; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class GetDataFrameTransformStatsResponseTests extends ESTestCase { + + public void testXContentParser() throws IOException { + xContentTester(this::createParser, + GetDataFrameTransformStatsResponseTests::createTestInstance, + GetDataFrameTransformStatsResponseTests::toXContent, + GetDataFrameTransformStatsResponse::fromXContent) + .assertEqualsConsumer(GetDataFrameTransformStatsResponseTests::assertEqualInstances) + .assertToXContentEquivalence(false) + .supportsUnknownFields(true) + .randomFieldsExcludeFilter(path -> path.isEmpty() == false) + .test(); + } + + private static GetDataFrameTransformStatsResponse createTestInstance() { + int count = randomIntBetween(1, 3); + List stats = new ArrayList<>(); + for (int i=0; i taskFailures = null; + if (randomBoolean()) { + taskFailures = new ArrayList<>(); + int numTaskFailures = randomIntBetween(1, 4); + for (int i=0; i nodeFailures = null; + if (randomBoolean()) { + nodeFailures = new ArrayList<>(); + int numNodeFailures = randomIntBetween(1, 4); + for (int i=0; i { @@ -65,14 +65,13 @@ public void testValidate() { containsString("preview requires a non-null data frame config")); // null id and destination is valid - DataFrameTransformConfig config = new DataFrameTransformConfig(null, "source", null, - QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + DataFrameTransformConfig config = new DataFrameTransformConfig(null, randomSourceConfig(), null, + PivotConfigTests.randomPivotConfig()); assertFalse(new PreviewDataFrameTransformRequest(config).validate().isPresent()); // null source is not valid - config = new DataFrameTransformConfig(null, null, null, - QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + config = new DataFrameTransformConfig(null, null, null, PivotConfigTests.randomPivotConfig()); Optional error = new PreviewDataFrameTransformRequest(config).validate(); assertTrue(error.isPresent()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java index ee73ccd8b7d4..9782418e322b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.client.ValidationException; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.client.dataframe.transforms.QueryConfigTests; import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfigTests; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -41,8 +40,7 @@ public class PutDataFrameTransformRequestTests extends AbstractXContentTestCase< public void testValidate() { assertFalse(createTestInstance().validate().isPresent()); - DataFrameTransformConfig config = new DataFrameTransformConfig(null, null, null, - QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + DataFrameTransformConfig config = new DataFrameTransformConfig(null, null, null, PivotConfigTests.randomPivotConfig()); Optional error = new PutDataFrameTransformRequest(config).validate(); assertTrue(error.isPresent()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStatsTests.java new file mode 100644 index 000000000000..f5c05ae47fea --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStatsTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
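The preview and put request tests above reflect the reshaped DataFrameTransformConfig: the source index and query now travel in a SourceConfig and the destination index in a DestConfig, replacing the old flat string arguments. A minimal construction sketch, mirroring the documentation test later in this diff (the transform id, index names, and field names are placeholders):

    QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder());
    SourceConfig sourceConfig = new SourceConfig(new String[]{"source-index"}, queryConfig);
    GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id")));
    AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
    aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
    PivotConfig pivotConfig = new PivotConfig(groupConfig, new AggregationConfig(aggBuilder));
    DataFrameTransformConfig config = new DataFrameTransformConfig(
        "reviewer-avg-rating",                 // transform id
        sourceConfig,                          // source indices + query
        new DestConfig("pivot-destination"),   // destination index
        pivotConfig);                          // group-by + aggregations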
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.client.core.IndexerJobStats; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class DataFrameIndexerTransformStatsTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + DataFrameIndexerTransformStatsTests::randomStats, + DataFrameIndexerTransformStatsTests::toXContent, + DataFrameIndexerTransformStats::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + public static DataFrameIndexerTransformStats randomStats() { + return new DataFrameIndexerTransformStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + } + + public static void toXContent(DataFrameIndexerTransformStats stats, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(IndexerJobStats.NUM_PAGES.getPreferredName(), stats.getNumPages()); + builder.field(IndexerJobStats.NUM_INPUT_DOCUMENTS.getPreferredName(), stats.getNumDocuments()); + builder.field(IndexerJobStats.NUM_OUTPUT_DOCUMENTS.getPreferredName(), stats.getOutputDocuments()); + builder.field(IndexerJobStats.NUM_INVOCATIONS.getPreferredName(), stats.getNumInvocations()); + builder.field(IndexerJobStats.INDEX_TIME_IN_MS.getPreferredName(), stats.getIndexTime()); + builder.field(IndexerJobStats.INDEX_TOTAL.getPreferredName(), stats.getIndexTotal()); + builder.field(IndexerJobStats.INDEX_FAILURES.getPreferredName(), stats.getIndexFailures()); + builder.field(IndexerJobStats.SEARCH_TIME_IN_MS.getPreferredName(), stats.getSearchTime()); + builder.field(IndexerJobStats.SEARCH_TOTAL.getPreferredName(), stats.getSearchTotal()); + builder.field(IndexerJobStats.SEARCH_FAILURES.getPreferredName(), stats.getSearchFailures()); + builder.endObject(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java index af90c15c3d90..fd744b2f9f8a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java @@ -30,11 +30,14 @@ import java.util.Collections; import java.util.function.Predicate; +import static org.elasticsearch.client.dataframe.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.client.dataframe.transforms.SourceConfigTests.randomSourceConfig; + public class DataFrameTransformConfigTests extends AbstractXContentTestCase { public static DataFrameTransformConfig randomDataFrameTransformConfig() { - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10), QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), + randomDestConfig(), PivotConfigTests.randomPivotConfig()); } @Override diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java new file mode 100644 index 000000000000..5c75120f86e0 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateAndStatsTests.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class DataFrameTransformStateAndStatsTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + DataFrameTransformStateAndStatsTests::randomInstance, + DataFrameTransformStateAndStatsTests::toXContent, + DataFrameTransformStateAndStats::fromXContent) + .supportsUnknownFields(true) + .randomFieldsExcludeFilter(field -> field.startsWith("state")) + .test(); + } + + public static DataFrameTransformStateAndStats randomInstance() { + return new DataFrameTransformStateAndStats(randomAlphaOfLength(10), + DataFrameTransformStateTests.randomDataFrameTransformState(), + DataFrameIndexerTransformStatsTests.randomStats()); + } + + public static void toXContent(DataFrameTransformStateAndStats stateAndStats, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(DataFrameTransformStateAndStats.ID.getPreferredName(), stateAndStats.getId()); + builder.field(DataFrameTransformStateAndStats.STATE_FIELD.getPreferredName()); + DataFrameTransformStateTests.toXContent(stateAndStats.getTransformState(), builder); + builder.field(DataFrameTransformStateAndStats.STATS_FIELD.getPreferredName()); + DataFrameIndexerTransformStatsTests.toXContent(stateAndStats.getTransformStats(), builder); + builder.endObject(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java new file mode 100644 index 000000000000..17dc38894816 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStateTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class DataFrameTransformStateTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, + DataFrameTransformStateTests::randomDataFrameTransformState, + DataFrameTransformStateTests::toXContent, + DataFrameTransformState::fromXContent) + .supportsUnknownFields(false) + .test(); + } + + public static DataFrameTransformState randomDataFrameTransformState() { + return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()), + randomFrom(IndexerState.values()), + randomPositionMap(), + randomLongBetween(0,10)); + } + + public static void toXContent(DataFrameTransformState state, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field("task_state", state.getTaskState().value()); + builder.field("indexer_state", state.getIndexerState().value()); + if (state.getPosition() != null) { + builder.field("current_position", state.getPosition()); + } + builder.field("generation", state.getGeneration()); + builder.endObject(); + } + + private static Map randomPositionMap() { + if (randomBoolean()) { + return null; + } + int numFields = randomIntBetween(1, 5); + Map position = new HashMap<>(); + for (int i = 0; i < numFields; i++) { + Object value; + if (randomBoolean()) { + value = randomLong(); + } else { + value = randomAlphaOfLengthBetween(1, 10); + } + position.put(randomAlphaOfLengthBetween(3, 10), value); + } + return position; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java new file mode 100644 index 000000000000..f2950b64cf7c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class DestConfigTests extends AbstractXContentTestCase { + + public static DestConfig randomDestConfig() { + return new DestConfig(randomAlphaOfLength(10)); + } + + @Override + protected DestConfig doParseInstance(XContentParser parser) throws IOException { + return DestConfig.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected DestConfig createTestInstance() { + return randomDestConfig(); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/SourceConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/SourceConfigTests.java new file mode 100644 index 000000000000..722c265c9a05 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/SourceConfigTests.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe.transforms; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.function.Predicate; + +import static java.util.Collections.emptyList; + + +public class SourceConfigTests extends AbstractXContentTestCase { + + public static SourceConfig randomSourceConfig() { + return new SourceConfig(generateRandomStringArray(10, 10, false, false), + QueryConfigTests.randomQueryConfig()); + } + + + @Override + protected SourceConfig doParseInstance(XContentParser parser) throws IOException { + return SourceConfig.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // allow unknown fields in the root of the object only as QueryConfig stores a Map + return field -> !field.isEmpty(); + } + + @Override + protected SourceConfig createTestInstance() { + return randomSourceConfig(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 5e69107ff36c..b7d6967206c2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -25,7 +25,12 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformResponse; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; @@ -33,8 +38,13 @@ import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStateAndStats; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformTaskState; +import org.elasticsearch.client.dataframe.transforms.DestConfig; import org.elasticsearch.client.dataframe.transforms.QueryConfig; +import 
org.elasticsearch.client.dataframe.transforms.SourceConfig; import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; @@ -56,6 +66,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.hasSize; public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTestCase { @@ -106,6 +117,10 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException // tag::put-data-frame-transform-query-config QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); // end::put-data-frame-transform-query-config + // tag::put-data-frame-transform-source-config + SourceConfig sourceConfig = + new SourceConfig(new String[]{"source-index"}, queryConfig); + // end::put-data-frame-transform-source-config // tag::put-data-frame-transform-group-config GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", // <1> @@ -123,10 +138,9 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException // tag::put-data-frame-transform-config DataFrameTransformConfig transformConfig = new DataFrameTransformConfig("reviewer-avg-rating", // <1> - "source-index", // <2> - "pivot-destination", // <3> - queryConfig, // <4> - pivotConfig); // <5> + sourceConfig, // <2> + new DestConfig("pivot-destination"), // <3> + pivotConfig); // <4> // end::put-data-frame-transform-config { @@ -145,7 +159,7 @@ public void testPutDataFrameTransform() throws IOException, InterruptedException } { DataFrameTransformConfig configWithDifferentId = new DataFrameTransformConfig("reviewer-avg-rating2", - transformConfig.getSource(), transformConfig.getDestination(), transformConfig.getQueryConfig(), + transformConfig.getSource(), transformConfig.getDestination(), transformConfig.getPivotConfig()); PutDataFrameTransformRequest request = new PutDataFrameTransformRequest(configWithDifferentId); @@ -166,7 +180,6 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); - ActionListener ackListener = listener; listener = new LatchedActionListener<>(listener, latch); // tag::put-data-frame-transform-execute-async @@ -191,7 +204,7 @@ public void testStartStop() throws IOException, InterruptedException { PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); DataFrameTransformConfig transformConfig = new DataFrameTransformConfig("mega-transform", - "source-data", "pivot-dest", queryConfig, pivotConfig); + new SourceConfig(new String[]{"source-data"}, queryConfig), new DestConfig("pivot-dest"), pivotConfig); client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig), RequestOptions.DEFAULT); transformsToClean.add(transformConfig.getId()); @@ -252,7 +265,6 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); - ActionListener ackListener = listener; listener = new LatchedActionListener<>(listener, latch); StartDataFrameTransformRequest request = new StartDataFrameTransformRequest("mega-transform"); @@ -282,7 +294,6 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); - 
ActionListener ackListener = listener; listener = new LatchedActionListener<>(listener, latch); StopDataFrameTransformRequest request = new StopDataFrameTransformRequest("mega-transform"); @@ -308,9 +319,9 @@ public void testDeleteDataFrameTransform() throws IOException, InterruptedExcept PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); DataFrameTransformConfig transformConfig1 = new DataFrameTransformConfig("mega-transform", - "source-data", "pivot-dest", queryConfig, pivotConfig); + new SourceConfig(new String[]{"source-data"}, queryConfig), new DestConfig("pivot-dest"), pivotConfig); DataFrameTransformConfig transformConfig2 = new DataFrameTransformConfig("mega-transform2", - "source-data", "pivot-dest2", queryConfig, pivotConfig); + new SourceConfig(new String[]{"source-data"}, queryConfig), new DestConfig("pivot-dest2"), pivotConfig); client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig1), RequestOptions.DEFAULT); client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig2), RequestOptions.DEFAULT); @@ -375,20 +386,19 @@ public void testPreview() throws IOException, InterruptedException { // tag::preview-data-frame-transform-request DataFrameTransformConfig transformConfig = new DataFrameTransformConfig(null, // <1> - "source-data", + new SourceConfig(new String[]{"source-data"}, queryConfig), null, // <2> - queryConfig, pivotConfig); PreviewDataFrameTransformRequest request = - new PreviewDataFrameTransformRequest(transformConfig); // <3> + new PreviewDataFrameTransformRequest(transformConfig); // <3> // end::preview-data-frame-transform-request { // tag::preview-data-frame-transform-execute PreviewDataFrameTransformResponse response = - client.dataFrame() - .previewDataFrameTransform(request, RequestOptions.DEFAULT); + client.dataFrame() + .previewDataFrameTransform(request, RequestOptions.DEFAULT); // end::preview-data-frame-transform-execute assertNotNull(response.getDocs()); @@ -421,4 +431,155 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testGetStats() throws IOException, InterruptedException { + createIndex("source-data"); + + RestHighLevelClient client = highLevelClient(); + + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); + GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); + AggregationConfig aggConfig = new AggregationConfig(aggBuilder); + PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + + String id = "statisitcal-transform"; + DataFrameTransformConfig transformConfig = new DataFrameTransformConfig(id, + new SourceConfig(new String[]{"source-data"}, queryConfig), new DestConfig("dest"), pivotConfig); + client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig), RequestOptions.DEFAULT); + + // tag::get-data-frame-transform-stats-request + GetDataFrameTransformStatsRequest request = + new GetDataFrameTransformStatsRequest(id); // <1> + // end::get-data-frame-transform-stats-request + + { + // tag::get-data-frame-transform-stats-execute + GetDataFrameTransformStatsResponse response = + client.dataFrame() + .getDataFrameTransformStats(request, RequestOptions.DEFAULT); + // end::get-data-frame-transform-stats-execute + + 
assertThat(response.getTransformsStateAndStats(), hasSize(1)); + + // tag::get-data-frame-transform-stats-response + DataFrameTransformStateAndStats stateAndStats = + response.getTransformsStateAndStats().get(0); // <1> + DataFrameTransformTaskState taskState = + stateAndStats.getTransformState().getTaskState(); // <2> + IndexerState indexerState = + stateAndStats.getTransformState().getIndexerState(); // <3> + DataFrameIndexerTransformStats transformStats = + stateAndStats.getTransformStats(); // <4> + // end::get-data-frame-transform-stats-response + + assertEquals(IndexerState.STOPPED, indexerState); + assertEquals(DataFrameTransformTaskState.STOPPED, taskState); + assertNotNull(transformStats); + } + { + // tag::get-data-frame-transform-stats-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse( + GetDataFrameTransformStatsResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-data-frame-transform-stats-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-data-frame-transform-stats-execute-async + client.dataFrame().getDataFrameTransformStatsAsync( + request, RequestOptions.DEFAULT, listener); // <1> + // end::get-data-frame-transform-stats-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + + public void testGetDataFrameTransform() throws IOException, InterruptedException { + createIndex("source-data"); + + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); + GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); + AggregationConfig aggConfig = new AggregationConfig(aggBuilder); + PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + + + DataFrameTransformConfig putTransformConfig = new DataFrameTransformConfig("mega-transform", + new SourceConfig(new String[]{"source-data"}, queryConfig), + new DestConfig("pivot-dest"), pivotConfig); + + RestHighLevelClient client = highLevelClient(); + client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(putTransformConfig), RequestOptions.DEFAULT); + transformsToClean.add(putTransformConfig.getId()); + + { + // tag::get-data-frame-transform-request + GetDataFrameTransformRequest request = + new GetDataFrameTransformRequest("mega-transform"); // <1> + // end::get-data-frame-transform-request + + // tag::get-data-frame-transform-request-options + request.setFrom(0); // <1> + request.setSize(100); // <2> + // end::get-data-frame-transform-request-options + + // tag::get-data-frame-transform-execute + GetDataFrameTransformResponse response = + client.dataFrame() + .getDataFrameTransform(request, RequestOptions.DEFAULT); + // end::get-data-frame-transform-execute + + // tag::get-data-frame-transform-response + List transformConfigs = + response.getTransformConfigurations(); + // end::get-data-frame-transform-response + + assertEquals(1, transformConfigs.size()); + } + { + // tag::get-data-frame-transform-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetDataFrameTransformResponse response) { + // <1> + } + + @Override + public 
void onFailure(Exception e) { + // <2> + } + }; + // end::get-data-frame-transform-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("mega-transform"); + + // tag::get-data-frame-transform-execute-async + client.dataFrame().getDataFrameTransformAsync( + request, RequestOptions.DEFAULT, listener); // <1> + // end::get-data-frame-transform-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 1afe6382fa5f..b095ca5a9a0d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -75,6 +75,7 @@ import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.PutUserResponse; import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.TemplateRoleName; import org.elasticsearch.client.security.support.ApiKey; import org.elasticsearch.client.security.support.CertificateInfo; import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; @@ -94,6 +95,8 @@ import org.elasticsearch.common.util.set.Sets; import org.hamcrest.Matchers; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; @@ -108,9 +111,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.PBEKeySpec; - import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -120,6 +120,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; +import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -366,8 +367,8 @@ public void testPutRoleMapping() throws Exception { .addExpression(FieldRoleMapperExpression.ofUsername("*")) .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) .build(); - final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, + Collections.singletonList("superuser"), Collections.emptyList(), rules, null, RefreshPolicy.NONE); final PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT); // end::put-role-mapping-execute // tag::put-role-mapping-response @@ -381,7 +382,8 @@ public void testPutRoleMapping() throws Exception { .addExpression(FieldRoleMapperExpression.ofUsername("*")) .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) .build(); - final PutRoleMappingRequest request = new 
PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), + final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.emptyList(), + Collections.singletonList(new TemplateRoleName("{\"source\":\"{{username}}\"}", TemplateRoleName.Format.STRING)), rules, null, RefreshPolicy.NONE); // tag::put-role-mapping-execute-listener ActionListener listener = new ActionListener() { @@ -397,25 +399,32 @@ public void onFailure(Exception e) { }; // end::put-role-mapping-execute-listener + // avoid unused local warning + assertNotNull(listener); + // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; // tag::put-role-mapping-execute-async client.security().putRoleMappingAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-role-mapping-execute-async - assertTrue(latch.await(30L, TimeUnit.SECONDS)); + assertThat(future.get(), notNullValue()); + assertThat(future.get().isCreated(), is(false)); } } public void testGetRoleMappings() throws Exception { final RestHighLevelClient client = highLevelClient(); + final TemplateRoleName monitoring = new TemplateRoleName("{\"source\":\"monitoring\"}", TemplateRoleName.Format.STRING); + final TemplateRoleName template = new TemplateRoleName("{\"source\":\"{{username}}\"}", TemplateRoleName.Format.STRING); + final RoleMapperExpression rules1 = AnyRoleMapperExpression.builder().addExpression(FieldRoleMapperExpression.ofUsername("*")) .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")).build(); - final PutRoleMappingRequest putRoleMappingRequest1 = new PutRoleMappingRequest("mapping-example-1", true, Collections.singletonList( - "superuser"), rules1, null, RefreshPolicy.NONE); + final PutRoleMappingRequest putRoleMappingRequest1 = new PutRoleMappingRequest("mapping-example-1", true, Collections.emptyList(), + Arrays.asList(monitoring, template), rules1, null, RefreshPolicy.NONE); final PutRoleMappingResponse putRoleMappingResponse1 = client.security().putRoleMapping(putRoleMappingRequest1, RequestOptions.DEFAULT); boolean isCreated1 = putRoleMappingResponse1.isCreated(); @@ -424,8 +433,8 @@ public void testGetRoleMappings() throws Exception { "cn=admins,dc=example,dc=com")).build(); final Map metadata2 = new HashMap<>(); metadata2.put("k1", "v1"); - final PutRoleMappingRequest putRoleMappingRequest2 = new PutRoleMappingRequest("mapping-example-2", true, Collections.singletonList( - "monitoring"), rules2, metadata2, RefreshPolicy.NONE); + final PutRoleMappingRequest putRoleMappingRequest2 = new PutRoleMappingRequest("mapping-example-2", true, + Arrays.asList("superuser"), Collections.emptyList(), rules2, metadata2, RefreshPolicy.NONE); final PutRoleMappingResponse putRoleMappingResponse2 = client.security().putRoleMapping(putRoleMappingRequest2, RequestOptions.DEFAULT); boolean isCreated2 = putRoleMappingResponse2.isCreated(); @@ -445,7 +454,9 @@ public void testGetRoleMappings() throws Exception { assertThat(mappings.get(0).getName(), is("mapping-example-1")); assertThat(mappings.get(0).getExpression(), equalTo(rules1)); assertThat(mappings.get(0).getMetadata(), equalTo(Collections.emptyMap())); - assertThat(mappings.get(0).getRoles(), contains("superuser")); + assertThat(mappings.get(0).getRoles(), iterableWithSize(0)); + 
assertThat(mappings.get(0).getRoleTemplates(), iterableWithSize(2)); + assertThat(mappings.get(0).getRoleTemplates(), containsInAnyOrder(monitoring, template)); } { @@ -462,11 +473,13 @@ public void testGetRoleMappings() throws Exception { if (roleMapping.getName().equals("mapping-example-1")) { assertThat(roleMapping.getMetadata(), equalTo(Collections.emptyMap())); assertThat(roleMapping.getExpression(), equalTo(rules1)); - assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoles(), emptyIterable()); + assertThat(roleMapping.getRoleTemplates(), contains(monitoring, template)); } else { assertThat(roleMapping.getMetadata(), equalTo(metadata2)); assertThat(roleMapping.getExpression(), equalTo(rules2)); - assertThat(roleMapping.getRoles(), contains("monitoring")); + assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoleTemplates(), emptyIterable()); } } } @@ -485,11 +498,13 @@ public void testGetRoleMappings() throws Exception { if (roleMapping.getName().equals("mapping-example-1")) { assertThat(roleMapping.getMetadata(), equalTo(Collections.emptyMap())); assertThat(roleMapping.getExpression(), equalTo(rules1)); - assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoles(), emptyIterable()); + assertThat(roleMapping.getRoleTemplates(), containsInAnyOrder(monitoring, template)); } else { assertThat(roleMapping.getMetadata(), equalTo(metadata2)); assertThat(roleMapping.getExpression(), equalTo(rules2)); - assertThat(roleMapping.getRoles(), contains("monitoring")); + assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoleTemplates(), emptyIterable()); } } } @@ -1093,8 +1108,8 @@ public void testDeleteRoleMapping() throws Exception { { // Create role mappings final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("*"); - final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, + Collections.singletonList("superuser"), Collections.emptyList(), rules, null, RefreshPolicy.NONE); final PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT); boolean isCreated = response.isCreated(); assertTrue(isCreated); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupJobResponseTests.java index 3b3c94c31f59..a063294cae6d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupJobResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupJobResponseTests.java @@ -19,13 +19,14 @@ package org.elasticsearch.client.rollup; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.client.rollup.GetRollupJobResponse.IndexerState; +import org.elasticsearch.client.core.IndexerJobStats; +import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupIndexerJobStats; import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupJobStatus; import 
org.elasticsearch.client.rollup.job.config.RollupJobConfigTests; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -113,16 +114,16 @@ public void toXContent(RollupJobStatus status, XContentBuilder builder, ToXConte public void toXContent(RollupIndexerJobStats stats, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field(GetRollupJobResponse.NUM_PAGES.getPreferredName(), stats.getNumPages()); - builder.field(GetRollupJobResponse.NUM_INPUT_DOCUMENTS.getPreferredName(), stats.getNumDocuments()); - builder.field(GetRollupJobResponse.NUM_OUTPUT_DOCUMENTS.getPreferredName(), stats.getOutputDocuments()); - builder.field(GetRollupJobResponse.NUM_INVOCATIONS.getPreferredName(), stats.getNumInvocations()); - builder.field(GetRollupJobResponse.INDEX_TIME_IN_MS.getPreferredName(), stats.getIndexTime()); - builder.field(GetRollupJobResponse.INDEX_TOTAL.getPreferredName(), stats.getIndexTotal()); - builder.field(GetRollupJobResponse.INDEX_FAILURES.getPreferredName(), stats.getIndexFailures()); - builder.field(GetRollupJobResponse.SEARCH_TIME_IN_MS.getPreferredName(), stats.getSearchTime()); - builder.field(GetRollupJobResponse.SEARCH_TOTAL.getPreferredName(), stats.getSearchTotal()); - builder.field(GetRollupJobResponse.SEARCH_FAILURES.getPreferredName(), stats.getSearchFailures()); + builder.field(IndexerJobStats.NUM_PAGES.getPreferredName(), stats.getNumPages()); + builder.field(IndexerJobStats.NUM_INPUT_DOCUMENTS.getPreferredName(), stats.getNumDocuments()); + builder.field(GetRollupJobResponse.ROLLUPS_INDEXED.getPreferredName(), stats.getOutputDocuments()); + builder.field(IndexerJobStats.NUM_INVOCATIONS.getPreferredName(), stats.getNumInvocations()); + builder.field(IndexerJobStats.INDEX_TIME_IN_MS.getPreferredName(), stats.getIndexTime()); + builder.field(IndexerJobStats.INDEX_TOTAL.getPreferredName(), stats.getIndexTotal()); + builder.field(IndexerJobStats.INDEX_FAILURES.getPreferredName(), stats.getIndexFailures()); + builder.field(IndexerJobStats.SEARCH_TIME_IN_MS.getPreferredName(), stats.getSearchTime()); + builder.field(IndexerJobStats.SEARCH_TOTAL.getPreferredName(), stats.getSearchTotal()); + builder.field(IndexerJobStats.SEARCH_FAILURES.getPreferredName(), stats.getSearchFailures()); builder.endObject(); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java index 29bc7812f5b7..f30307ebde51 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java @@ -31,6 +31,7 @@ import java.util.HashMap; import java.util.Map; +import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.equalTo; public class ExpressionRoleMappingTests extends ESTestCase { @@ -59,48 +60,53 @@ public void usedDeprecatedName(String usedName, String modernName) { public void usedDeprecatedField(String usedName, String replacedWith) { } }, json), "example-role-mapping"); - final ExpressionRoleMapping expectedRoleMapping = new ExpressionRoleMapping("example-role-mapping", FieldRoleMapperExpression - .ofKeyValues("realm.name", "kerb1"), Collections.singletonList("superuser"), null, 
true); + final ExpressionRoleMapping expectedRoleMapping = new ExpressionRoleMapping("example-role-mapping", + FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), + null, true); assertThat(expressionRoleMapping, equalTo(expectedRoleMapping)); } public void testEqualsHashCode() { - final ExpressionRoleMapping expressionRoleMapping = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression - .ofKeyValues("realm.name", "kerb1"), Collections.singletonList("superuser"), null, true); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, (original) -> { - return new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getMetadata(), - original.isEnabled()); - }); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, (original) -> { - return new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getMetadata(), - original.isEnabled()); - }, ExpressionRoleMappingTests::mutateTestItem); + final ExpressionRoleMapping expressionRoleMapping = new ExpressionRoleMapping("kerberosmapping", + FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), + null, true); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, original -> + new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getRoleTemplates(), + original.getMetadata(), original.isEnabled()), ExpressionRoleMappingTests::mutateTestItem); } - private static ExpressionRoleMapping mutateTestItem(ExpressionRoleMapping original) { + private static ExpressionRoleMapping mutateTestItem(ExpressionRoleMapping original) throws IOException { ExpressionRoleMapping mutated = null; - switch (randomIntBetween(0, 4)) { + switch (randomIntBetween(0, 5)) { case 0: - mutated = new ExpressionRoleMapping("namechanged", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("superuser"), null, true); + mutated = new ExpressionRoleMapping("namechanged", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), null, true); break; case 1: - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("changed", "changed"), Collections - .singletonList("superuser"), null, true); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("changed", "changed"), + singletonList("superuser"), Collections.emptyList(), null, true); break; case 2: - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("changed"), null, true); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("changed"), Collections.emptyList(), null, true); break; case 3: Map metadata = new HashMap<>(); metadata.put("a", "b"); - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("superuser"), metadata, true); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), metadata, true); break; case 4: - mutated = new 
ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("superuser"), null, false); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + Collections.emptyList(), + singletonList(new TemplateRoleName(Collections.singletonMap("source", "superuser"), TemplateRoleName.Format.STRING)), + null, true); + break; + case 5: + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), null, false); break; } return mutated; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java index b612c9ead28a..20883b859f9a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java @@ -74,9 +74,10 @@ public void usedDeprecatedField(String usedName, String replacedWith) { }, json)); final List expectedRoleMappingsList = new ArrayList<>(); expectedRoleMappingsList.add(new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", - "kerb1"), Collections.singletonList("superuser"), null, true)); + "kerb1"), Collections.singletonList("superuser"), Collections.emptyList(), null, true)); expectedRoleMappingsList.add(new ExpressionRoleMapping("ldapmapping", FieldRoleMapperExpression.ofGroups( - "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), null, false)); + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), Collections.emptyList(), + null, false)); final GetRoleMappingsResponse expectedResponse = new GetRoleMappingsResponse(expectedRoleMappingsList); assertThat(response, equalTo(expectedResponse)); } @@ -84,7 +85,7 @@ public void usedDeprecatedField(String usedName, String replacedWith) { public void testEqualsHashCode() { final List roleMappingsList = new ArrayList<>(); roleMappingsList.add(new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", - "kerb1"), Collections.singletonList("superuser"), null, true)); + "kerb1"), Collections.singletonList("superuser"), Collections.emptyList(), null, true)); final GetRoleMappingsResponse response = new GetRoleMappingsResponse(roleMappingsList); assertNotNull(response); EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, (original) -> { @@ -101,15 +102,16 @@ private static GetRoleMappingsResponse mutateTestItem(GetRoleMappingsResponse or case 0: final List roleMappingsList1 = new ArrayList<>(); roleMappingsList1.add(new ExpressionRoleMapping("ldapmapping", FieldRoleMapperExpression.ofGroups( - "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), null, false)); + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), Collections.emptyList(), + null, false)); mutated = new GetRoleMappingsResponse(roleMappingsList1); break; case 1: final List roleMappingsList2 = new ArrayList<>(); - ExpressionRoleMapping orginialRoleMapping = original.getMappings().get(0); - roleMappingsList2.add(new 
ExpressionRoleMapping(orginialRoleMapping.getName(), FieldRoleMapperExpression.ofGroups( - "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), - orginialRoleMapping.getRoles(), orginialRoleMapping.getMetadata(), !orginialRoleMapping.isEnabled())); + ExpressionRoleMapping originalRoleMapping = original.getMappings().get(0); + roleMappingsList2.add(new ExpressionRoleMapping(originalRoleMapping.getName(), + FieldRoleMapperExpression.ofGroups("cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), originalRoleMapping.getRoles(), + Collections.emptyList(), originalRoleMapping.getMetadata(), !originalRoleMapping.isEnabled())); mutated = new GetRoleMappingsResponse(roleMappingsList2); break; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java index f0a3f7572ef3..bf5ba34bffc5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java @@ -29,12 +29,12 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -49,7 +49,8 @@ public void testPutRoleMappingRequest() { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy); + PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, + metadata, refreshPolicy); assertNotNull(putRoleMappingRequest); assertThat(putRoleMappingRequest.getName(), equalTo(name)); assertThat(putRoleMappingRequest.isEnabled(), equalTo(enabled)); @@ -68,23 +69,39 @@ public void testPutRoleMappingRequestThrowsExceptionForNullOrEmptyName() { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - final IllegalArgumentException ile = expectThrows(IllegalArgumentException.class, () -> new PutRoleMappingRequest(name, enabled, - roles, rules, metadata, refreshPolicy)); + final IllegalArgumentException ile = expectThrows(IllegalArgumentException.class, + () -> new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, metadata, refreshPolicy)); assertThat(ile.getMessage(), equalTo("role-mapping name is missing")); } - public void testPutRoleMappingRequestThrowsExceptionForNullOrEmptyRoles() { + public void testPutRoleMappingRequestThrowsExceptionForNullRoles() { final String name = randomAlphaOfLength(5); final boolean enabled = randomBoolean(); - final List roles = randomBoolean() ? 
null : Collections.emptyList(); + final List roles = null ; + final List roleTemplates = Collections.emptyList(); final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); final Map metadata = new HashMap<>(); metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - final IllegalArgumentException ile = expectThrows(IllegalArgumentException.class, () -> new PutRoleMappingRequest(name, enabled, - roles, rules, metadata, refreshPolicy)); - assertThat(ile.getMessage(), equalTo("role-mapping roles are missing")); + final RuntimeException ex = expectThrows(RuntimeException.class, + () -> new PutRoleMappingRequest(name, enabled, roles, roleTemplates, rules, metadata, refreshPolicy)); + assertThat(ex.getMessage(), equalTo("role-mapping roles cannot be null")); + } + + public void testPutRoleMappingRequestThrowsExceptionForEmptyRoles() { + final String name = randomAlphaOfLength(5); + final boolean enabled = randomBoolean(); + final List roles = Collections.emptyList(); + final List roleTemplates = Collections.emptyList(); + final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); + final Map metadata = new HashMap<>(); + metadata.put("k1", "v1"); + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + + final RuntimeException ex = expectThrows(RuntimeException.class, + () -> new PutRoleMappingRequest(name, enabled, roles, roleTemplates, rules, metadata, refreshPolicy)); + assertThat(ex.getMessage(), equalTo("in a role-mapping, one of roles or role_templates is required")); } public void testPutRoleMappingRequestThrowsExceptionForNullRules() { @@ -96,7 +113,8 @@ public void testPutRoleMappingRequestThrowsExceptionForNullRules() { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - expectThrows(NullPointerException.class, () -> new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy)); + expectThrows(NullPointerException.class, () -> new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, + metadata, refreshPolicy)); } public void testPutRoleMappingRequestToXContent() throws IOException { @@ -108,7 +126,8 @@ public void testPutRoleMappingRequestToXContent() throws IOException { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy); + final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, + metadata, refreshPolicy); final XContentBuilder builder = XContentFactory.jsonBuilder(); putRoleMappingRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -117,6 +136,42 @@ public void testPutRoleMappingRequestToXContent() throws IOException { "{"+ "\"enabled\":" + enabled + "," + "\"roles\":[\"superuser\"]," + + "\"role_templates\":[]," + + "\"rules\":{" + + "\"field\":{\"username\":[\"user\"]}" + + "}," + + "\"metadata\":{\"k1\":\"v1\"}" + + "}"; + + assertThat(output, equalTo(expected)); + } + + public void testPutRoleMappingRequestWithTemplateToXContent() throws IOException { + final String name = randomAlphaOfLength(5); + final boolean enabled = randomBoolean(); + final List templates = Arrays.asList( + new TemplateRoleName(Collections.singletonMap("source" , "_realm_{{realm.name}}"), TemplateRoleName.Format.STRING), + new 
TemplateRoleName(Collections.singletonMap("source" , "some_role"), TemplateRoleName.Format.STRING) + ); + final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); + final Map metadata = new HashMap<>(); + metadata.put("k1", "v1"); + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + + final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, Collections.emptyList(), templates, + rules, metadata, refreshPolicy); + + final XContentBuilder builder = XContentFactory.jsonBuilder(); + putRoleMappingRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); + final String output = Strings.toString(builder); + final String expected = + "{"+ + "\"enabled\":" + enabled + "," + + "\"roles\":[]," + + "\"role_templates\":[" + + "{\"template\":\"{\\\"source\\\":\\\"_realm_{{realm.name}}\\\"}\",\"format\":\"string\"}," + + "{\"template\":\"{\\\"source\\\":\\\"some_role\\\"}\",\"format\":\"string\"}" + + "]," + "\"rules\":{" + "\"field\":{\"username\":[\"user\"]}" + "}," + @@ -129,48 +184,59 @@ public void testPutRoleMappingRequestToXContent() throws IOException { public void testEqualsHashCode() { final String name = randomAlphaOfLength(5); final boolean enabled = randomBoolean(); - final List roles = Collections.singletonList("superuser"); + final List roles; + final List templates; + if (randomBoolean()) { + roles = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(6, 12))); + templates = Collections.emptyList(); + } else { + roles = Collections.emptyList(); + templates = Arrays.asList( + randomArray(1, 3, TemplateRoleName[]::new, + () -> new TemplateRoleName(randomAlphaOfLengthBetween(12, 60), randomFrom(TemplateRoleName.Format.values())) + )); + } final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); final Map metadata = new HashMap<>(); metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy); + PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, templates, rules, metadata, + refreshPolicy); assertNotNull(putRoleMappingRequest); EqualsHashCodeTestUtils.checkEqualsAndHashCode(putRoleMappingRequest, (original) -> { - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), original - .getMetadata(), original.getRefreshPolicy()); - }); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(putRoleMappingRequest, (original) -> { - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), original - .getMetadata(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), original.getMetadata(), original.getRefreshPolicy()); }, PutRoleMappingRequestTests::mutateTestItem); } private static PutRoleMappingRequest mutateTestItem(PutRoleMappingRequest original) { - switch (randomIntBetween(0, 4)) { + switch (randomIntBetween(0, 5)) { case 0: - return new PutRoleMappingRequest(randomAlphaOfLength(5), original.isEnabled(), original.getRoles(), original.getRules(), - original.getMetadata(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(randomAlphaOfLength(5), original.isEnabled(), original.getRoles(), + 
original.getRoleTemplates(), original.getRules(), original.getMetadata(), original.getRefreshPolicy()); case 1: - return new PutRoleMappingRequest(original.getName(), !original.isEnabled(), original.getRoles(), original.getRules(), - original.getMetadata(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(original.getName(), !original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), original.getMetadata(), original.getRefreshPolicy()); case 2: - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), FieldRoleMapperExpression.ofGroups("group"), original.getMetadata(), original.getRefreshPolicy()); case 3: - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), - Collections.emptyMap(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), Collections.emptyMap(), original.getRefreshPolicy()); case 4: - List values = Arrays.stream(RefreshPolicy.values()) - .filter(rp -> rp != original.getRefreshPolicy()) - .collect(Collectors.toList()); - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), original - .getMetadata(), randomFrom(values)); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), original.getMetadata(), + randomValueOtherThan(original.getRefreshPolicy(), () -> randomFrom(RefreshPolicy.values()))); + case 5: + List roles = new ArrayList<>(original.getRoles()); + roles.add(randomAlphaOfLengthBetween(3, 5)); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), roles, Collections.emptyList(), + original.getRules(), original.getMetadata(), original.getRefreshPolicy()); + default: - return new PutRoleMappingRequest(randomAlphaOfLength(5), original.isEnabled(), original.getRoles(), original.getRules(), - original.getMetadata(), original.getRefreshPolicy()); + throw new IllegalStateException("Bad random value"); } } diff --git a/distribution/build.gradle b/distribution/build.gradle index 306005e5d2f2..f3a59f06d0bd 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -248,8 +248,12 @@ for (String platform : ['linux', 'darwin', 'windows']) { } it.includeEmptyDirs false } + String extractDir = "${buildDir}/jdks/openjdk-${jdkVersion}_${platform}" project.task("extract${platform.capitalize()}Jdk", type: Copy) { - into "${buildDir}/jdks/openjdk-${jdkVersion}_${platform}" + doFirst { + project.delete(extractDir) + } + into extractDir if (extension.equals('zip')) { from({ zipTree(jdkConfig.singleFile) }, removeRootDir) } else { diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index ddd02497e843..2a581cf1480c 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -169,7 +169,7 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased 'JAVA_HOME', getJavaHome(it, Integer.parseInt( lines - .findAll({ it.startsWith("ES_BUILD_JAVA=")}) + .findAll({ it.startsWith("ES_BUILD_JAVA=") }) .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) .collect({ it.replace("ES_BUILD_JAVA=openjdk", "").trim() }) .join("!!") diff --git 
a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index a2ee7ae52417..5fdcee16ef15 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -39,9 +39,10 @@ FROM centos:7 ENV ELASTIC_CONTAINER true -RUN yum update -y && \ +RUN for iter in {1..10}; do yum update -y && \ yum install -y nc && \ - yum clean all + yum clean all && exit_code=0 && break || exit_code=$? && echo "yum error: retry $iter in 10s" && sleep 10; done; \ + (exit $exit_code) RUN groupadd -g 1000 elasticsearch && \ adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \ diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 50913fd94fcf..9bfaded3385a 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -18,7 +18,7 @@ source "`dirname "$0"`"/elasticsearch-env ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"` -ES_JAVA_OPTS="${JVM_OPTIONS//\"\$\{ES_TMPDIR\}\"/$ES_TMPDIR} $ES_JAVA_OPTS" +ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR} $ES_JAVA_OPTS" # manual parsing to find out, if process should be detached if ! echo $* | grep -E '(^-d |-d$| -d |--daemonize$|--daemonize )' > /dev/null; then diff --git a/distribution/src/bin/elasticsearch-env.bat b/distribution/src/bin/elasticsearch-env.bat index bfaf56d74661..baf65a0465c5 100644 --- a/distribution/src/bin/elasticsearch-env.bat +++ b/distribution/src/bin/elasticsearch-env.bat @@ -16,7 +16,25 @@ for %%I in ("%ES_HOME%..") do set ES_HOME=%%~dpfI rem now set the classpath set ES_CLASSPATH=!ES_HOME!\lib\* -rem now set the path to java +set HOSTNAME=%COMPUTERNAME% + +if not defined ES_PATH_CONF ( + set ES_PATH_CONF=!ES_HOME!\config +) + +rem now make ES_PATH_CONF absolute +for %%I in ("%ES_PATH_CONF%..") do set ES_PATH_CONF=%%~dpfI + +set ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} +set ES_DISTRIBUTION_TYPE=${es.distribution.type} + +cd /d "%ES_HOME%" + +rem now set the path to java, pass "nojava" arg to skip setting JAVA_HOME and JAVA +if "%1" == "nojava" ( + exit /b +) + if defined JAVA_HOME ( set JAVA="%JAVA_HOME%\bin\java.exe" ) else ( @@ -45,20 +63,6 @@ if defined JAVA_OPTS ( rem check the Java version %JAVA% -cp "%ES_CLASSPATH%" "org.elasticsearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1 -set HOSTNAME=%COMPUTERNAME% - -if not defined ES_PATH_CONF ( - set ES_PATH_CONF=!ES_HOME!\config -) - -rem now make ES_PATH_CONF absolute -for %%I in ("%ES_PATH_CONF%..") do set ES_PATH_CONF=%%~dpfI - -set ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor} -set ES_DISTRIBUTION_TYPE=${es.distribution.type} - if not defined ES_TMPDIR ( for /f "tokens=* usebackq" %%a in (`"%JAVA% -cp "!ES_CLASSPATH!" 
"org.elasticsearch.tools.launchers.TempDirectory""`) do set ES_TMPDIR=%%a ) - -cd /d "%ES_HOME%" diff --git a/distribution/src/bin/elasticsearch-service.bat b/distribution/src/bin/elasticsearch-service.bat index 43f8bef5148c..2886d6f68568 100644 --- a/distribution/src/bin/elasticsearch-service.bat +++ b/distribution/src/bin/elasticsearch-service.bat @@ -3,7 +3,10 @@ setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 +set NOJAVA=nojava +if /i "%1" == "install" set NOJAVA= + +call "%~dp0elasticsearch-env.bat" %NOJAVA% || exit /b 1 set EXECUTABLE=%ES_HOME%\bin\elasticsearch-service-x64.exe if "%SERVICE_ID%" == "" set SERVICE_ID=elasticsearch-service-x64 @@ -177,8 +180,7 @@ if not "%SERVICE_USERNAME%" == "" ( set SERVICE_PARAMS=%SERVICE_PARAMS% --ServiceUser "%SERVICE_USERNAME%" --ServicePassword "%SERVICE_PASSWORD%" ) ) - -"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main ++StartParams --quiet --StopClass org.elasticsearch.bootstrap.Elasticsearch --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %ES_JAVA_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%%JAVA_HOME%%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" %SERVICE_PARAMS% ++Environment HOSTNAME="%%COMPUTERNAME%%" +"%EXECUTABLE%" //IS//%SERVICE_ID% --Startup %ES_START_TYPE% --StopTimeout %ES_STOP_TIMEOUT% --StartClass org.elasticsearch.bootstrap.Elasticsearch --StartMethod main ++StartParams --quiet --StopClass org.elasticsearch.bootstrap.Elasticsearch --StopMethod close --Classpath "%ES_CLASSPATH%" --JvmMs %JVM_MS% --JvmMx %JVM_MX% --JvmSs %JVM_SS% --JvmOptions %ES_JAVA_OPTS% ++JvmOptions %ES_PARAMS% %LOG_OPTS% --PidFile "%SERVICE_ID%.pid" --DisplayName "%SERVICE_DISPLAY_NAME%" --Description "%SERVICE_DESCRIPTION%" --Jvm "%JAVA_HOME%%JVM_DLL%" --StartMode jvm --StopMode jvm --StartPath "%ES_HOME%" %SERVICE_PARAMS% ++Environment HOSTNAME="%%COMPUTERNAME%%" if not errorlevel 1 goto installed echo Failed installing '%SERVICE_ID%' service diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index e7da20dfe7e3..2b30d6a87b4a 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -86,7 +86,7 @@ -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true --Djava.io.tmpdir="${ES_TMPDIR}" +-Djava.io.tmpdir=${ES_TMPDIR} ## heap dumps diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 54a8d246ba5f..75a9784c4822 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -13,7 +13,7 @@ release-state can be: released | prerelease | unreleased ////////// -:release-state: prerelease +:release-state: unreleased :issue: https://github.com/elastic/elasticsearch/issues/ :ml-issue: https://github.com/elastic/ml-cpp/issues/ diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc new file mode 100644 index 000000000000..41fa841060b3 --- /dev/null +++ b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc @@ -0,0 +1,45 @@ +-- +:api: get-data-frame-transform +:request: GetDataFrameTransformRequest +:response: GetDataFrameTransformResponse +-- +[id="{upid}-{api}"] +=== Get Data Frame Transform API + +The Get Data 
Frame Transform API is used to get one or more {dataframe-transform}s. +The API accepts a +{request}+ object and returns a +{response}+. + +[id="{upid}-{api}-request"] +==== Get Data Frame Request + +A +{request}+ requires either a data frame transform id, a comma-separated list of ids, or +the special wildcard `_all` to get all {dataframe-transform}s. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new GET request referencing an existing {dataframe-transform} + +==== Optional Arguments + +The following arguments are optional. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-options] +-------------------------------------------------- +<1> Page {dataframe-transform}s starting from this value +<2> Return at most `size` {dataframe-transform}s + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the requested {dataframe-transform}s. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- \ No newline at end of file diff --git a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc new file mode 100644 index 000000000000..2b377d22c815 --- /dev/null +++ b/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc @@ -0,0 +1,40 @@ +-- +:api: get-data-frame-transform-stats +:request: GetDataFrameTransformStatsRequest +:response: GetDataFrameTransformStatsResponse +-- +[id="{upid}-{api}"] +=== Get Data Frame Transform Stats API + +The Get Data Frame Transform Stats API is used to read the operational statistics +of one or more {dataframe-transform}s. +The API accepts a +{request}+ object and returns a +{response}+. + +[id="{upid}-{api}-request"] +==== Get Data Frame Transform Stats Request + +A +{request}+ requires a data frame transform id or the special wildcard `_all` +to get the statistics for all {dataframe-transform}s. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new GET Stats request referencing an existing {dataframe-transform} + + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the requested {dataframe-transform} statistics. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The response contains a list of `DataFrameTransformStateAndStats` objects +<2> The running state of the transform task e.g. `started` +<3> The running state of the transform indexer e.g. `started`, `indexing`, etc. 
+<4> The transform progress statistics recording the number of documents indexed etc \ No newline at end of file diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc index 7f8b1b0e9fbf..2db0bbea7ac8 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc @@ -32,16 +32,25 @@ configuration and contains the following arguments: include-tagged::{doc-tests-file}[{api}-config] -------------------------------------------------- <1> The {dataframe-transform} ID -<2> The source index or index pattern +<2> The source indices and query from which to gather data <3> The destination index -<4> Optionally a QueryConfig -<5> The PivotConfig +<4> The PivotConfig [id="{upid}-{api}-query-config"] -==== QueryConfig + +==== SourceConfig + +The indices and the query from which to collect data. +If query is not set, a `match_all` query is used by default. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-source-config] +-------------------------------------------------- + +===== QueryConfig The query with which to select data from the source. -If not set a `match_all` query is used by default. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index d346ed0f0dc9..4e28efc2941d 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -554,12 +554,16 @@ include::ilm/remove_lifecycle_policy_from_index.asciidoc[] The Java High Level REST Client supports the following Data Frame APIs: +* <<{upid}-get-data-frame-transform>> +* <<{upid}-get-data-frame-transform-stats>> * <<{upid}-put-data-frame-transform>> * <<{upid}-delete-data-frame-transform>> * <<{upid}-preview-data-frame-transform>> * <<{upid}-start-data-frame-transform>> * <<{upid}-stop-data-frame-transform>> +include::dataframe/get_data_frame.asciidoc[] +include::dataframe/get_data_frame_stats.asciidoc[] include::dataframe/put_data_frame.asciidoc[] include::dataframe/delete_data_frame.asciidoc[] include::dataframe/preview_data_frame.asciidoc[] diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index 9249efd5d174..61dcadd6e10d 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -126,9 +126,7 @@ The Azure repository supports following settings: setting doesn't affect index files that are already compressed by default. Defaults to `true`. -`readonly`:: - - Makes repository read-only. Defaults to `false`. +include::repository-shared-settings.asciidoc[] `location_mode`:: diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index f655d2930707..b34c9456a930 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -240,6 +240,8 @@ The following settings are supported: setting doesn't affect index files that are already compressed by default. Defaults to `true`. 
+include::repository-shared-settings.asciidoc[] + `application_name`:: deprecated[7.0.0, This setting is now defined in the <>] diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index e798682a3869..bedb0e7e1ef8 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -64,6 +64,8 @@ The following settings are supported: Whether to compress the metadata or not. (Enabled by default) +include::repository-shared-settings.asciidoc[] + `chunk_size`:: Override the chunk size. (Disabled by default) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 7c4e763a3b04..084d67f23647 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -213,6 +213,8 @@ The following settings are supported: setting doesn't affect index files that are already compressed by default. Defaults to `true`. +include::repository-shared-settings.asciidoc[] + `server_side_encryption`:: When set to `true` files are encrypted on server side using AES256 diff --git a/docs/plugins/repository-shared-settings.asciidoc b/docs/plugins/repository-shared-settings.asciidoc new file mode 100644 index 000000000000..ca9345e0ffc2 --- /dev/null +++ b/docs/plugins/repository-shared-settings.asciidoc @@ -0,0 +1,11 @@ +`max_restore_bytes_per_sec`:: + + Throttles per node restore rate. Defaults to `40mb` per second. + +`max_snapshot_bytes_per_sec`:: + + Throttles per node snapshot rate. Defaults to `40mb` per second. + +`readonly`:: + + Makes repository read-only. Defaults to `false`. \ No newline at end of file diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index a9de182e3c00..0ff745c3e5cb 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -21,5 +21,6 @@ shards disk.indices disk.used disk.avail disk.total disk.percent host ip // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2/.+/ _cat] -Here we can see that each node has been allocated a single shard and -that they're all using about the same amount of space. +Here we can see that the single shard created has been allocated to the single +node available. + diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc index 06e9dc53bd9b..6f4d3224d7ae 100644 --- a/docs/reference/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -177,14 +177,17 @@ with the `ca` parameter. `--pass `:: Specifies the password for the generated private keys. + -Keys stored in PKCS#12 format are always password protected. +Keys stored in PKCS#12 format are always password protected, however, +this password may be _blank_. If you want to specify a blank password +without a prompt, use `--pass ""` (with no `=`) on the command line. + Keys stored in PEM format are password protected only if the `--pass` parameter is specified. If you do not supply an argument for the `--pass` parameter, you are prompted for a password. -+ -If you want to specify a _blank_ password (without prompting), use -`--pass ""` (with no `=`). +Encrypted PEM files do not support blank passwords (if you do not +wish to password-protect your PEM keys, then do not specify +`--pass`). + `--pem`:: Generates certificates and keys in PEM format instead of PKCS#12. This parameter cannot be used with the `csr` parameter. 
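The shared repository settings introduced earlier in this change (`max_restore_bytes_per_sec`, `max_snapshot_bytes_per_sec`, and `readonly`) apply to any repository type that includes them. As a minimal sketch of how they might be supplied when registering a repository from Java, assuming the `PutRepositoryRequest` and `Settings` classes from the Elasticsearch Java API and an illustrative `fs` repository with a hypothetical location:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.common.settings.Settings;

public class RepositoryThrottleExample {
    public static PutRepositoryRequest throttledRepository() {
        // Register an "fs" repository; the repository name and location are illustrative values only.
        PutRepositoryRequest request = new PutRepositoryRequest("my_backup");
        request.type("fs");
        request.settings(Settings.builder()
            .put("location", "/mount/backups/my_backup")   // hypothetical mount point
            .put("max_snapshot_bytes_per_sec", "40mb")     // per-node snapshot throttle (documented default)
            .put("max_restore_bytes_per_sec", "40mb")      // per-node restore throttle (documented default)
            .put("readonly", false)                        // keep the repository writable
            .build());
        return request;
    }
}
--------------------------------------------------

The request could then be sent through a snapshot client of your choice; the throttle values shown simply restate the documented defaults.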
diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/data-frames/apis/get-transform-stats.asciidoc index f377f3d510c0..badb1b665f65 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/data-frames/apis/get-transform-stats.asciidoc @@ -63,7 +63,8 @@ The API returns the following results: { "id" : "ecommerce_transform", "state" : { - "transform_state" : "started", + "indexer_state" : "started", + "task_state": "started", "current_position" : { "customer_id" : "9" }, diff --git a/docs/reference/frozen-indices.asciidoc b/docs/reference/frozen-indices.asciidoc index 687db3234dd0..b37c3426b170 100644 --- a/docs/reference/frozen-indices.asciidoc +++ b/docs/reference/frozen-indices.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="basic"] [[frozen-indices]] -= Frozen Indices += Frozen indices [partintro] -- @@ -47,7 +47,10 @@ To make a frozen index writable again, use the <> threadpool, which is `1` by default. +limited by the number of threads in the <> threadpool, which is `1` by default. Search requests will not be executed against frozen indices by default, even if a frozen index is named explicitly. This is to prevent accidental slowdowns by targeting a frozen index by mistake. To include frozen indices a search request must be executed with the query parameter `ignore_throttled=false`. @@ -90,10 +96,13 @@ The default value for `pre_filter_shard_size` is `128` but it's recommended to s significant overhead associated with this pre-filter phase. ================================ +[role="xpack"] +[testenv="basic"] +[[monitoring_frozen_indices]] == Monitoring frozen indices Frozen indices are ordinary indices that use search throttling and a memory efficient shard implementation. For API's like the -`<>` frozen indicies may identified by an index's `search.throttled` property (`sth`). +<> frozen indices may be identified by an index's `search.throttled` property (`sth`). 
[source,js] -------------------------------------------------- diff --git a/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png b/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png index 2307f0393266..bf7f1c63135a 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png and b/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png b/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png index 1ca209a57e55..f63df0987c16 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png and b/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png b/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png index 7561e94bdd99..825ce1b6357f 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png and b/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png b/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png index 62cef87a7ae9..bcad2a75d801 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png and b/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png b/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png index 70f2a1dd4dc2..c76ae19937a0 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png and b/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-6-data.png b/docs/reference/images/sql/client-apps/dbeaver-6-data.png index 5d33441fe3b8..053042b79111 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-6-data.png and b/docs/reference/images/sql/client-apps/dbeaver-6-data.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png index b0ff89cc9d75..cde4d9cc7cf2 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png and b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-2-driver.png b/docs/reference/images/sql/client-apps/dbvis-2-driver.png index b0f3a2927c96..cae3824547bc 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-2-driver.png and b/docs/reference/images/sql/client-apps/dbvis-2-driver.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png index 7f89cf84a8e6..332895a2c8a8 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png and b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png index 2027949c401a..d854dc826b1e 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png and b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-5-data.png b/docs/reference/images/sql/client-apps/dbvis-5-data.png index fb5ce8b86aa7..c67336568edc 100644 Binary files 
a/docs/reference/images/sql/client-apps/dbvis-5-data.png and b/docs/reference/images/sql/client-apps/dbvis-5-data.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png index 9a9c2c2634e3..29f06b7033d7 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png and b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png index 35f389747c97..a269e29d672e 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png and b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png index d5587060d2ea..1fc8e9ad6019 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png and b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-7-data.png b/docs/reference/images/sql/client-apps/squirell-7-data.png index 760ade7c670f..70837963b74b 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-7-data.png and b/docs/reference/images/sql/client-apps/squirell-7-data.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png index 03e740f400ae..659cfd0c4076 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png and b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-3-connection.png b/docs/reference/images/sql/client-apps/workbench-3-connection.png index 32643375e3de..9262ef0f533a 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-3-connection.png and b/docs/reference/images/sql/client-apps/workbench-3-connection.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-4-data.png b/docs/reference/images/sql/client-apps/workbench-4-data.png index 602f09d06e46..7b8251fc9588 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-4-data.png and b/docs/reference/images/sql/client-apps/workbench-4-data.png differ diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 014923d463cb..ee6cf3958375 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -92,22 +92,14 @@ from randomness] framework. 
This similarity has the following options: [horizontal] `basic_model`:: - Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`be`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelD.html[`d`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`g`], + Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`g`], {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIF.html[`if`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIn.html[`in`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIne.html[`ine`] and - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelP.html[`p`]. - -`be`, `d` and `p` should be avoided in practice as they might return scores that -are equal to 0 or infinite with terms that do not meet the expected random -distribution. + {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIn.html[`in`] and + {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIne.html[`ine`]. `after_effect`:: - Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffect.NoAfterEffect.html[`no`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`b`] and - {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectL.html[`l`]. + Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`b`] and + {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`l`]. `normalization`:: Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/Normalization.NoNormalization.html[`no`], diff --git a/docs/reference/licensing/update-license.asciidoc b/docs/reference/licensing/update-license.asciidoc index 0dfe9b2da888..81d80509252f 100644 --- a/docs/reference/licensing/update-license.asciidoc +++ b/docs/reference/licensing/update-license.asciidoc @@ -92,7 +92,7 @@ On Windows machine, use the following command: [source,shell] ------------------------------------------------------------ -gc .\license.json | Invoke-WebRequest -uri http://:/_license -Credential elastic -Method Put -ContentType "application/json" +Invoke-WebRequest -uri http://:/_xpack/license -Credential elastic -Method Put -ContentType "application/json" -InFile .\license.json ------------------------------------------------------------ In these examples, diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index b6a7052f69a0..2e09a0a8ca24 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -97,6 +97,12 @@ causing a mapping explosion: 100 objects within a nested field, will actually create 101 documents, as each nested object will be indexed as a separate hidden document. +`index.mapping.field_name_length.limit`:: + Setting for the maximum length of a field name. The default value is + Long.MAX_VALUE (no limit). This setting isn't really something that addresses + mappings explosion but might still be useful if you want to limit the field length. + It usually shouldn't be necessary to set this setting. The default is okay + unless a user starts to add a huge number of fields with really long names. 
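To make the new `index.mapping.field_name_length.limit` setting concrete, here is a minimal sketch of applying it at index-creation time through the Java high-level REST client; the index name and the limit of 50 characters are illustrative assumptions, not values taken from this change:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.common.settings.Settings;

public class FieldNameLengthLimitExample {
    public static void createLimitedIndex(RestHighLevelClient client) throws IOException {
        // Reject any mapped field whose name is longer than 50 characters (illustrative limit).
        CreateIndexRequest request = new CreateIndexRequest("my_index")
            .settings(Settings.builder()
                .put("index.mapping.field_name_length.limit", 50)
                .build());
        client.indices().create(request, RequestOptions.DEFAULT);
    }
}
--------------------------------------------------

With such a setting in place, a mapping update that tries to introduce a longer field name is rejected rather than silently accepted.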
[float] == Dynamic mapping diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 76b832a529fb..c0db156dc3a1 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -52,6 +52,7 @@ string:: <> and <> <>:: Record sparse vectors of float values. +<>:: A text-like field optimized for queries to implement as-you-type completion [float] === Multi-fields @@ -110,3 +111,5 @@ include::types/rank-features.asciidoc[] include::types/dense-vector.asciidoc[] include::types/sparse-vector.asciidoc[] + +include::types/search-as-you-type.asciidoc[] diff --git a/docs/reference/mapping/types/search-as-you-type.asciidoc b/docs/reference/mapping/types/search-as-you-type.asciidoc new file mode 100644 index 000000000000..aec21f2e3ca6 --- /dev/null +++ b/docs/reference/mapping/types/search-as-you-type.asciidoc @@ -0,0 +1,258 @@ +[[search-as-you-type]] +=== Search as you type datatype + +experimental[] + +The `search_as_you_type` field type is a text-like field that is optimized to +provide out-of-the-box support for queries that serve an as-you-type completion +use case. It creates a series of subfields that are analyzed to index terms +that can be efficiently matched by a query that partially matches the entire +indexed text value. Both prefix completion (i.e matching terms starting at the +beginning of the input) and infix completion (i.e. matching terms at any +position within the input) are supported. + +When adding a field of this type to a mapping + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "properties": { + "my_field": { + "type": "search_as_you_type" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +This creates the following fields + +[horizontal] + +`my_field`:: + + Analyzed as configured in the mapping. If an analyzer is not configured, + the default analyzer for the index is used + +`my_field._2gram`:: + + Wraps the analyzer of `my_field` with a shingle token filter of shingle + size 2 + +`my_field._3gram`:: + + Wraps the analyzer of `my_field` with a shingle token filter of shingle + size 3 + +`my_field._index_prefix`:: + + Wraps the analyzer of `my_field._3gram` with an edge ngram token filter + + +The size of shingles in subfields can be configured with the `max_shingle_size` +mapping parameter. The default is 3, and valid values for this parameter are +integer values 2 - 4 inclusive. Shingle subfields will be created for each +shingle size from 2 up to and including the `max_shingle_size`. The +`my_field._index_prefix` subfield will always use the analyzer from the shingle +subfield with the `max_shingle_size` when constructing its own analyzer. + +Increasing the `max_shingle_size` will improve matches for queries with more +consecutive terms, at the cost of larger index size. The default +`max_shingle_size` should usually be sufficient. + +The same input text is indexed into each of these fields automatically, with +their differing analysis chains, when an indexed document has a value for the +root field `my_field`. 
+ +[source,js] +-------------------------------------------------- +PUT my_index/_doc/1?refresh +{ + "my_field": "quick brown fox jump lazy dog" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The most efficient way of querying to serve a search-as-you-type use case is +usually a <> query of type +<> that targets the root +`search_as_you_type` field and its shingle subfields. This can match the query +terms in any order, but will score documents higher if they contain the terms +in order in a shingle subfield. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "multi_match": { + "query": "brown f", + "type": "bool_prefix", + "fields": [ + "my_field", + "my_field._2gram", + "my_field._3gram" + ] + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took" : 44, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.8630463, + "hits" : [ + { + "_index" : "my_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 0.8630463, + "_source" : { + "my_field" : "quick brown fox jump lazy dog" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took" : 44/"took" : $body.took/] +// TESTRESPONSE[s/"max_score" : 0.8630463/"max_score" : $body.hits.max_score/] +// TESTRESPONSE[s/"_score" : 0.8630463/"_score" : $body.hits.hits.0._score/] + +To search for documents that strictly match the query terms in order, or to +search using other properties of phrase queries, use a +<> on the root +field. A <> can also be used +if the last term should be matched exactly, and not as a prefix. Using phrase +queries may be less efficient than using the `match_bool_prefix` query. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "match_phrase_prefix": { + "my_field": "brown f" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[specific-params]] +==== Parameters specific to the `search_as_you_type` field + +The following parameters are accepted in a mapping for the `search_as_you_type` +field and are specific to this field type + +[horizontal] + +`max_shingle_size`:: + + The largest shingle size to index the input with and create subfields for, + creating one subfield for each shingle size between 2 and + `max_shingle_size`. Accepts integer values between 2 and 4 inclusive. This + option defaults to 3. + + +[[general-params]] +==== Parameters of the field type as a text field + +The following parameters are accepted in a mapping for the `search_as_you_type` +field due to its nature as a text-like field, and behave similarly to their +behavior when configuring a field of the <> datatype. Unless +otherwise noted, these options configure the root fields subfields in +the same way. + +<>:: + + The <> which should be used for + <> string fields, both at index-time and at + search-time (unless overridden by the + <>). Defaults to the default index + analyzer, or the <>. + +<>:: + + Should the field be searchable? Accepts `true` (default) or `false`. + +<>:: + + What information should be stored in the index, for search and highlighting + purposes. Defaults to `positions`. 
+ +<>:: + + Whether field-length should be taken into account when scoring queries. + Accepts `true` or `false`. This option configures the root field + and shingle subfields, where its default is `true`. It does not configure + the prefix subfield, where it it `false`. + +<>:: + + Whether the field value should be stored and retrievable separately from + the <> field. Accepts `true` or `false` + (default). This option only configures the root field, and does not + configure any subfields. + +<>:: + + The <> that should be used at search time on + <> fields. Defaults to the `analyzer` setting. + +<>:: + + The <> that should be used at search time when a + phrase is encountered. Defaults to the `search_analyzer` setting. + +<>:: + + Which scoring algorithm or _similarity_ should be used. Defaults + to `BM25`. + +<>:: + + Whether term vectors should be stored for an <> + field. Defaults to `no`. This option configures the root field and shingle + subfields, but not the prefix subfield. + + +[[prefix-queries]] +==== Optimization of prefix queries + +When making a <> query to the root field or +any of its subfields, the query will be rewritten to a +<> query on the `._index_prefix` subfield. This +matches more efficiently than is typical of `prefix` queries on text fields, +as prefixes up to a certain length of each shingle are indexed directly as +terms in the `._index_prefix` subfield. + +The analyzer of the `._index_prefix` subfield slightly modifies the +shingle-building behavior to also index prefixes of the terms at the end of the +field's value that normally would not be produced as shingles. For example, if +the value `quick brown fox` is indexed into a `search_as_you_type` field with +`max_shingle_size` of 3, prefixes for `brown fox` and `fox` are also indexed +into the `._index_prefix` subfield even though they do not appear as terms in +the `._3gram` subfield. This allows for completion of all the terms in the +field's input. diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index c8d5b7d5b169..0c695a3b2bb4 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -11,16 +11,14 @@ See also <> and <>. coming[8.0.0] -* <> * <> * <> * <> * <> +* <> -[float] -[[breaking_80_notable]] -=== Notable changes -// NOTE: The content in this section is also used in the Installation and Upgrade Guide. +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide //tag::notable-breaking-changes[] [float] @@ -44,3 +42,4 @@ include::migrate_8_0/analysis.asciidoc[] include::migrate_8_0/discovery.asciidoc[] include::migrate_8_0/mappings.asciidoc[] include::migrate_8_0/snapshots.asciidoc[] +include::migrate_8_0/security.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc new file mode 100644 index 000000000000..e09d21764f74 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -0,0 +1,18 @@ +[float] +[[breaking_80_security_changes]] +=== Security changes + +[float] +==== The `accept_default_password` setting has been removed + +The `xpack.security.authc.accept_default_password` setting has not had any affect +since the 6.0 release of {es}. It has been removed and cannot be used. 
+ +[float] +==== The `roles.index.cache.*` settings have been removed + +The `xpack.security.authz.store.roles.index.cache.max_size` and +`xpack.security.authz.store.roles.index.cache.ttl` settings have +been removed. These settings have been redundant and deprecated +since the 5.2 release of {es}. + diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index b134a626739f..1c51cda907cf 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -227,7 +227,7 @@ PUT _cluster/settings clusters are kept alive. If set to `-1`, application-level ping messages to this remote cluster are not sent. If unset, application-level ping messages are sent according to the global `transport.ping_schedule` setting, which - defaults to ``-1` meaning that pings are not sent. + defaults to `-1` meaning that pings are not sent. `cluster.remote.${cluster_alias}.transport.compress`:: @@ -237,6 +237,14 @@ PUT _cluster/settings Elasticsearch compresses the response. If unset, the global `transport.compress` is used as the fallback setting. +`cluster.remote.${cluster_alias}.proxy`:: + + Sets a proxy address for the specified remote cluster. By default this is not + set, meaning that Elasticsearch will connect directly to the nodes in the + remote cluster using their <>. + If this setting is set to an IP address or hostname then Elasticsearch will + connect to the nodes in the remote cluster using this address instead. + [float] [[retrieve-remote-clusters-info]] === Retrieving remote clusters info diff --git a/docs/reference/query-dsl/full-text-queries.asciidoc b/docs/reference/query-dsl/full-text-queries.asciidoc index 5fb5447dbb79..0af99b61f194 100644 --- a/docs/reference/query-dsl/full-text-queries.asciidoc +++ b/docs/reference/query-dsl/full-text-queries.asciidoc @@ -18,7 +18,12 @@ The queries in this group are: <>:: - The poor man's _search-as-you-type_. Like the `match_phrase` query, but does a wildcard search on the final word. + Like the `match_phrase` query, but does a wildcard search on the final word. + +<>:: + + Creates a `bool` query that matches each term as a `term` query, except for + the last term, which is matched as a `prefix` query <>:: @@ -50,6 +55,8 @@ include::match-phrase-query.asciidoc[] include::match-phrase-prefix-query.asciidoc[] +include::match-bool-prefix-query.asciidoc[] + include::multi-match-query.asciidoc[] include::common-terms-query.asciidoc[] diff --git a/docs/reference/query-dsl/match-bool-prefix-query.asciidoc b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc new file mode 100644 index 000000000000..623f2423d805 --- /dev/null +++ b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc @@ -0,0 +1,85 @@ +[[query-dsl-match-bool-prefix-query]] +=== Match Bool Prefix Query + +A `match_bool_prefix` query analyzes its input and constructs a +<> from the terms. Each term except the last +is used in a `term` query. The last term is used in a `prefix` query. 
A +`match_bool_prefix` query such as + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "match_bool_prefix" : { + "message" : "quick brown f" + } + } +} +-------------------------------------------------- +// CONSOLE + +where analysis produces the terms `quick`, `brown`, and `f` is similar to the +following `bool` query + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "bool" : { + "should": [ + { "term": { "message": "quick" }}, + { "term": { "message": "brown" }}, + { "prefix": { "message": "f"}} + ] + } + } +} +-------------------------------------------------- +// CONSOLE + +An important difference between the `match_bool_prefix` query and +<> is that the +`match_phrase_prefix` query matches its terms as a phrase, but the +`match_bool_prefix` query can match its terms in any position. The example +`match_bool_prefix` query above could match a field containing +`quick brown fox`, but it could also match `brown fox quick`. It could also +match a field containing the term `quick`, the term `brown` and a term +starting with `f`, appearing in any position. + +==== Parameters + +By default, `match_bool_prefix` queries' input text will be analyzed using the +analyzer from the queried field's mapping. A different search analyzer can be +configured with the `analyzer` parameter + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "match_bool_prefix" : { + "message": { + "query": "quick brown f", + "analyzer": "keyword" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +`match_bool_prefix` queries support the +<> and `operator` +parameters as described for the +<>, applying the setting to the +constructed `bool` query. The number of clauses in the constructed `bool` +query will in most cases be the number of terms produced by analysis of the +query text. + +The <>, `prefix_length`, +`max_expansions`, `fuzzy_transpositions`, and `fuzzy_rewrite` parameters can +be applied to the `term` subqueries constructed for all terms but the final +term. They do not have any effect on the prefix query constructed for the +final term. diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc index 73f1be9143cf..304eaf9a5b4f 100644 --- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc @@ -59,6 +59,6 @@ for appears. For better solutions for _search-as-you-type_ see the <> and -{defguide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type]. +the <>. =================================================== diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index b939364f1202..64c356ccaa80 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -202,7 +202,6 @@ process. It does not support field name prefixes, wildcard characters, or other "advanced" features. For this reason, chances of it failing are very small / non existent, and it provides an excellent behavior when it comes to just analyze and run that text as a query behavior (which is -usually what a text search box does). Also, the <> -type can provide a great "as you type" behavior to automatically load search results. +usually what a text search box does).
************************************************** diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 512eee4900b4..b8fbb61a950d 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -91,6 +91,10 @@ parameter, which can be set to: `phrase_prefix`:: Runs a `match_phrase_prefix` query on each field and combines the `_score` from each field. See <>. +`bool_prefix`:: Creates a `match_bool_prefix` query on each field and + combines the `_score` from each field. See + <>. + [[type-best-fields]] ==== `best_fields` @@ -516,3 +520,36 @@ per-term `blended` queries. It accepts: =================================================== The `fuzziness` parameter cannot be used with the `cross_fields` type. =================================================== + +[[type-bool-prefix]] +==== `bool_prefix` + +The `bool_prefix` type's scoring behaves like <>, but using a +<> instead of a +`match` query. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "multi_match" : { + "query": "quick brown f", + "type": "bool_prefix", + "fields": [ "subject", "message" ] + } + } +} +-------------------------------------------------- +// CONSOLE + +The `analyzer`, `boost`, `operator`, `minimum_should_match`, `lenient`, +`zero_terms_query`, and `auto_generate_synonyms_phrase_query` parameters as +explained in <> are supported. The +`fuzziness`, `prefix_length`, `max_expansions`, `rewrite`, and +`fuzzy_transpositions` parameters are supported for the terms that are used to +construct term queries, but do not have an effect on the prefix query +constructed from the final term. + +The `slop` and `cutoff_frequency` parameters are not supported by this query +type. diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index ee68d3e40fe1..56c4f7c41b8e 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -182,60 +182,44 @@ different from the query's vector, 0 is used for missing dimensions in the calculations of vector functions. -[[random-functions]] -===== Random functions -There are two predefined ways to produce random values: -`randomNotReproducible` and `randomReproducible`. +[[random-score-function]] +===== Random score function +The `random_score` function generates scores that are uniformly distributed +from 0 up to but not including 1. -`randomNotReproducible()` uses `java.util.Random` class -to generate a random value of the type `long`. -The generated values are not reproducible between requests' invocations. +The `randomScore` function has the following syntax: +`randomScore(<seed>, <fieldName>)`. +It takes a required integer parameter, `seed`, +and an optional string parameter, `fieldName`. [source,js] -------------------------------------------------- "script" : { - "source" : "randomNotReproducible()" + "source" : "randomScore(100, '_seq_no')" } -------------------------------------------------- // NOTCONSOLE - -`randomReproducible(String seedValue, int seed)` produces -reproducible random values of type `long`. This function requires -more computational time and memory than the non-reproducible version. - -A good candidate for the `seedValue` is document field values that -are unique across documents and already pre-calculated and preloaded -in the memory.
For example, values of the document's `_seq_no` field -is a good candidate, as documents on the same shard have unique values -for the `_seq_no` field. +If the `fieldName` parameter is omitted, the internal Lucene +document ids will be used as a source of randomness. This is very efficient, +but unfortunately not reproducible since documents might be renumbered +by merges. [source,js] -------------------------------------------------- "script" : { - "source" : "randomReproducible(Long.toString(doc['_seq_no'].value), 100)" + "source" : "randomScore(100)" } -------------------------------------------------- // NOTCONSOLE -A drawback of using `_seq_no` is that generated values change if -documents are updated. Another drawback is not absolute uniqueness, as -documents from different shards with the same sequence numbers -generate the same random values. - -If you need random values to be distinct across different shards, -you can use a field with unique values across shards, -such as `_id`, but watch out for the memory usage as all -these unique values need to be loaded into memory. - -[source,js] --------------------------------------------------- -"script" : { - "source" : "randomReproducible(doc['_id'].value, 100)" -} --------------------------------------------------- -// NOTCONSOLE +Note that documents that are within the same shard and have the +same value for the field will get the same score, so it is usually desirable +to use a field that has unique values for all documents across a shard. +A good default choice might be to use the `_seq_no` +field, whose only drawback is that scores will change if the document is +updated, since update operations also update the value of the `_seq_no` field. [[decay-functions]] @@ -349,8 +333,8 @@ the following script: ===== `random_score` -Use `randomReproducible` and `randomNotReproducible` functions -as described in <>. +Use the `randomScore` function +as described in <>. ===== `field_value_factor` diff --git a/docs/reference/release-notes/highlights-8.0.0.asciidoc b/docs/reference/release-notes/highlights-8.0.0.asciidoc index 7865a6f0ec76..995bbe241d29 100644 --- a/docs/reference/release-notes/highlights-8.0.0.asciidoc +++ b/docs/reference/release-notes/highlights-8.0.0.asciidoc @@ -6,13 +6,11 @@ coming[8.0.0] -See also <> and <>. +See also <> and <>. + +//NOTE: The notable-highlights tagged regions are re-used in the +//Installation and Upgrade Guide -//// -The following section is re-used in the Installation and Upgrade Guide -[[notable-highlights-8.0.0]] -=== Notable breaking changes -//// // tag::notable-highlights[] // end::notable-highlights[] diff --git a/docs/reference/rollup/apis/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc index 885d4e82cf6b..852f7b879fb3 100644 --- a/docs/reference/rollup/apis/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -69,7 +69,7 @@ In the above example, there are several pieces of logistical configuration for t `rollup_index` (required):: (string) The index that you wish to store rollup results into. All the rollup data that is generated by the job will be stored in this index. When searching the rollup data, this index will be used in the <> endpoint's URL. - The rollup index be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs. + The rollup index can be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs.
`cron` (required):: (string) A cron string which defines when the rollup job should be executed. The cron string defines an interval of when to run diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index b27e6f0ef0b5..c89dce3d2416 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -2,7 +2,9 @@ === Completion Suggester NOTE: In order to understand the format of suggestions, please -read the <> page first. +read the <> page first. For more flexible +search-as-you-type searches that do not use suggesters, see the +<>. The `completion` suggester provides auto-complete/search-as-you-type functionality. This is a navigational feature to guide users to diff --git a/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc b/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc index 82b3402edfaf..8a7792b52586 100644 --- a/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc @@ -12,7 +12,7 @@ IMPORTANT: Elastic does not endorse, promote or provide support for this applica ==== Prerequisites -* DBeaver version 5.1.4 or higher +* DBeaver version 6.0.0 or higher * {es-sql} <> ==== New Connection diff --git a/docs/reference/sql/endpoints/client-apps/index.asciidoc b/docs/reference/sql/endpoints/client-apps/index.asciidoc index 87f3c2f609d3..a84b8c2fb09e 100644 --- a/docs/reference/sql/endpoints/client-apps/index.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/index.asciidoc @@ -33,5 +33,5 @@ include::ps1.asciidoc[] include::microstrat.asciidoc[] include::qlik.asciidoc[] include::squirrel.asciidoc[] -include::tableau.asciidoc[] include::workbench.asciidoc[] +include::tableau.asciidoc[] diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 9b4e2fa74819..37f3d59ef641 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -124,7 +124,8 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que [float] ==== Mapping -`field.multi.value.leniency` (default `true`):: Whether to be lenient and return the first value for fields with multiple values (true) or throw an exception. +`field.multi.value.leniency` (default `true`):: Whether to be lenient and return the first value (without any guarantees of what that +will be - typically the first in natural ascending order) for fields with multiple values (true) or throw an exception. [float] ==== Additional diff --git a/docs/reference/sql/endpoints/odbc.asciidoc b/docs/reference/sql/endpoints/odbc.asciidoc index 1a7dd974281c..fd92a37dca65 100644 --- a/docs/reference/sql/endpoints/odbc.asciidoc +++ b/docs/reference/sql/endpoints/odbc.asciidoc @@ -9,9 +9,12 @@ [float] === Overview -{odbc} is a feature-rich 3.80 ODBC driver for {es}. -It is a core level driver, exposing all of the functionality accessible through the {es}'s SQL ODBC API, converting ODBC calls into -{es-sql}. +{odbc} is a 3.80 compliant ODBC driver for {es}. +It is a core level driver, exposing all of the functionality accessible through +{es}'s SQL API, converting ODBC calls into {es-sql}. + +In order to make use of the driver, the server must have {es-sql} installed and +running with a valid license.
* <> * <> diff --git a/docs/reference/sql/endpoints/odbc/configuration.asciidoc b/docs/reference/sql/endpoints/odbc/configuration.asciidoc index 8bda67ce063d..70ba437b6485 100644 --- a/docs/reference/sql/endpoints/odbc/configuration.asciidoc +++ b/docs/reference/sql/endpoints/odbc/configuration.asciidoc @@ -162,6 +162,8 @@ In case the server uses a certificate that is not part of the PKI, for example u + The driver will only read the contents of the file just before a connection is attempted. See <> section further on how to check the validity of the provided parameters. + +NOTE: The certificate file can not be bundled or password protected since the driver will not prompt for a password. ++ If using the file browser to locate the certificate - by pressing the _Browse..._ button - only files with _.pem_ and _.der_ extensions will be considered by default. Choose _All Files (\*.*)_ from the drop down, if your file ends with a different extension: + @@ -260,7 +262,95 @@ image:images/sql/odbc/env_var_log.png[] NOTE: When enabling the logging through the environment variable, the driver will create *one log file per process*. -Both ways of configuring the logging can coexist and both can use the same destination logging directory. However, one logging message -will only be logged once, the connection logging taking precedence over the environment variable logging. +Both ways of configuring the logging can coexist and both can use the same +destination logging directory. However, one logging message will only be logged +once, the connection logging taking precedence over the environment variable +logging. + +[[odbc-cfg-dsnparams]] +[float] +==== Connection string parameters + +The following is a list of additional parameters that can be configured for a +particular connection, in case the default behavior of the driver is not +suitable. This can be done within the client application, in a manner +particular to that application, generally in a free text input box (sometimes +named "Connection string", "String extras", or similar). The format of the +string is `Attribute1=Value1`. Multiple attributes can be specified, separated +by a semicolon `Attribute1=Value1;Attribute2=Value2;`. The attribute names are +given below. + +`Timeout` (default: `0`):: +The maximum time (in seconds) a request to the server can take. This can be +overridden by a larger statement-level timeout setting. The value 0 means no +timeout. + +`Follow` (default: `yes`):: +A boolean value (`yes`|`no` / `true`|`false` / `0`|`1`) controlling if the +driver will follow HTTP redirects. + + +`MaxFetchSize` (default: `0`):: +The maximum number of rows that {es-sql} server should send the driver for one +page. This corresponds to {es-sql}'s request parameter `fetch_size` (see +<>). The value 0 means server default. + + +`MaxBodySizeMB` (default: `100`):: +The maximum size (in megabytes) that an answer can grow to, before being +rejected as too large by the driver. +This is concerning the HTTP answer body of one page, not the cumulated data +volume that a query might generate. + + +`ApplyTZ` (default: `no`):: +A boolean value controlling the timezone of: + +* the context in which the query will execute (especially relevant for functions dealing with timestamp components); + +* the timestamps received from / sent to the server. +If disabled, the UTC timezone will apply; otherwise, the local machine's set +timezone. 
+ + +`ScientificFloats` (default: `default`):: +Controls how floating point numbers are printed when they are +converted to strings by the driver. Possible values given to this parameter: + +* `scientific`: the exponential notation (ex.: 1.23E01); + +* `default`: the default notation (ex.: 12.3); + +* `auto`: the driver will choose one of the above depending on the value to be +printed. +Note that the number of decimals is dependent on the precision (or ODBC scale) +of the value being printed and varies with the different floating point types +supported by {es-sql}. +This setting has no effect when the application fetches the values from the +driver as numbers and performs the conversion itself. + + +`VersionChecking` (default: `strict`):: +By default, the version of the driver and that of the server must be the same. +This parameter allows the driver to connect to a server of a different version. +However, the versions may only differ in the minor number; both endpoints must +have the same major version number. +Possible values: + +* `strict`: the versions must be in sync; + +* `major`: the versions must have the same major number. + +WARNING: This parameter should only be used for troubleshooting purposes. Running +with versions out of sync is not supported. + + +`MultiFieldLenient` (default: `true`):: +This boolean parameter controls the behavior of the server in case a +multi-value field is queried. If this is set and the server encounters +such a field, it will pick a value in the set - without any guarantees of what +that will be, but typically the first in natural ascending order - and return +it as the value for the column. If not set, the server will return an error. +This corresponds to {es-sql}'s request parameter `field_multi_value_leniency` +(see <>). -// vim: set noet fenc=utf-8 ff=dos sts=0 sw=4 ts=4 tw=138 diff --git a/docs/reference/sql/endpoints/odbc/installation.asciidoc b/docs/reference/sql/endpoints/odbc/installation.asciidoc index 08f0c66ee2a8..3a024e443d7c 100644 --- a/docs/reference/sql/endpoints/odbc/installation.asciidoc +++ b/docs/reference/sql/endpoints/odbc/installation.asciidoc @@ -21,6 +21,11 @@ If you fail to meet any of the prerequisites the installer will show an error me NOTE: It is not possible to inline upgrade using the MSI. In order to upgrade, you will first have to uninstall the old driver and then install the new driver. +NOTE: When installing the MSI, the Windows Defender SmartScreen might warn +about running an unrecognized app. If the MSI has been downloaded from +Elastic's website, it is safe to acknowledge the message by allowing the +installation to continue (`Run anyway`). + [[download]] ==== Download the `.msi` package(s) diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc index 30bf23fbaa22..cc0f06cb3bb5 100644 --- a/docs/reference/sql/functions/aggs.asciidoc +++ b/docs/reference/sql/functions/aggs.asciidoc @@ -6,10 +6,12 @@ Functions for computing a _single_ result from a set of input values. {es-sql} supports aggregate functions only alongside <> (implicit or explicit).
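To make the GROUP BY requirement concrete, here is a minimal sketch of an aggregate used once with an explicit grouping and once with an implicit one. The `emp` table and its `gender` and `salary` columns are hypothetical placeholders for illustration only; they are not part of this change.

[source, sql]
----
-- explicit grouping: one average per gender bucket
SELECT gender, AVG(salary) AS avg_salary FROM emp GROUP BY gender;

-- implicit grouping: the whole result set forms a single bucket
SELECT AVG(salary) AS avg_salary FROM emp;
----

In the implicit case there is no GROUP BY clause, so the aggregate is computed over all matching rows as one group.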
-==== General Purpose +[[sql-functions-aggs-general]] +[float] +=== General Purpose [[sql-functions-aggs-avg]] -===== `AVG` +==== `AVG` .Synopsis: [source, sql] @@ -29,11 +31,11 @@ Returns the https://en.wikipedia.org/wiki/Arithmetic_mean[Average] (arithmetic m ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggAvg] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggAvg] -------------------------------------------------- [[sql-functions-aggs-count]] -===== `COUNT` +==== `COUNT` .Synopsis: [source, sql] @@ -58,12 +60,12 @@ In case of `COUNT()` `null` values are not considered. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggCountStar] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountStar] -------------------------------------------------- [[sql-functions-aggs-count-all]] -===== `COUNT(ALL)` +==== `COUNT(ALL)` .Synopsis: [source, sql] @@ -83,12 +85,12 @@ Returns the total number (count) of all _non-null_ input values. `COUNT(>. [[sql-functions-aggs-last]] -===== `LAST/LAST_VALUE` +==== `LAST/LAST_VALUE` .Synopsis: [source, sql] @@ -270,29 +272,29 @@ s| LAST(a, b) ["source","sql",subs="attributes,macros"] ----------------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[lastWithOneArg] +include-tagged::{sql-specs}/docs/docs.csv-spec[lastWithOneArg] ----------------------------------------------------------- ["source","sql",subs="attributes,macros"] ------------------------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[lastWithOneArgAndGroupBy] +include-tagged::{sql-specs}/docs/docs.csv-spec[lastWithOneArgAndGroupBy] ------------------------------------------------------------------- ["source","sql",subs="attributes,macros"] ----------------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[lastWithTwoArgs] +include-tagged::{sql-specs}/docs/docs.csv-spec[lastWithTwoArgs] ----------------------------------------------------------- ["source","sql",subs="attributes,macros"] -------------------------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[lastWithTwoArgsAndGroupBy] +include-tagged::{sql-specs}/docs/docs.csv-spec[lastWithTwoArgsAndGroupBy] -------------------------------------------------------------------- `LAST_VALUE` is a name alias and can be used instead of `LAST`, e.g.: ["source","sql",subs="attributes,macros"] ------------------------------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[lastValueWithTwoArgsAndGroupBy] +include-tagged::{sql-specs}/docs/docs.csv-spec[lastValueWithTwoArgsAndGroupBy] ------------------------------------------------------------------------- [NOTE] @@ -302,7 +304,7 @@ include-tagged::{sql-specs}/docs.csv-spec[lastValueWithTwoArgsAndGroupBy] the field is also <>. [[sql-functions-aggs-max]] -===== `MAX` +==== `MAX` .Synopsis: [source, sql] @@ -322,7 +324,7 @@ Returns the maximum value across input values in the field `field_name`. 
["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggMax] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggMax] -------------------------------------------------- [NOTE] @@ -330,7 +332,7 @@ include-tagged::{sql-specs}/docs.csv-spec[aggMax] <> and therefore, it cannot be used in `HAVING` clause. [[sql-functions-aggs-min]] -===== `MIN` +==== `MIN` .Synopsis: [source, sql] @@ -350,7 +352,7 @@ Returns the minimum value across input values in the field `field_name`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggMin] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggMin] -------------------------------------------------- [NOTE] @@ -358,7 +360,7 @@ include-tagged::{sql-specs}/docs.csv-spec[aggMin] <> and therefore, it cannot be used in `HAVING` clause. [[sql-functions-aggs-sum]] -===== `SUM` +==== `SUM` .Synopsis: [source, sql] @@ -378,13 +380,15 @@ Returns the sum of input values in the field `field_name`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggSum] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggSum] -------------------------------------------------- -==== Statistics +[[sql-functions-aggs-statistics]] +[float] +=== Statistics [[sql-functions-aggs-kurtosis]] -===== `KURTOSIS` +==== `KURTOSIS` .Synopsis: [source, sql] @@ -404,11 +408,11 @@ https://en.wikipedia.org/wiki/Kurtosis[Quantify] the shape of the distribution o ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggKurtosis] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggKurtosis] -------------------------------------------------- [[sql-functions-aggs-mad]] -===== `MAD` +==== `MAD` .Synopsis: [source, sql] @@ -428,11 +432,11 @@ https://en.wikipedia.org/wiki/Median_absolute_deviation[Measure] the variability ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggMad] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggMad] -------------------------------------------------- [[sql-functions-aggs-percentile]] -===== `PERCENTILE` +==== `PERCENTILE` .Synopsis: [source, sql] @@ -454,11 +458,11 @@ of input values in the field `field_name`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggPercentile] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentile] -------------------------------------------------- [[sql-functions-aggs-percentile-rank]] -===== `PERCENTILE_RANK` +==== `PERCENTILE_RANK` .Synopsis: [source, sql] @@ -480,11 +484,11 @@ of input values in the field `field_name`. 
["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggPercentileRank] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentileRank] -------------------------------------------------- [[sql-functions-aggs-skewness]] -===== `SKEWNESS` +==== `SKEWNESS` .Synopsis: [source, sql] @@ -504,11 +508,11 @@ https://en.wikipedia.org/wiki/Skewness[Quantify] the asymmetric distribution of ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggSkewness] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggSkewness] -------------------------------------------------- [[sql-functions-aggs-stddev-pop]] -===== `STDDEV_POP` +==== `STDDEV_POP` .Synopsis: [source, sql] @@ -528,11 +532,11 @@ Returns the https://en.wikipedia.org/wiki/Standard_deviations[population standar ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggStddevPop] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevPop] -------------------------------------------------- [[sql-functions-aggs-sum-squares]] -===== `SUM_OF_SQUARES` +==== `SUM_OF_SQUARES` .Synopsis: [source, sql] @@ -552,11 +556,11 @@ Returns the https://en.wikipedia.org/wiki/Total_sum_of_squares[sum of squares] o ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggSumOfSquares] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggSumOfSquares] -------------------------------------------------- [[sql-functions-aggs-var-pop]] -===== `VAR_POP` +==== `VAR_POP` .Synopsis: [source, sql] @@ -576,5 +580,5 @@ Returns the https://en.wikipedia.org/wiki/Variance[population variance] of input ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[aggVarPop] +include-tagged::{sql-specs}/docs/docs.csv-spec[aggVarPop] -------------------------------------------------- diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index da011089222e..ce8d5c3e66ce 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -37,22 +37,21 @@ If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[coalesceReturnNonNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNonNull] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[coalesceReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNull] ---- - -[[sql-functions-conditional-ifnull]] -==== `IFNULL` +[[sql-functions-conditional-greatest]] +==== `GREATEST` .Synopsis: [source, sql] ---- -IFNULL(expression<1>, expression<2>) +GREATEST(expression<1>, expression<2>, ...) ---- *Input*: @@ -61,35 +60,39 @@ IFNULL(expression<1>, expression<2>) <2> 2nd expression +... -*Output*: 2nd expression if 1st expression is null, otherwise 1st expression. +**N**th expression + +GREATEST can take an arbitrary number of arguments and +all of them must be of the same data type. + +*Output*: one of the expressions or `null` .Description -Variant of <> with only two arguments. -Returns the first of its arguments that is not null. 
+Returns the argument that has the largest value which is not null. If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[ifNullReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNonNull] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[ifNullReturnSecond] +include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull] ---- - -[[sql-functions-conditional-isnull]] -==== `ISNULL` +[[sql-functions-conditional-ifnull]] +==== `IFNULL` .Synopsis: [source, sql] ---- -ISNULL(expression<1>, expression<2>) +IFNULL(expression<1>, expression<2>) ---- *Input*: @@ -111,22 +114,22 @@ If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[isNullReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[isNullReturnSecond] +include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond] ---- -[[sql-functions-conditional-nvl]] -==== `NVL` +[[sql-functions-conditional-isnull]] +==== `ISNULL` .Synopsis: [source, sql] ---- -NVL(expression<1>, expression<2>) +ISNULL(expression<1>, expression<2>) ---- *Input*: @@ -148,22 +151,22 @@ If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[nvlReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[nvlReturnSecond] +include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond] ---- -[[sql-functions-conditional-nullif]] -==== `NULLIF` +[[sql-functions-conditional-least]] +==== `LEAST` .Synopsis: [source, sql] ---- -NULLIF(expression<1>, expression<2>) +LEAST(expression<1>, expression<2>, ...) ---- *Input*: @@ -172,33 +175,40 @@ NULLIF(expression<1>, expression<2>) <2> 2nd expression +... -*Output*: `null` if the 2 expressions are equal, otherwise the 1st expression. +**N**th expression + +LEAST can take an arbitrary number of arguments and +all of them must be of the same data type. + +*Output*: one of the expressions or `null` .Description -Returns `null` when the two input expressions are equal and -if not, it returns the 1st expression. +Returns the argument that has the smallest value which is not null. +If all arguments are null, then it returns `null`. + ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[nullIfReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNonNull] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[nullIfReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull] ---- -[[sql-functions-conditional-greatest]] -==== `GREATEST` +[[sql-functions-conditional-nullif]] +==== `NULLIF` .Synopsis: [source, sql] ---- -GREATEST(expression<1>, expression<2>, ...) +NULLIF(expression<1>, expression<2>) ---- *Input*: @@ -207,40 +217,33 @@ GREATEST(expression<1>, expression<2>, ...) <2> 2nd expression -... - -**N**th expression -GREATEST can take an arbitrary number of arguments and -all of them must be of the same data type. 
- -*Output*: one of the expressions or `null` +*Output*: `null` if the 2 expressions are equal, otherwise the 1st expression. .Description -Returns the argument that has the largest value which is not null. -If all arguments are null, then it returns `null`. - +Returns `null` when the two input expressions are equal and +if not, it returns the 1st expression. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[greatestReturnNonNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[greatestReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull] ---- -[[sql-functions-conditional-least]] -==== `LEAST` +[[sql-functions-conditional-nvl]] +==== `NVL` .Synopsis: [source, sql] ---- -LEAST(expression<1>, expression<2>, ...) +NVL(expression<1>, expression<2>) ---- *Input*: @@ -249,28 +252,25 @@ LEAST(expression<1>, expression<2>, ...) <2> 2nd expression -... - -**N**th expression - -LEAST can take an arbitrary number of arguments and -all of them must be of the same data type. -*Output*: one of the expressions or `null` +*Output*: 2nd expression if 1st expression is null, otherwise 1st expression. .Description -Returns the argument that has the smallest value which is not null. +Variant of <> with only two arguments. +Returns the first of its arguments that is not null. If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[leastReturnNonNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[leastReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnSecond] ---- + + diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index de173f3af06f..d4beaa34bd77 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -59,32 +59,32 @@ Basic arithmetic operators (`+`, `-`, etc) support date/time parameters as indic ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dtIntervalPlusInterval] +include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalPlusInterval] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dtDateTimePlusInterval] +include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateTimePlusInterval] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dtMinusInterval] +include-tagged::{sql-specs}/docs/docs.csv-spec[dtMinusInterval] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dtIntervalMinusInterval] +include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalMinusInterval] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- 
-include-tagged::{sql-specs}/docs.csv-spec[dtDateTimeMinusInterval] +include-tagged::{sql-specs}/docs/docs.csv-spec[dtDateTimeMinusInterval] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dtIntervalMul] +include-tagged::{sql-specs}/docs/docs.csv-spec[dtIntervalMul] -------------------------------------------------- ==== Functions @@ -117,12 +117,12 @@ This method always returns the same value for its every occurrence within the sa ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[curDate] +include-tagged::{sql-specs}/docs/docs.csv-spec[curDate] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[curDateFunction] +include-tagged::{sql-specs}/docs/docs.csv-spec[curDateFunction] -------------------------------------------------- Typically, this function (as well as its twin <> function @@ -130,7 +130,7 @@ is used for relative date filtering: ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[filterToday] +include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday] -------------------------------------------------- [[sql-functions-current-timestamp]] @@ -160,17 +160,17 @@ This method always returns the same value for its every occurrence within the sa ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[curTs] +include-tagged::{sql-specs}/docs/docs.csv-spec[curTs] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[curTsFunction] +include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunction] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[curTsFunctionPrecision] +include-tagged::{sql-specs}/docs/docs.csv-spec[curTsFunctionPrecision] -------------------------------------------------- Typically, this function (as well as its twin <> function is used for @@ -178,7 +178,7 @@ relative date/time filtering: ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[filterNow] +include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] -------------------------------------------------- [[sql-functions-datetime-day]] @@ -202,7 +202,7 @@ Extract the day of the month from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dayOfMonth] +include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfMonth] -------------------------------------------------- [[sql-functions-datetime-dow]] @@ -226,7 +226,7 @@ Extract the day of the week from a date/datetime. 
Sunday is `1`, Monday is `2`, ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dayOfWeek] +include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfWeek] -------------------------------------------------- [[sql-functions-datetime-doy]] @@ -250,7 +250,7 @@ Extract the day of the year from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dayOfYear] +include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear] -------------------------------------------------- [[sql-functions-datetime-dayname]] @@ -274,7 +274,7 @@ Extract the day of the week from a date/datetime in text format (`Monday`, `Tues ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dayName] +include-tagged::{sql-specs}/docs/docs.csv-spec[dayName] -------------------------------------------------- [[sql-functions-datetime-hour]] @@ -298,7 +298,7 @@ Extract the hour of the day from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[hourOfDay] +include-tagged::{sql-specs}/docs/docs.csv-spec[hourOfDay] -------------------------------------------------- [[sql-functions-datetime-isodow]] @@ -323,7 +323,7 @@ Monday is `1`, Tuesday is `2`, etc. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[isoDayOfWeek] +include-tagged::{sql-specs}/docs/docs.csv-spec[isoDayOfWeek] -------------------------------------------------- [[sql-functions-datetime-isoweek]] @@ -348,7 +348,7 @@ of a year is the first week with a majority (4 or more) of its days in January. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[isoWeekOfYear] +include-tagged::{sql-specs}/docs/docs.csv-spec[isoWeekOfYear] -------------------------------------------------- [[sql-functions-datetime-minuteofday]] @@ -372,7 +372,7 @@ Extract the minute of the day from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[minuteOfDay] +include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfDay] -------------------------------------------------- [[sql-functions-datetime-minute]] @@ -396,7 +396,7 @@ Extract the minute of the hour from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[minuteOfHour] +include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfHour] -------------------------------------------------- [[sql-functions-datetime-month]] @@ -420,7 +420,7 @@ Extract the month of the year from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[monthOfYear] +include-tagged::{sql-specs}/docs/docs.csv-spec[monthOfYear] -------------------------------------------------- [[sql-functions-datetime-monthname]] @@ -444,7 +444,7 @@ Extract the month from a date/datetime in text format (`January`, `February`...) 
["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[monthName] +include-tagged::{sql-specs}/docs/docs.csv-spec[monthName] -------------------------------------------------- [[sql-functions-now]] @@ -468,7 +468,7 @@ occurrence within the same query. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[nowFunction] +include-tagged::{sql-specs}/docs/docs.csv-spec[nowFunction] -------------------------------------------------- Typically, this function (as well as its twin <> function is used @@ -476,7 +476,7 @@ for relative date/time filtering: ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[filterNow] +include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] -------------------------------------------------- [[sql-functions-datetime-second]] @@ -500,7 +500,7 @@ Extract the second of the minute from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[secondOfMinute] +include-tagged::{sql-specs}/docs/docs.csv-spec[secondOfMinute] -------------------------------------------------- [[sql-functions-datetime-quarter]] @@ -524,7 +524,7 @@ Extract the year quarter the date/datetime falls in. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[quarter] +include-tagged::{sql-specs}/docs/docs.csv-spec[quarter] -------------------------------------------------- [[sql-functions-today]] @@ -548,7 +548,7 @@ within the same query. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[todayFunction] +include-tagged::{sql-specs}/docs/docs.csv-spec[todayFunction] -------------------------------------------------- Typically, this function (as well as its twin <> function is used @@ -556,7 +556,7 @@ for relative date filtering: ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[filterToday] +include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday] -------------------------------------------------- [[sql-functions-datetime-week]] @@ -580,7 +580,7 @@ Extract the week of the year from a date/datetime. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[weekOfYear] +include-tagged::{sql-specs}/docs/docs.csv-spec[weekOfYear] -------------------------------------------------- [[sql-functions-datetime-year]] @@ -604,7 +604,7 @@ Extract the year from a date/datetime. 
["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[year] +include-tagged::{sql-specs}/docs/docs.csv-spec[year] -------------------------------------------------- [[sql-functions-datetime-extract]] @@ -630,12 +630,12 @@ The following ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[extractDayOfYear] +include-tagged::{sql-specs}/docs/docs.csv-spec[extractDayOfYear] -------------------------------------------------- is the equivalent to ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[dayOfYear] +include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear] -------------------------------------------------- diff --git a/docs/reference/sql/functions/grouping.asciidoc b/docs/reference/sql/functions/grouping.asciidoc index 2b353b832425..d3a57f0d3a81 100644 --- a/docs/reference/sql/functions/grouping.asciidoc +++ b/docs/reference/sql/functions/grouping.asciidoc @@ -41,14 +41,14 @@ NOTE:: The histogram in SQL does *NOT* return empty buckets for missing interval ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[histogramNumeric] +include-tagged::{sql-specs}/docs/docs.csv-spec[histogramNumeric] ---- or date/time fields: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[histogramDateTime] +include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTime] ---- Expressions inside the histogram are also supported as long as the @@ -56,14 +56,14 @@ return type is numeric: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[histogramNumericExpression] +include-tagged::{sql-specs}/docs/docs.csv-spec[histogramNumericExpression] ---- Do note that histograms (and grouping functions in general) allow custom expressions but cannot have any functions applied to them in the `GROUP BY`. In other words, the following statement is *NOT* allowed: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[expressionOnHistogramNotAllowed] +include-tagged::{sql-specs}/docs/docs.csv-spec[expressionOnHistogramNotAllowed] ---- as it requires two groupings (one for histogram followed by a second for applying the function on top of the histogram groups). 
@@ -72,7 +72,7 @@ Instead one can rewrite the query to move the expression on the histogram _insid ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[histogramDateTimeExpression] +include-tagged::{sql-specs}/docs/docs.csv-spec[histogramDateTimeExpression] ---- [IMPORTANT] diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 94b5f767f86f..6e966403ce0e 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -7,14 +7,114 @@ * <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> +** <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> +** <> +** <> +** <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> -* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> +** <> +** <> include::operators.asciidoc[] include::aggs.asciidoc[] diff --git a/docs/reference/sql/functions/math.asciidoc b/docs/reference/sql/functions/math.asciidoc index 258bd81f2b17..a520a89ebaa2 100644 --- a/docs/reference/sql/functions/math.asciidoc +++ b/docs/reference/sql/functions/math.asciidoc @@ -6,10 +6,12 @@ All math and trigonometric functions require their input (where applicable) to be numeric. -==== Generic +[[sql-functions-math-generic]] +[float] +=== Generic [[sql-functions-math-abs]] -===== `ABS` +==== `ABS` .Synopsis: [source, sql] @@ -29,11 +31,11 @@ Returns the https://en.wikipedia.org/wiki/Absolute_value[absolute value] of `num ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[abs] +include-tagged::{sql-specs}/docs/docs.csv-spec[abs] -------------------------------------------------- [[sql-functions-math-cbrt]] -===== `CBRT` +==== `CBRT` .Synopsis: [source, sql] @@ -53,11 +55,11 @@ Returns the https://en.wikipedia.org/wiki/Cube_root[cube root] of `numeric_exp`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineCbrtWithNegativeValue] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCbrtWithNegativeValue] -------------------------------------------------- [[sql-functions-math-ceil]] -===== `CEIL/CEILING` +==== `CEIL/CEILING` .Synopsis: [source, sql] @@ -77,11 +79,11 @@ Returns the smallest integer greater than or equal to `numeric_exp`. 
["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineCeiling] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCeiling] -------------------------------------------------- [[sql-functions-math-e]] -===== `E` +==== `E` .Synopsis: [source, sql] @@ -99,11 +101,11 @@ Returns https://en.wikipedia.org/wiki/E_%28mathematical_constant%29[Euler's numb ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathEulersNumber] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathEulersNumber] -------------------------------------------------- [[sql-functions-math-exp]] -===== `EXP` +==== `EXP` .Synopsis: [source, sql] @@ -123,11 +125,11 @@ Returns https://en.wikipedia.org/wiki/Exponential_function[Euler's number at the ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathExpInline] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpInline] -------------------------------------------------- [[sql-functions-math-expm1]] -===== `EXPM1` +==== `EXPM1` .Synopsis: [source, sql] @@ -147,11 +149,11 @@ Returns https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-doub ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathExpm1Inline] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpm1Inline] -------------------------------------------------- [[sql-functions-math-floor]] -===== `FLOOR` +==== `FLOOR` .Synopsis: [source, sql] @@ -171,11 +173,11 @@ Returns the largest integer less than or equal to `numeric_exp`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineFloor] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineFloor] -------------------------------------------------- [[sql-functions-math-log]] -===== `LOG` +==== `LOG` .Synopsis: [source, sql] @@ -195,11 +197,11 @@ Returns the https://en.wikipedia.org/wiki/Natural_logarithm[natural logarithm] o ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineLog] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineLog] -------------------------------------------------- [[sql-functions-math-log10]] -===== `LOG10` +==== `LOG10` .Synopsis: [source, sql] @@ -219,11 +221,11 @@ Returns the https://en.wikipedia.org/wiki/Common_logarithm[base 10 logarithm] of ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineLog10] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineLog10] -------------------------------------------------- [[sql-functions-math-pi]] -===== `PI` +==== `PI` .Synopsis: [source, sql] @@ -241,11 +243,11 @@ Returns https://en.wikipedia.org/wiki/Pi[PI number]. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathPINumber] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathPINumber] -------------------------------------------------- [[sql-functions-math-power]] -===== `POWER` +==== `POWER` .Synopsis: [source, sql] @@ -266,16 +268,16 @@ Returns the value of `numeric_exp` to the power of `integer_exp`. 
["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlinePowerPositive] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlinePowerPositive] -------------------------------------------------- ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlinePowerNegative] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlinePowerNegative] -------------------------------------------------- [[sql-functions-math-random]] -===== `RANDOM/RAND` +==== `RANDOM/RAND` .Synopsis: [source, sql] @@ -295,11 +297,11 @@ Returns a random double using the given seed. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathRandom] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathRandom] -------------------------------------------------- [[sql-functions-math-round]] -===== `ROUND` +==== `ROUND` .Synopsis: [source, sql] @@ -321,16 +323,16 @@ of `numeric_exp`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathRoundWithPositiveParameter] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathRoundWithPositiveParameter] -------------------------------------------------- ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathRoundWithNegativeParameter] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathRoundWithNegativeParameter] -------------------------------------------------- [[sql-functions-math-sign]] -===== `SIGN/SIGNUM` +==== `SIGN/SIGNUM` .Synopsis: [source, sql] @@ -350,12 +352,12 @@ Returns an indicator of the sign of `numeric_exp`. If `numeric_exp` is less than ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineSign] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSign] -------------------------------------------------- [[sql-functions-math-sqrt]] -===== `SQRT` +==== `SQRT` .Synopsis: [source, sql] @@ -375,11 +377,11 @@ Returns https://en.wikipedia.org/wiki/Square_root[square root] of `numeric_exp`. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineSqrt] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSqrt] -------------------------------------------------- [[sql-functions-math-truncate]] -===== `TRUNCATE` +==== `TRUNCATE` .Synopsis: [source, sql] @@ -401,18 +403,20 @@ of `numeric_exp`. 
["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathTruncateWithPositiveParameter] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathTruncateWithPositiveParameter] -------------------------------------------------- ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathTruncateWithNegativeParameter] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathTruncateWithNegativeParameter] -------------------------------------------------- -==== Trigonometric +[[sql-functions-math-trigonometric]] +[float] +=== Trigonometric [[sql-functions-math-acos]] -===== `ACOS` +==== `ACOS` .Synopsis: [source, sql] @@ -432,11 +436,11 @@ Returns the https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arccos ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineAcos] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAcos] -------------------------------------------------- [[sql-functions-math-asin]] -===== `ASIN` +==== `ASIN` .Synopsis: [source, sql] @@ -456,11 +460,11 @@ Returns the https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arcsin ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineAsin] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAsin] -------------------------------------------------- [[sql-functions-math-atan]] -===== `ATAN` +==== `ATAN` .Synopsis: [source, sql] @@ -480,11 +484,11 @@ Returns the https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arctan ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineAtan] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan] -------------------------------------------------- [[sql-functions-math-atan2]] -===== `ATAN2` +==== `ATAN2` .Synopsis: [source, sql] @@ -505,11 +509,11 @@ Returns the https://en.wikipedia.org/wiki/Atan2[arctangent of the `ordinate` and ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineAtan2] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan2] -------------------------------------------------- [[sql-functions-math-cos]] -===== `COS` +==== `COS` .Synopsis: [source, sql] @@ -529,11 +533,11 @@ Returns the https://en.wikipedia.org/wiki/Trigonometric_functions#cosine[cosine] ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineCosine] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosine] -------------------------------------------------- [[sql-functions-math-cosh]] -===== `COSH` +==== `COSH` .Synopsis: [source, sql] @@ -553,11 +557,11 @@ Returns the https://en.wikipedia.org/wiki/Hyperbolic_function[hyperbolic cosine] ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineCosh] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosh] -------------------------------------------------- [[sql-functions-math-cot]] -===== `COT` +==== `COT` .Synopsis: [source, sql] @@ -577,11 +581,11 @@ Returns the 
https://en.wikipedia.org/wiki/Trigonometric_functions#Cosecant,_seca ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineCotangent] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCotangent] -------------------------------------------------- [[sql-functions-math-degrees]] -===== `DEGREES` +==== `DEGREES` .Synopsis: [source, sql] @@ -602,11 +606,11 @@ to https://en.wikipedia.org/wiki/Degree_(angle)[degrees]. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineDegrees] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineDegrees] -------------------------------------------------- [[sql-functions-math-radians]] -===== `RADIANS` +==== `RADIANS` .Synopsis: [source, sql] @@ -627,11 +631,11 @@ to https://en.wikipedia.org/wiki/Radian[radians]. ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineRadians] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineRadians] -------------------------------------------------- [[sql-functions-math-sin]] -===== `SIN` +==== `SIN` .Synopsis: [source, sql] @@ -651,11 +655,11 @@ Returns the https://en.wikipedia.org/wiki/Trigonometric_functions#sine[sine] of ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineSine] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSine] -------------------------------------------------- [[sql-functions-math-sinh]] -===== `SINH` +==== `SINH` .Synopsis: [source, sql] @@ -675,11 +679,11 @@ Returns the https://en.wikipedia.org/wiki/Hyperbolic_function[hyperbolic sine] o ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineSinh] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSinh] -------------------------------------------------- [[sql-functions-math-tan]] -===== `TAN` +==== `TAN` .Synopsis: [source, sql] @@ -699,5 +703,5 @@ Returns the https://en.wikipedia.org/wiki/Trigonometric_functions#tangent[tangen ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[mathInlineTanget] +include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineTanget] -------------------------------------------------- diff --git a/docs/reference/sql/functions/operators.asciidoc b/docs/reference/sql/functions/operators.asciidoc index 77ec9d93ff2d..e6cab5836b29 100644 --- a/docs/reference/sql/functions/operators.asciidoc +++ b/docs/reference/sql/functions/operators.asciidoc @@ -16,12 +16,12 @@ include-tagged::{sql-specs}/filter.sql-spec[whereFieldEquality] ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[nullEqualsCompareWithNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareWithNull] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[nullEqualsCompareTwoNulls] +include-tagged::{sql-specs}/docs/docs.csv-spec[nullEqualsCompareTwoNulls] -------------------------------------------------- * Inequality (`<>` or `!=`) @@ -142,5 +142,5 @@ 
include-tagged::{sql-specs}/arithmetic.sql-spec[mod] ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[conversionStringToLongCastOperator] +include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToLongCastOperator] -------------------------------------------------- diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc index 954dc45e3b95..0534271caa91 100644 --- a/docs/reference/sql/functions/search.asciidoc +++ b/docs/reference/sql/functions/search.asciidoc @@ -8,6 +8,118 @@ when the `MATCH` or `QUERY` predicates are being used. Outside a, so-called, search context, these functions will return default values such as `0` or `NULL`. +[[sql-functions-search-match]] +==== `MATCH` + +.Synopsis: +[source, sql] +-------------------------------------------------- +MATCH(field_exp<1>, constant_exp<2>[, options]<3>) +-------------------------------------------------- + +*Input*: + +<1> field(s) to match +<2> matching text +<3> additional parameters; optional + +.Description: + +A full-text search option, in the form of a predicate, available in {es-sql} that gives the user control over powerful <> +and <> {es} queries. + +The first parameter is the field or fields to match against. In case it receives one value only, {es-sql} will use a `match` query to perform the search: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[simpleMatch] +---- + +However, it can also receive a list of fields and their corresponding optional `boost` value. In this case, {es-sql} will use a +`multi_match` query to match the documents: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[multiFieldsMatch] +---- + +NOTE: The `multi_match` query in {es} has the option of <> that gives preferential weight +(in terms of scoring) to fields being searched in, using the `^` character. In the example above, the `name` field has a greater weight in +the final score than the `author` field when searching for `frank dune` text in both of them. + +Both options above can be used in combination with the optional third parameter of the `MATCH()` predicate, where one can specify +additional configuration parameters (separated by semicolon `;`) for either `match` or `multi_match` queries. For example: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParamsForMatch] +---- + +In the more advanced example above, the `cutoff_frequency` parameter allows specifying an absolute or relative document frequency where +high frequency terms are moved into an optional subquery and are only scored if one of the low frequency (below the cutoff) terms in the +case of an `or` operator or all of the low frequency terms in the case of an `and` operator match. More about this you can find in the +<> page. + +NOTE: The allowed optional parameters for a single-field `MATCH()` variant (for the `match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, +`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`max_expansions`, `prefix_length`. 
+ +NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for the `multi_match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, +`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`max_expansions`, `prefix_length`, `slop`, `tie_breaker`, `type`. + + +[[sql-functions-search-query]] +==== `QUERY` + +.Synopsis: +[source, sql] +-------------------------------------------------- +QUERY(constant_exp<1>[, options]<2>) +-------------------------------------------------- + +*Input*: + +<1> query text +<2> additional parameters; optional + +.Description: + +Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user control over the <> query in {es}. + +The first parameter is basically the input that will be passed as is to the `query_string` query, which means that anything that `query_string` +accepts in its `query` field can be used here as well: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[simpleQueryQuery] +---- + +A more advanced example, showing more of the features that `query_string` supports, of course possible with {es-sql}: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[advancedQueryQuery] +---- + +The query above uses the `_exists_` query to select documents that have values in the `author` field, a range query for `page_count` and +regex and fuzziness queries for the `name` field. + +If one needs to customize various configuration options that `query_string` exposes, this can be done using the second _optional_ parameter. +Multiple settings can be specified separated by a semicolon `;`: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParameterQuery] +---- + +NOTE: The allowed optional parameters for `QUERY()` are: `allow_leading_wildcard`, `analyze_wildcard`, `analyzer`, +`auto_generate_synonyms_phrase_query`, `default_field`, `default_operator`, `enable_position_increments`, +`escape`, `fuzziness`, `fuzzy_max_expansions`, `fuzzy_prefix_length`, `fuzzy_rewrite`, `fuzzy_transpositions`, +`lenient`, `max_determinized_states`, `minimum_should_match`, `phrase_slop`, `rewrite`, `quote_analyzer`, +`quote_field_suffix`, `tie_breaker`, `time_zone`, `type`. 
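For orientation, a hand-written sketch of a `QUERY()` call that combines query text with two of the optional parameters listed above. The `library` index and its `author`/`name` fields are assumed from the surrounding examples, and the options follow the semicolon-separated `key=value` format described earlier:

[source, sql]
----
-- query_string syntax in the first argument, optional settings in the second
SELECT author, name
FROM library
WHERE QUERY('name:dune', 'default_operator=AND;default_field=name');
----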
+ + [[sql-functions-search-score]] ==== `SCORE` @@ -34,12 +146,12 @@ Typically `SCORE` is used for ordering the results of a query based on their rel ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[orderByScore] +include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] ---- However, it is perfectly fine to return the score without sorting by it: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[scoreWithMatch] +include-tagged::{sql-specs}/docs/docs.csv-spec[scoreWithMatch] ---- diff --git a/docs/reference/sql/functions/string.asciidoc b/docs/reference/sql/functions/string.asciidoc index 04b2937e6f9c..45389cd410e4 100644 --- a/docs/reference/sql/functions/string.asciidoc +++ b/docs/reference/sql/functions/string.asciidoc @@ -26,7 +26,7 @@ Returns the ASCII code value of the leftmost character of `string_exp` as an int ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringAscii] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringAscii] -------------------------------------------------- [[sql-functions-string-bit-length]] @@ -49,7 +49,7 @@ Returns the length in bits of the `string_exp` input expression. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringBitLength] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringBitLength] -------------------------------------------------- [[sql-functions-string-char]] @@ -72,7 +72,7 @@ Returns the character that has the ASCII code value specified by the numeric inp ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringChar] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringChar] -------------------------------------------------- [[sql-functions-string-char-length]] @@ -95,7 +95,7 @@ Returns the length in characters of the input, if the string expression is of a ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringCharLength] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringCharLength] -------------------------------------------------- [[sql-functions-string-concat]] @@ -119,7 +119,7 @@ Returns a character string that is the result of concatenating `string_exp1` to ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringConcat] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringConcat] -------------------------------------------------- [[sql-functions-string-insert]] @@ -145,7 +145,7 @@ Returns a string where `length` characters have been deleted from `source`, begi ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringInsert] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringInsert] -------------------------------------------------- [[sql-functions-string-lcase]] @@ -168,7 +168,7 @@ Returns a string equal to that in `string_exp`, with all uppercase characters co ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringLCase] 
+include-tagged::{sql-specs}/docs/docs.csv-spec[stringLCase] -------------------------------------------------- [[sql-functions-string-left]] @@ -192,7 +192,7 @@ Returns the leftmost count characters of `string_exp`. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringLeft] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringLeft] -------------------------------------------------- [[sql-functions-string-length]] @@ -215,7 +215,7 @@ Returns the number of characters in `string_exp`, excluding trailing blanks. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringLength] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringLength] -------------------------------------------------- [[sql-functions-string-locate]] @@ -240,12 +240,12 @@ Returns the starting position of the first occurrence of `pattern` within `sourc ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringLocateWoStart] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWoStart] -------------------------------------------------- ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringLocateWithStart] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWithStart] -------------------------------------------------- [[sql-functions-string-ltrim]] @@ -268,7 +268,7 @@ Returns the characters of `string_exp`, with leading blanks removed. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringLTrim] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringLTrim] -------------------------------------------------- [[sql-functions-string-octet-length]] @@ -291,7 +291,7 @@ Returns the length in bytes of the `string_exp` input expression. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringOctetLength] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringOctetLength] -------------------------------------------------- [[sql-functions-string-position]] @@ -315,7 +315,7 @@ Returns the position of the `string_exp1` in `string_exp2`. The result is an exa ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringPosition] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringPosition] -------------------------------------------------- [[sql-functions-string-repeat]] @@ -339,7 +339,7 @@ Returns a character string composed of `string_exp` repeated `count` times. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringRepeat] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringRepeat] -------------------------------------------------- [[sql-functions-string-replace]] @@ -364,7 +364,7 @@ Search `source` for occurrences of `pattern`, and replace with `replacement`. 
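For instance, every occurrence of the pattern is replaced, not just the first one; a hand-written sketch (illustrative only): `REPLACE('2019-03-11', '-', '/')` returns `2019/03/11`.

[source, sql]
----
-- replace all occurrences of '-' with '/'
SELECT REPLACE('2019-03-11', '-', '/') AS replaced;
----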
["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringReplace] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringReplace] -------------------------------------------------- [[sql-functions-string-right]] @@ -388,7 +388,7 @@ Returns the rightmost count characters of `string_exp`. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringRight] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringRight] -------------------------------------------------- [[sql-functions-string-rtrim]] @@ -411,7 +411,7 @@ Returns the characters of `string_exp` with trailing blanks removed. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringRTrim] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringRTrim] -------------------------------------------------- [[sql-functions-string-space]] @@ -434,7 +434,7 @@ Returns a character string consisting of `count` spaces. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringSpace] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringSpace] -------------------------------------------------- [[sql-functions-string-substring]] @@ -459,7 +459,7 @@ Returns a character string that is derived from `source`, beginning at the chara ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringSubString] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringSubString] -------------------------------------------------- [[sql-functions-string-ucase]] @@ -482,5 +482,5 @@ Returns a string equal to that of the input, with all lowercase characters conve ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[stringUCase] +include-tagged::{sql-specs}/docs/docs.csv-spec[stringUCase] -------------------------------------------------- diff --git a/docs/reference/sql/functions/system.asciidoc b/docs/reference/sql/functions/system.asciidoc index f9549d8ccb36..dfca7d526d3a 100644 --- a/docs/reference/sql/functions/system.asciidoc +++ b/docs/reference/sql/functions/system.asciidoc @@ -26,7 +26,7 @@ value. ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[database] +include-tagged::{sql-specs}/docs/docs.csv-spec[database] -------------------------------------------------- [[sql-functions-system-user]] @@ -48,5 +48,5 @@ return `null` in case {stack-ov}/elasticsearch-security.html[Security] is disabl ["source","sql",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[user] +include-tagged::{sql-specs}/docs/docs.csv-spec[user] -------------------------------------------------- diff --git a/docs/reference/sql/functions/type-conversion.asciidoc b/docs/reference/sql/functions/type-conversion.asciidoc index a696183d911d..2187d5a2e929 100644 --- a/docs/reference/sql/functions/type-conversion.asciidoc +++ b/docs/reference/sql/functions/type-conversion.asciidoc @@ -25,19 +25,24 @@ the value itself cannot be converted), the query fails. 
["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntCast] +include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntCast] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[conversionIntToStringCast] +include-tagged::{sql-specs}/docs/docs.csv-spec[conversionIntToStringCast] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[conversionStringToDateTimeCast] +include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToDateTimeCast] ---- +IMPORTANT: Both ANSI SQL and {es-sql} types are supported with the former taking +precedence. This only affects `FLOAT` which due naming conflict, is interpreted as ANSI SQL +and thus maps to `double` in {es} as oppose to `float`. +To obtain an {es} `float`, perform casting to its SQL equivalent, `real` type. + [[sql-functions-type-conversion-convert]] ==== `CONVERT` @@ -59,10 +64,10 @@ https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/explicit-data-typ ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntConvertODBCDataType] +include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntConvertODBCDataType] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntConvertESDataType] +include-tagged::{sql-specs}/docs/docs.csv-spec[conversionStringToIntConvertESDataType] ---- diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index dc649e2434da..8f839a88a342 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -4,7 +4,7 @@ = SQL access :sql-tests: {xes-repo-dir}/../../plugin/sql/qa -:sql-specs: {sql-tests}/src/main/resources +:sql-specs: {sql-tests}/src/main/resources/ :jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc :security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/sql/qa/security :es-sql: Elasticsearch SQL diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index e59fd528fd80..42e5c842a418 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -21,9 +21,9 @@ s|SQL precision | <> | long | BIGINT | 19 | <> | double | DOUBLE | 15 | <> | float | REAL | 7 -| <> | half_float | FLOAT | 16 -| <> | scaled_float | FLOAT | 19 -| <> | keyword | VARCHAR | based on <> +| <> | half_float | FLOAT | 3 +| <> | scaled_float | DOUBLE | 15 +| <> | keyword | VARCHAR | 32,766 | <> | text | VARCHAR | 2,147,483,647 | <> | binary | VARBINARY | 2,147,483,647 | <> | datetime | TIMESTAMP | 24 diff --git a/docs/reference/sql/language/index-patterns.asciidoc b/docs/reference/sql/language/index-patterns.asciidoc index 2633a8b25e7c..44f951d36a02 100644 --- a/docs/reference/sql/language/index-patterns.asciidoc +++ b/docs/reference/sql/language/index-patterns.asciidoc @@ -14,7 +14,7 @@ For example: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] ---- Notice the pattern is surrounded by double quotes `"`. 
It enumerated `*` meaning all indices however @@ -28,7 +28,7 @@ For example: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[fromTablePatternQuoted] +include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] ---- NOTE: There is the restriction that all resolved concrete tables have the exact same mapping. @@ -42,7 +42,7 @@ Using `SHOW TABLES` command again: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeWildcard] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- The pattern matches all tables that start with `emp`. @@ -51,7 +51,7 @@ This command supports _escaping_ as well, for example: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeEscape] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeEscape] ---- Notice how now `emp%` does not match any tables because `%`, which means match zero or more characters, diff --git a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc index ebefe9bc34b6..81f2d386c26e 100644 --- a/docs/reference/sql/language/syntax/commands/describe-table.asciidoc +++ b/docs/reference/sql/language/syntax/commands/describe-table.asciidoc @@ -28,5 +28,5 @@ DESC [table identifier<1>|[LIKE pattern<2>]] ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[describeTable] +include-tagged::{sql-specs}/docs/docs.csv-spec[describeTable] ---- diff --git a/docs/reference/sql/language/syntax/commands/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc index 1cd5ba28dd13..26fdb2f337eb 100644 --- a/docs/reference/sql/language/syntax/commands/select.asciidoc +++ b/docs/reference/sql/language/syntax/commands/select.asciidoc @@ -38,7 +38,7 @@ As with a table, every output column of a `SELECT` has a name which can be eithe ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[selectColumnAlias] +include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumnAlias] ---- Note: `AS` is an optional keyword however it helps with the readability and in some case ambiguity of the query @@ -48,14 +48,14 @@ assigned by {es-sql} if no name is given: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[selectInline] +include-tagged::{sql-specs}/docs/docs.csv-spec[selectInline] ---- or if it's a simple column reference, use its name as the column name: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[selectColumn] +include-tagged::{sql-specs}/docs/docs.csv-spec[selectColumn] ---- [[sql-syntax-select-wildcard]] @@ -65,13 +65,12 @@ To select all the columns in the source, one can use `*`: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[wildcardWithOrder] +include-tagged::{sql-specs}/docs/docs.csv-spec[wildcardWithOrder] ---- which essentially returns all(top-level fields, sub-fields, such as multi-fields are ignored] columns found. [[sql-syntax-from]] -[float] ==== FROM Clause The `FROM` clause specifies one table for the `SELECT` and has the following syntax: @@ -92,14 +91,14 @@ If the table name contains special SQL characters (such as `.`,`-`,`*`,etc...) 
u ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[fromTableQuoted] +include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableQuoted] ---- The name can be a <> pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that *all* resolved concrete tables have **exact mapping**. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[fromTablePatternQuoted] +include-tagged::{sql-specs}/docs/docs.csv-spec[fromTablePatternQuoted] ---- `alias`:: @@ -107,11 +106,10 @@ A substitute name for the `FROM` item containing the alias. An alias is used for ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[fromTableAlias] +include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableAlias] ---- [[sql-syntax-where]] -[float] ==== WHERE Clause The optional `WHERE` clause is used to filter rows from the query and has the following syntax: @@ -129,11 +127,10 @@ Represents an expression that evaluates to a `boolean`. Only the rows that match ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[basicWhere] +include-tagged::{sql-specs}/docs/docs.csv-spec[basicWhere] ---- [[sql-syntax-group-by]] -[float] ==== GROUP BY The `GROUP BY` clause is used to divide the results into groups of rows on matching values from the designated columns. It has the following syntax: @@ -153,34 +150,34 @@ A common, group by column name: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByColumn] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByColumn] ---- Grouping by output ordinal: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByOrdinal] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByOrdinal] ---- Grouping by alias: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByAlias] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAlias] ---- And grouping by column expression (typically used along-side an alias): ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByExpression] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByExpression] ---- Or a mixture of the above: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByMulti] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByMulti] ---- @@ -190,25 +187,24 @@ To wit: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByAndAgg] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndAgg] ---- Expressions over aggregates used in output: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByAndAggExpression] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndAggExpression] ---- Multiple aggregates used: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByAndMultipleAggs] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndMultipleAggs] ---- [[sql-syntax-group-by-implicit]] -[float] ===== Implicit Grouping When an aggregation is used without an associated `GROUP BY`, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single default, or implicit group. 
@@ -218,18 +214,17 @@ A common example is counting the number of records: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByImplicitCount] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitCount] ---- Of course, multiple aggregations can be applied: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByImplicitMultipleAggs] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitMultipleAggs] ---- [[sql-syntax-having]] -[float] ==== HAVING The `HAVING` clause can be used _only_ along aggregate functions (and thus `GROUP BY`) to filter what groups are kept or not and has the following syntax: @@ -252,18 +247,17 @@ Both `WHERE` and `HAVING` are used for filtering however there are several signi ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByHaving] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHaving] ---- Further more, one can use multiple aggregate expressions inside `HAVING` even ones that are not used in the output (`SELECT`): ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByHavingMultiple] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingMultiple] ---- [[sql-syntax-having-group-by-implicit]] -[float] ===== Implicit Grouping As indicated above, it is possible to have a `HAVING` clause without a `GROUP BY`. In this case, the so-called <> is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. @@ -273,19 +267,18 @@ In this example, `HAVING` matches: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByHavingImplicitMatch] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitMatch] ---- //However `HAVING` can also not match, in which case an empty result is returned: // //["source","sql",subs="attributes,callouts,macros"] //---- -//include-tagged::{sql-specs}/docs.csv-spec[groupByHavingImplicitNoMatch] +//include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitNoMatch] //---- [[sql-syntax-order-by]] -[float] ==== ORDER BY The `ORDER BY` clause is used to sort the results of `SELECT` by one or more expressions: @@ -309,7 +302,7 @@ For example, the following query sorts by an arbitrary input field (`page_count` ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[orderByBasic] +include-tagged::{sql-specs}/docs/docs.csv-spec[orderByBasic] ---- [[sql-syntax-order-by-grouping]] @@ -323,20 +316,20 @@ For example, to order groups simply indicate the grouping key: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[orderByGroup] +include-tagged::{sql-specs}/docs/docs.csv-spec[orderByGroup] ---- Multiple keys can be specified of course: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[groupByMulti] +include-tagged::{sql-specs}/docs/docs.csv-spec[groupByMulti] ---- Further more, it is possible to order groups based on aggregations of their values: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[orderByAgg] +include-tagged::{sql-specs}/docs/docs.csv-spec[orderByAgg] ---- IMPORTANT: Ordering by aggregation is possible for up to 512 entries for memory consumption 
reasons. @@ -357,7 +350,7 @@ To sort based on the `score`, use the special function `SCORE()`: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[orderByScore] +include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScore] ---- Note that you can return `SCORE()` by using a full-text search predicate in the `WHERE` clause. @@ -365,7 +358,7 @@ This is possible even if `SCORE()` is not used for sorting: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[orderByScoreWithMatch] +include-tagged::{sql-specs}/docs/docs.csv-spec[orderByScoreWithMatch] ---- NOTE: @@ -373,7 +366,6 @@ Trying to return `score` from a non full-text query will return the same value f all are equally relevant. [[sql-syntax-limit]] -[float] ==== LIMIT The `LIMIT` clause restricts (limits) the number of rows returns using the format: @@ -393,5 +385,5 @@ To return ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[limitBasic] +include-tagged::{sql-specs}/docs/docs.csv-spec[limitBasic] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc index 927430003628..6b6acc0c4ac5 100644 --- a/docs/reference/sql/language/syntax/commands/show-columns.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-columns.asciidoc @@ -21,6 +21,6 @@ List the columns in table and their data type (and other attributes). ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showColumns] +include-tagged::{sql-specs}/docs/docs.csv-spec[showColumns] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc index d77aa008586b..e12c4efed590 100644 --- a/docs/reference/sql/language/syntax/commands/show-functions.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-functions.asciidoc @@ -17,7 +17,7 @@ List all the SQL functions and their type. The `LIKE` clause can be used to rest ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showFunctions] +include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctions] ---- The list of functions returned can be customized based on the pattern. @@ -25,23 +25,23 @@ The list of functions returned can be customized based on the pattern. 
It can be an exact match: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeExact] +include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeExact] ---- A wildcard for exactly one character: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeChar] +include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeChar] ---- A wildcard matching zero or more characters: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showFunctionsLikeWildcard] +include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsLikeWildcard] ---- Or of course, a variation of the above: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showFunctionsWithPattern] +include-tagged::{sql-specs}/docs/docs.csv-spec[showFunctionsWithPattern] ---- diff --git a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc index 5748ae318062..28b5ad4c75d5 100644 --- a/docs/reference/sql/language/syntax/commands/show-tables.asciidoc +++ b/docs/reference/sql/language/syntax/commands/show-tables.asciidoc @@ -22,7 +22,7 @@ List the tables available to the current user and their type. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTables] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTables] ---- Match multiple indices by using {es} <> @@ -30,7 +30,7 @@ notation: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesEsMultiIndex] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesEsMultiIndex] ---- One can also use the `LIKE` clause to restrict the list of names to the given pattern. 
@@ -38,24 +38,24 @@ One can also use the `LIKE` clause to restrict the list of names to the given pa The pattern can be an exact match: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeExact] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeExact] ---- Multiple chars: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeWildcard] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- A single char: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeOneChar] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeOneChar] ---- Or a mixture of single and multiple chars: ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs.csv-spec[showTablesLikeMixed] +include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeMixed] ---- diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index 182f6a76bb8f..e8c99901e27c 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -94,14 +94,14 @@ Using sub-selects (`SELECT X FROM (SELECT Y)`) is **supported to a small degree* ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[limitationSubSelect] +include-tagged::{sql-specs}/docs/docs.csv-spec[limitationSubSelect] -------------------------------------------------- The query above is possible because it is equivalent with: ["source","sql",subs="attributes,macros"] -------------------------------------------------- -include-tagged::{sql-specs}/docs.csv-spec[limitationSubSelectRewritten] +include-tagged::{sql-specs}/docs/docs.csv-spec[limitationSubSelectRewritten] -------------------------------------------------- But, if the sub-select would include a `GROUP BY` or `HAVING` or the enclosing `SELECT` would be more complex than `SELECT X diff --git a/docs/reference/upgrade/close-ml.asciidoc b/docs/reference/upgrade/close-ml.asciidoc index e30210b16a1a..179d4f9adff3 100644 --- a/docs/reference/upgrade/close-ml.asciidoc +++ b/docs/reference/upgrade/close-ml.asciidoc @@ -11,33 +11,35 @@ POST _ml/set_upgrade_mode?enabled=false // TEARDOWN //////////// -If your {ml} indices were created earlier than the previous major version, they -must be reindexed. In those circumstances, there must be no machine learning -jobs running during the upgrade. - -In all other circumstances, there is no requirement to close your {ml} jobs. -There are, however, advantages to doing so. If you choose to leave your jobs -running during the upgrade, they are affected when you stop the {ml} nodes. The -jobs move to another {ml} node and restore the model states. This scenario has -the least disruption to the active {ml} jobs but incurs the highest load on the -cluster. - -To close all {ml} jobs before you upgrade, see -{stack-ov}/stopping-ml.html[Stopping {ml}]. This method persists the model -state at the moment of closure, which means that when you open your jobs after -the upgrade, they use the exact same model. This scenario takes the most time, -however, especially if you have many jobs or jobs with large model states. 
- -To temporarily halt the tasks associated with your {ml} jobs and {dfeeds} and -prevent new jobs from opening, use the <>: +If your {ml} indices were created before {prev-major-version}, you must +<>. +If your {ml} indices were created in {prev-major-version}, you can: + +* Leave your {ml} jobs running during the upgrade. When you shut down a +{ml} node, its jobs automatically move to another node and restore the model +states. This option enables your jobs to continue running during the upgrade but +it puts increased load on the cluster. + +* Temporarily halt the tasks associated with your {ml} jobs and {dfeeds} and +prevent new jobs from opening by using the +<>: ++ +-- [source,js] -------------------------------------------------- POST _ml/set_upgrade_mode?enabled=true -------------------------------------------------- // CONSOLE -This method does not persist the absolute latest model state, rather it uses the -last model state that was automatically saved. By halting the tasks, you avoid -incurring the cost of managing active jobs during the upgrade and it's quicker -than stopping {dfeeds} and closing jobs. +When you disable upgrade mode, the jobs resume using the last model +state that was automatically saved. This option avoids the overhead of managing +active jobs during the upgrade and is faster than explicitly stopping {dfeeds} +and closing jobs. +-- + +* {stack-ov}/stopping-ml.html[Stop all {dfeeds} and close all jobs]. This option +saves the model state at the time of closure. When you reopen the jobs after the +upgrade, they use the exact same model. However, saving the latest model state +takes longer than using upgrade mode, especially if you have a lot of jobs or +jobs with large model states. diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 1402bbd9bb71..20786857993a 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -26,7 +26,7 @@ recovery. include::synced-flush.asciidoc[] -- -. *Stop any machine learning jobs that are running.* +. *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) + -- include::close-ml.asciidoc[] diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc index abd40336e9b0..839488f541f5 100644 --- a/docs/reference/upgrade/disable-shard-alloc.asciidoc +++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc @@ -3,17 +3,18 @@ When you shut down a node, the allocation process waits for `index.unassigned.node_left.delayed_timeout` (by default, one minute) before starting to replicate the shards on that node to other nodes in the cluster, which can involve a lot of I/O. Since the node is shortly going to be -restarted, this I/O is unnecessary. You can avoid racing the clock by disabling -allocation before shutting down the node: +restarted, this I/O is unnecessary. 
You can avoid racing the clock by +<> of replicas before shutting down +the node: [source,js] -------------------------------------------------- PUT _cluster/settings { "persistent": { - "cluster.routing.allocation.enable": "none" + "cluster.routing.allocation.enable": "primaries" } } -------------------------------------------------- // CONSOLE -// TEST[skip:indexes don't assign] \ No newline at end of file +// TEST[skip:indexes don't assign] diff --git a/docs/reference/upgrade/reindex_upgrade.asciidoc b/docs/reference/upgrade/reindex_upgrade.asciidoc index 1dc20ec6e709..7fc6c320263c 100644 --- a/docs/reference/upgrade/reindex_upgrade.asciidoc +++ b/docs/reference/upgrade/reindex_upgrade.asciidoc @@ -59,9 +59,10 @@ ifdef::include-xpack[] [TIP] ==== If you use {ml-features} and your {ml} indices were created before -{prev-major-version}, you must -{stack-ov}/stopping-ml.html[stop all {dfeeds} and close all {ml} jobs] before -you reindex the indices. +{prev-major-version}, you must temporarily halt the tasks associated with your +{ml} jobs and {dfeeds} and prevent new jobs from opening during the reindex. Use +the <> or +{stack-ov}/stopping-ml.html[stop all {dfeeds} and close all {ml} jobs]. If you use {es} {security-features}, before you reindex `.security*` internal indices it is a good idea to create a temporary superuser account in the `file` @@ -112,6 +113,17 @@ indices from the previous major version to be upgraded to the current major version. Skipping a major version means that you must resolve any backward compatibility issues yourself. +ifdef::include-xpack[] +If you use {ml-features} and you're migrating indices from a 6.5 or earlier +cluster, the job and {dfeed} configuration information are not stored in an +index. You must recreate your {ml} jobs in the new cluster. If you are migrating +from a 6.6 or later cluster, it is a good idea to temporarily halt the tasks +associated with your {ml} jobs and {dfeeds} to prevent inconsistencies between +different {ml} indices that are reindexed at slightly different times. Use the +<> or +{stack-ov}/stopping-ml.html[stop all {dfeeds} and close all {ml} jobs]. +endif::include-xpack[] + ============================================= To migrate your indices: @@ -184,4 +196,4 @@ monitor progress of the reindex job with the <>: `30s` and `1`). .. Once reindexing is complete and the status of the new index is `green`, - you can delete the old index. + you can delete the old index. \ No newline at end of file diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index 3d8038db85a5..e9c290d3bdb5 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -36,7 +36,7 @@ include::synced-flush.asciidoc[] -- -. *Stop any machine learning jobs that are running.* +. 
*Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) + -- include::close-ml.asciidoc[] diff --git a/docs/ruby/client.asciidoc b/docs/ruby/client.asciidoc index 2037ae1a0b28..074c77d41b03 100644 --- a/docs/ruby/client.asciidoc +++ b/docs/ruby/client.asciidoc @@ -1,3 +1,4 @@ +[[ruby_client]] == The Ruby Client The `elasticsearch` http://rubygems.org/gems/elasticsearch[Rubygem] provides a low-level client diff --git a/docs/ruby/copyright.asciidoc b/docs/ruby/copyright.asciidoc index 3747cc572e40..8a84be27636f 100644 --- a/docs/ruby/copyright.asciidoc +++ b/docs/ruby/copyright.asciidoc @@ -1,3 +1,4 @@ +[[copyright]] == Copyright and License This software is Copyright (c) 2013-2018 by Elasticsearch BV. diff --git a/docs/ruby/model.asciidoc b/docs/ruby/model.asciidoc index 0b0be45708fa..62339bb23914 100644 --- a/docs/ruby/model.asciidoc +++ b/docs/ruby/model.asciidoc @@ -1,3 +1,4 @@ +[[activemodel_activerecord]] == ActiveModel / ActiveRecord The `elasticsearch-model` http://rubygems.org/gems/elasticsearch-model[Rubygem] diff --git a/docs/ruby/persistence.asciidoc b/docs/ruby/persistence.asciidoc index 7d361978ee70..5306dae47c66 100644 --- a/docs/ruby/persistence.asciidoc +++ b/docs/ruby/persistence.asciidoc @@ -1,3 +1,4 @@ +[[persistence]] == Persistence The `elasticsearch-persistence` http://rubygems.org/gems/elasticsearch-persistence[Rubygem] diff --git a/docs/ruby/rails.asciidoc b/docs/ruby/rails.asciidoc index 1fef3f42381a..213258c7e226 100644 --- a/docs/ruby/rails.asciidoc +++ b/docs/ruby/rails.asciidoc @@ -1,3 +1,4 @@ +[[ruby_on_rails]] == Ruby On Rails The `elasticsearch-rails` http://rubygems.org/gems/elasticsearch-rails[Rubygem] diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index b4a6c4975486..bbc6a64dcdb2 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -19,11 +19,12 @@ package org.elasticsearch.smoketest; -import org.apache.http.HttpHost; -import org.apache.lucene.util.BytesRef; - import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.http.HttpHost; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.TimeUnits; import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.ParseField; @@ -48,12 +49,13 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +//The default 20 minutes timeout isn't always enough, please do not increase further than 30 before analyzing what makes this suite so slow +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public DocsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 87b738cbd051..5c2d1cf016b3 100644 Binary files 
a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 4ace7103b8ce..8d172843af1d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.2.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.3-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=9dc729f6dbfbbc4df1692665d301e028976dacac296a126f16148941a9cf012e +distributionSha256Sum=f4d820c2a9685710eba5b92f10e0e4fb20e0d6c0dd1f46971e658160f25e7147 diff --git a/gradlew b/gradlew index af6708ff229f..b0d6d0ab5deb 100755 --- a/gradlew +++ b/gradlew @@ -1,5 +1,21 @@ #!/usr/bin/env sh +# +# Copyright 2015 the original author or authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + ############################################################################## ## ## Gradle start up script for UN*X @@ -28,7 +44,7 @@ APP_NAME="Gradle" APP_BASE_NAME=`basename "$0"` # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m"' +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD="maximum" diff --git a/gradlew.bat b/gradlew.bat index 0f8d5937c4ad..15e1ee37a70d 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -1,3 +1,19 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + @if "%DEBUG%" == "" @echo off @rem ########################################################################## @rem @@ -14,7 +30,7 @@ set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
-set DEFAULT_JVM_OPTS="-Xmx64m" +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" @rem Find java.exe if defined JAVA_HOME goto findJavaFromJavaHome diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index e02f9f176246..adcbf6ef1bee 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -25,7 +25,7 @@ import java.util.Map; /** - * Wrapper for a XContentParser that makes a single object to look like a complete document. + * Wrapper for a XContentParser that makes a single object/array look like a complete document. * * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. The wrapper is intended to be @@ -39,8 +39,8 @@ public class XContentSubParser implements XContentParser { public XContentSubParser(XContentParser parser) { this.parser = parser; - if (parser.currentToken() != Token.START_OBJECT) { - throw new IllegalStateException("The sub parser has to be created on the start of an object"); + if (parser.currentToken() != Token.START_OBJECT && parser.currentToken() != Token.START_ARRAY) { + throw new IllegalStateException("The sub parser has to be created on the start of an object or array"); } level = 1; } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 51bb5c3c65f6..fa6ffdd0407f 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -151,6 +151,12 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; + private static BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE); + private static BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE); + // weak bounds on the BigDecimal representation to allow for coercion + private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same * value and {@code coerce} is false. */ @@ -163,7 +169,11 @@ private static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { - BigDecimal bigDecimalValue = new BigDecimal(stringValue); + final BigDecimal bigDecimalValue = new BigDecimal(stringValue); + if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 || + bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); @@ -171,11 +181,11 @@ private static long toLong(String stringValue, boolean coerce) { throw new IllegalArgumentException("For input string: \"" + stringValue + "\""); } - if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 || - bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + if (bigIntegerValue.compareTo(LONG_MAX_VALUE_AS_BIGINTEGER) > 0 || bigIntegerValue.compareTo(LONG_MIN_VALUE_AS_BIGINTEGER) < 0) { throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); } + assert bigIntegerValue.longValueExact() <= Long.MAX_VALUE; // asserting that no ArithmeticException is thrown return bigIntegerValue.longValue(); } diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 5dbe7be40f31..0fe8a2b9f91f 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.core.JsonParseException; + import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -329,7 +330,7 @@ public void testNestedMapInList() throws IOException { } } - public void testSubParser() throws IOException { + public void testSubParserObject() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens; numberOfTokens = generateRandomObjectForMarking(builder); @@ -354,6 +355,7 @@ public void testSubParser() throws IOException { // And sometimes skipping children subParser.skipChildren(); } + } finally { assertFalse(subParser.isClosed()); subParser.close(); @@ -367,6 +369,50 @@ public void testSubParser() throws IOException { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/40617") + public void testSubParserArray() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + int numberOfArrayElements = randomInt(10); + builder.startObject(); + builder.field("array"); + builder.startArray(); + int numberOfTokens = 0; + for (int i = 0; i < numberOfArrayElements; ++i) { + numberOfTokens += generateRandomObjectForMarking(builder); + } + builder.endArray(); + builder.endObject(); + + String content = Strings.toString(builder); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // array field + assertEquals("array", parser.currentName()); + assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); // [ + XContentParser subParser = new XContentSubParser(parser); + try { + int tokensToSkip = randomInt(numberOfTokens - 1); + for (int i = 0; i < tokensToSkip; i++) { + // Simulate incomplete parsing + assertNotNull(subParser.nextToken()); + } + if (randomBoolean()) { + // And sometimes skipping children + subParser.skipChildren(); + } + + } finally { + assertFalse(subParser.isClosed()); + subParser.close(); + assertTrue(subParser.isClosed()); + } + 
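A minimal usage sketch of the relaxed XContentSubParser on an array, mirroring the new (still @AwaitsFix-annotated) test above; the JSON content, field name, and the direct JsonXContent/empty-registry setup are illustrative assumptions rather than part of this change.

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentSubParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class SubParserArraySketch {
    public static void main(String[] args) throws Exception {
        String json = "{\"array\": [{\"a\": 1}, {\"b\": 2}], \"after\": true}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            parser.nextToken(); // START_OBJECT
            parser.nextToken(); // FIELD_NAME "array"
            parser.nextToken(); // START_ARRAY - previously the constructor below threw IllegalStateException here
            try (XContentParser sub = new XContentSubParser(parser)) {
                while (sub.nextToken() != null) {
                    // consume (or abandon) the array; closing the sub parser skips whatever is left
                }
            }
            // the wrapped parser is left on END_ARRAY, so the rest of the outer object can be parsed normally
            System.out.println(parser.currentToken());
        }
    }
}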
assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + public void testCreateSubParserAtAWrongPlace() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); generateRandomObjectForMarking(builder); @@ -377,7 +423,7 @@ public void testCreateSubParserAtAWrongPlace() throws IOException { assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // first field assertEquals("first_field", parser.currentName()); IllegalStateException exception = expectThrows(IllegalStateException.class, () -> new XContentSubParser(parser)); - assertEquals("The sub parser has to be created on the start of an object", exception.getMessage()); + assertEquals("The sub parser has to be created on the start of an object or array", exception.getMessage()); } } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 1681258e7c7e..b6179eb852ae 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -29,9 +29,6 @@ dependencies { compile project(':libs:dissect') } -compileJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" -compileTestJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" - integTestCluster { module project(':modules:lang-painless') } \ No newline at end of file diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java index 792e5e4ebed2..546519aa5f60 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java @@ -80,8 +80,8 @@ protected Factory(String processorType) { } @Override - public AbstractStringProcessor create(Map registry, String tag, - Map config) throws Exception { + public AbstractStringProcessor create(Map registry, String tag, + Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(processorType, tag, config, "field"); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(processorType, tag, config, "ignore_missing", false); String targetField = ConfigurationUtils.readStringProperty(processorType, tag, config, "target_field", field); @@ -89,7 +89,7 @@ public AbstractStringProcessor create(Map registry, S return newProcessor(tag, config, field, ignoreMissing, targetField); } - protected abstract AbstractStringProcessor newProcessor(String processorTag, Map config, String field, - boolean ignoreMissing, String targetField); + protected abstract AbstractStringProcessor newProcessor(String processorTag, Map config, String field, + boolean ignoreMissing, String targetField); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java index d07b56e1b3df..8de75878f5fe 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java @@ -27,7 +27,7 @@ * Processor that converts the content of string fields to the byte value. 
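The toLong() change further above rejects values outside the long range before any BigInteger is materialized. A self-contained sketch of that ordering (plain JDK code, not the Elasticsearch implementation):

import java.math.BigDecimal;
import java.math.BigInteger;

public class ToLongBoundsSketch {
    // weak bounds, as in the change above: anything at or beyond these cannot fit in a long
    private static final BigDecimal UPPER = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE);
    private static final BigDecimal LOWER = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE);

    static long toLong(String value, boolean coerce) {
        BigDecimal decimal = new BigDecimal(value);
        if (decimal.compareTo(UPPER) >= 0 || decimal.compareTo(LOWER) <= 0) {
            // rejected before toBigInteger(), so a value like "1e100000" never allocates a large BigInteger
            throw new IllegalArgumentException("Value [" + value + "] is out of range for a long");
        }
        BigInteger integer = coerce ? decimal.toBigInteger() : decimal.toBigIntegerExact();
        return integer.longValueExact(); // cannot overflow thanks to the bounds check above
    }

    public static void main(String[] args) {
        System.out.println(toLong("42.0", true));                  // 42 (fraction dropped when coercing)
        System.out.println(toLong("9223372036854775807", false));  // Long.MAX_VALUE is still accepted
        try {
            toLong("1e100000", true);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}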
* Throws exception is the field is not of type string or can not convert to the numeric byte value */ -public final class BytesProcessor extends AbstractStringProcessor { +public final class BytesProcessor extends AbstractStringProcessor { public static final String TYPE = "bytes"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java index 39553910692f..9f3e656bba4b 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java @@ -29,7 +29,7 @@ * Processor that allows to search for patterns in field content and replace them with corresponding string replacement. * Support fields of string type only, throws exception if a field is of a different type. */ -public final class GsubProcessor extends AbstractStringProcessor { +public final class GsubProcessor extends AbstractStringProcessor { public static final String TYPE = "gsub"; @@ -67,8 +67,8 @@ public Factory() { } @Override - protected AbstractStringProcessor newProcessor(String processorTag, Map config, String field, - boolean ignoreMissing, String targetField) { + protected GsubProcessor newProcessor(String processorTag, Map config, String field, + boolean ignoreMissing, String targetField) { String pattern = readStringProperty(TYPE, processorTag, config, "pattern"); String replacement = readStringProperty(TYPE, processorTag, config, "replacement"); Pattern searchPattern; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java index 4269cb05257f..6c14dbdabba7 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java @@ -27,7 +27,7 @@ * Throws exception is the field is not of type string. */ -public final class LowercaseProcessor extends AbstractStringProcessor { +public final class LowercaseProcessor extends AbstractStringProcessor { public static final String TYPE = "lowercase"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java index 98fe1223e539..d1b3c8778542 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java @@ -25,7 +25,7 @@ * Processor that trims the content of string fields. * Throws exception is the field is not of type string. 
*/ -public final class TrimProcessor extends AbstractStringProcessor { +public final class TrimProcessor extends AbstractStringProcessor { public static final String TYPE = "trim"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java index fb6c5acf98b2..fa9d377714ee 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java @@ -26,7 +26,7 @@ /** * Processor that URL-decodes a string */ -public final class URLDecodeProcessor extends AbstractStringProcessor { +public final class URLDecodeProcessor extends AbstractStringProcessor { public static final String TYPE = "urldecode"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java index 6c428627c7d7..4503bfc02f71 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java @@ -26,7 +26,7 @@ * Processor that converts the content of string fields to uppercase. * Throws exception is the field is not of type string. */ -public final class UppercaseProcessor extends AbstractStringProcessor { +public final class UppercaseProcessor extends AbstractStringProcessor { public static final String TYPE = "uppercase"; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java index 0465e2490284..ba6a2be73465 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java @@ -37,7 +37,7 @@ protected Map modifyConfig(Map config) { return config; } - protected void assertProcessor(AbstractStringProcessor processor) {} + protected void assertProcessor(AbstractStringProcessor processor) {} public void testCreate() throws Exception { AbstractStringProcessor.Factory factory = newFactory(); @@ -47,7 +47,7 @@ public void testCreate() throws Exception { Map config = new HashMap<>(); config.put("field", fieldName); - AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); + AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(fieldName)); assertThat(processor.isIgnoreMissing(), is(false)); @@ -64,7 +64,7 @@ public void testCreateWithIgnoreMissing() throws Exception { config.put("field", fieldName); config.put("ignore_missing", true); - AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); + AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(fieldName)); assertThat(processor.isIgnoreMissing(), is(true)); @@ -82,7 +82,7 @@ public void testCreateWithTargetField() throws Exception { config.put("field", 
fieldName); config.put("target_field", targetFieldName); - AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); + AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(fieldName)); assertThat(processor.isIgnoreMissing(), is(false)); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java index 4e4182bfdc89..f667f84e5d7b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java @@ -33,7 +33,7 @@ public abstract class AbstractStringProcessorTestCase extends ESTestCase { - protected abstract AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField); + protected abstract AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField); protected String modifyInput(String input) { return input; @@ -41,8 +41,8 @@ protected String modifyInput(String input) { protected abstract T expectedResult(String input); - protected Class expectedResultType(){ - return (Class) String.class; // most results types are Strings + protected Class expectedResultType(){ + return String.class; // most results types are Strings } public void testProcessor() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java index 788340a455a4..2520f3e5ad17 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java @@ -29,12 +29,12 @@ import static org.hamcrest.Matchers.equalTo; -public class BytesProcessorTests extends AbstractStringProcessorTestCase { +public class BytesProcessorTests extends AbstractStringProcessorTestCase { private String modifiedInput; @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new BytesProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java index 4a70b4686e0a..0dadefbb4ee6 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java @@ -42,7 +42,7 @@ protected Map modifyConfig(Map config) { } @Override - protected void assertProcessor(AbstractStringProcessor processor) { + protected void assertProcessor(AbstractStringProcessor processor) { GsubProcessor gsubProcessor = (GsubProcessor) processor; assertThat(gsubProcessor.getPattern().toString(), equalTo("\\.")); assertThat(gsubProcessor.getReplacement(), equalTo("-")); diff --git 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java index 38d0202d3a1e..9c003356c3df 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java @@ -21,10 +21,10 @@ import java.util.regex.Pattern; -public class GsubProcessorTests extends AbstractStringProcessorTestCase { +public class GsubProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new GsubProcessor(randomAlphaOfLength(10), field, Pattern.compile("\\."), "-", ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java index 67a73669c038..b804d3a0221c 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java @@ -21,9 +21,9 @@ import java.util.Locale; -public class LowercaseProcessorTests extends AbstractStringProcessorTestCase { +public class LowercaseProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new LowercaseProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java index f0ae554f5cad..abd7cae12fe9 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.ingest.common; -public class TrimProcessorTests extends AbstractStringProcessorTestCase { +public class TrimProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new TrimProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java index 7697f1fcba3d..150d594afd9a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java @@ -22,14 +22,14 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; -public class URLDecodeProcessorTests extends AbstractStringProcessorTestCase { +public class 
URLDecodeProcessorTests extends AbstractStringProcessorTestCase { @Override protected String modifyInput(String input) { return "Hello%20G%C3%BCnter" + input; } @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new URLDecodeProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java index 76459f811689..1b027c438083 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java @@ -21,10 +21,10 @@ import java.util.Locale; -public class UppercaseProcessorTests extends AbstractStringProcessorTestCase { +public class UppercaseProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new UppercaseProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt index 3d7b29826c74..03ec9275aa8b 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt @@ -19,11 +19,14 @@ # This file contains a whitelist for functions to be used in Score context +class org.elasticsearch.script.ScoreScript no_import { +} + static_import { double saturation(double, double) from_class org.elasticsearch.script.ScoreScriptUtils double sigmoid(double, double, double) from_class org.elasticsearch.script.ScoreScriptUtils - double randomReproducible(String, int) from_class org.elasticsearch.script.ScoreScriptUtils - double randomNotReproducible() bound_to org.elasticsearch.script.ScoreScriptUtils$RandomNotReproducible + double randomScore(org.elasticsearch.script.ScoreScript, int, String) bound_to org.elasticsearch.script.ScoreScriptUtils$RandomScoreField + double randomScore(org.elasticsearch.script.ScoreScript, int) bound_to org.elasticsearch.script.ScoreScriptUtils$RandomScoreDoc double decayGeoLinear(String, String, String, double, GeoPoint) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayGeoLinear double decayGeoExp(String, String, String, double, GeoPoint) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayGeoExp double decayGeoGauss(String, String, String, double, GeoPoint) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayGeoGauss diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml index a3135777c952..cf55810058d9 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml @@ 
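For context on the whitelist entries above, a rough sketch of calling the new randomScore function from the low-level Java REST client; the host, index name, and seed are assumptions for illustration, and the script source simply mirrors the YAML tests that follow.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RandomScoreQuerySketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/test/_search");
            // seed 100, ranking reproducible per document via the _seq_no field, as in the new whitelist entry
            request.setJsonEntity(
                "{ \"query\": { \"script_score\": {" +
                "    \"query\": { \"match_all\": {} }," +
                "    \"script\": { \"source\": \"randomScore(100, '_seq_no')\" }" +
                "} } }");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}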
-72,61 +72,6 @@ setup: - match: { hits.hits.1._id: d2 } - match: { hits.hits.2._id: d1 } ---- -"Random functions": - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 2 - mappings: - properties: - f1: - type: keyword - - do: - index: - index: test - id: 1 - body: {"f1": "v1"} - - do: - index: - index: test - id: 2 - body: {"f1": "v2"} - - do: - index: - index: test - id: 3 - body: {"f1": "v3"} - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - index: test - body: - query: - script_score: - query: {match_all: {} } - script: - source: "randomReproducible(Long.toString(doc['_seq_no'].value), 100)" - - match: { hits.total: 3 } - - - do: - search: - rest_total_hits_as_int: true - index: test - body: - query: - script_score: - query: {match_all: {} } - script: - source: "randomNotReproducible()" - - match: { hits.total: 3 } - --- "Decay geo functions": - do: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/85_script_score_random_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/85_script_score_random_score.yml new file mode 100644 index 000000000000..2879d50fedeb --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/85_script_score_random_score.yml @@ -0,0 +1,146 @@ +# Integration tests for ScriptScoreQuery using Painless + +setup: +- skip: + version: " - 7.99.99" # correct to 7.09.99 after backporting to 7.1 + reason: "random score function of script score was added in 7.1" + +--- +"Random score function with _seq_no field": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + properties: + f1: + type: keyword + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test"}}' + - '{"f1": "v0"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v1"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v2"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v3"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v4"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v5"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v6"}' + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100, '_seq_no')" + # stash ids to check for reproducibility of ranking + - set: { hits.hits.0._id: id0 } + - set: { hits.hits.1._id: id1 } + - set: { hits.hits.2._id: id2 } + - set: { hits.hits.3._id: id3 } + - set: { hits.hits.4._id: id4 } + - set: { hits.hits.5._id: id5 } + - set: { hits.hits.6._id: id6 } + + # check that ranking is reproducible + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100, '_seq_no')" + - match: { hits.hits.0._id: $id0 } + - match: { hits.hits.1._id: $id1 } + - match: { hits.hits.2._id: $id2 } + - match: { hits.hits.3._id: $id3 } + - match: { hits.hits.4._id: $id4 } + - match: { hits.hits.5._id: $id5 } + - match: { hits.hits.6._id: $id6 } + +--- +"Random score function with internal doc Ids": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + f1: + type: keyword + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test"}}' + - '{"f1": "v0"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v1"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v2"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v3"}' + - 
'{"index": {"_index": "test"}}' + - '{"f1": "v4"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v5"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v6"}' + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100)" + # stash ids to check for reproducibility of ranking + - set: { hits.hits.0._id: id0 } + - set: { hits.hits.1._id: id1 } + - set: { hits.hits.2._id: id2 } + - set: { hits.hits.3._id: id3 } + - set: { hits.hits.4._id: id4 } + - set: { hits.hits.5._id: id5 } + - set: { hits.hits.6._id: id6 } + + # check that ranking is reproducible + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100)" + - match: { hits.hits.0._id: $id0 } + - match: { hits.hits.1._id: $id1 } + - match: { hits.hits.2._id: $id2 } + - match: { hits.hits.3._id: $id3 } + - match: { hits.hits.4._id: $id4 } + - match: { hits.hits.5._id: $id5 } + - match: { hits.hits.6._id: $id6 } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java index f4a61c3ebd35..d48a457ba08c 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java @@ -46,7 +46,7 @@ public class DenseVectorFieldMapper extends FieldMapper implements ArrayValueMapperParser { public static final String CONTENT_TYPE = "dense_vector"; - public static short MAX_DIMS_COUNT = 500; //maximum allowed number of dimensions + public static short MAX_DIMS_COUNT = 1024; //maximum allowed number of dimensions private static final byte INT_BYTES = 4; public static class Defaults { @@ -169,10 +169,9 @@ public void parse(ParseContext context) throws IOException { buf[offset+2] = (byte) (intValue >> 8); buf[offset+3] = (byte) intValue; offset += INT_BYTES; - dim++; - if (dim >= MAX_DIMS_COUNT) { + if (dim++ >= MAX_DIMS_COUNT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + - "] has exceeded the maximum allowed number of dimensions of :[" + MAX_DIMS_COUNT + "]"); + "] has exceeded the maximum allowed number of dimensions of [" + MAX_DIMS_COUNT + "]"); } } BinaryDocValuesField field = new BinaryDocValuesField(fieldType().name(), new BytesRef(buf, 0, offset)); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index cbafd0fd1eff..45a067d7994d 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -41,6 +41,7 @@ public Map getMappers() { mappers.put(RankFeaturesFieldMapper.CONTENT_TYPE, new RankFeaturesFieldMapper.TypeParser()); mappers.put(DenseVectorFieldMapper.CONTENT_TYPE, new DenseVectorFieldMapper.TypeParser()); mappers.put(SparseVectorFieldMapper.CONTENT_TYPE, new SparseVectorFieldMapper.TypeParser()); + mappers.put(SearchAsYouTypeFieldMapper.CONTENT_TYPE, new SearchAsYouTypeFieldMapper.TypeParser()); return Collections.unmodifiableMap(mappers); } diff --git 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java new file mode 100644 index 000000000000..867e975e9f51 --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -0,0 +1,836 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.AnalyzerWrapper; +import org.apache.lucene.analysis.CachingTokenFilter; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; +import org.apache.lucene.analysis.shingle.FixedShingleFilter; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.search.spans.SpanQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; +import static org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType.hasGaps; +import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; + +/** + * Mapper for a text field that optimizes itself for as-you-type completion by 
indexing its content into subfields. Each subfield + * modifies the analysis chain of the root field to index terms the user would create as they type out the value in the root field + * + * The structure of these fields is + * + * <pre>
+ *     [ SearchAsYouTypeFieldMapper, SearchAsYouTypeFieldType, unmodified analysis ]
+ *     ├── [ ShingleFieldMapper, ShingleFieldType, analysis wrapped with 2-shingles ]
+ *     ├── ...
+ *     ├── [ ShingleFieldMapper, ShingleFieldType, analysis wrapped with max_shingle_size-shingles ]
+ *     └── [ PrefixFieldMapper, PrefixFieldType, analysis wrapped with max_shingle_size-shingles and edge-ngrams ]
+ * </pre>
+ */ +public class SearchAsYouTypeFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "search_as_you_type"; + private static final int MAX_SHINGLE_SIZE_LOWER_BOUND = 2; + private static final int MAX_SHINGLE_SIZE_UPPER_BOUND = 4; + private static final String PREFIX_FIELD_SUFFIX = "._index_prefix"; + + public static class Defaults { + + public static final int MIN_GRAM = 1; + public static final int MAX_GRAM = 20; + public static final int MAX_SHINGLE_SIZE = 3; + + public static final MappedFieldType FIELD_TYPE = new SearchAsYouTypeFieldType(); + + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + FIELD_TYPE.freeze(); + } + } + + public static class TypeParser implements Mapper.TypeParser { + + @Override + public Mapper.Builder parse(String name, + Map node, + ParserContext parserContext) throws MapperParsingException { + + final Builder builder = new Builder(name); + + builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer()); + builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer()); + builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer()); + parseTextField(builder, name, node, parserContext); + for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { + final Map.Entry entry = iterator.next(); + final String fieldName = entry.getKey(); + final Object fieldNode = entry.getValue(); + + if (fieldName.equals("max_shingle_size")) { + builder.maxShingleSize(nodeIntegerValue(fieldNode)); + iterator.remove(); + } + // TODO should we allow to configure the prefix field + } + return builder; + } + } + + public static class Builder extends FieldMapper.Builder { + private int maxShingleSize = Defaults.MAX_SHINGLE_SIZE; + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + this.builder = this; + } + + public Builder maxShingleSize(int maxShingleSize) { + if (maxShingleSize < MAX_SHINGLE_SIZE_LOWER_BOUND || maxShingleSize > MAX_SHINGLE_SIZE_UPPER_BOUND) { + throw new MapperParsingException("[max_shingle_size] must be at least [" + MAX_SHINGLE_SIZE_LOWER_BOUND + "] and at most " + + "[" + MAX_SHINGLE_SIZE_UPPER_BOUND + "], got [" + maxShingleSize + "]"); + } + this.maxShingleSize = maxShingleSize; + return builder; + } + + @Override + public SearchAsYouTypeFieldType fieldType() { + return (SearchAsYouTypeFieldType) this.fieldType; + } + + @Override + public SearchAsYouTypeFieldMapper build(Mapper.BuilderContext context) { + setupFieldType(context); + + final NamedAnalyzer indexAnalyzer = fieldType().indexAnalyzer(); + final NamedAnalyzer searchAnalyzer = fieldType().searchAnalyzer(); + final NamedAnalyzer searchQuoteAnalyzer = fieldType().searchQuoteAnalyzer(); + + // set up the prefix field + final String prefixFieldName = name() + PREFIX_FIELD_SUFFIX; + final PrefixFieldType prefixFieldType = new PrefixFieldType(name(), prefixFieldName, Defaults.MIN_GRAM, Defaults.MAX_GRAM); + prefixFieldType.setIndexOptions(fieldType().indexOptions()); + // wrap the root field's index analyzer with shingles and edge ngrams + final SearchAsYouTypeAnalyzer prefixIndexWrapper = + SearchAsYouTypeAnalyzer.withShingleAndPrefix(indexAnalyzer.analyzer(), maxShingleSize); + // wrap the root field's search analyzer with only shingles + final SearchAsYouTypeAnalyzer prefixSearchWrapper = + SearchAsYouTypeAnalyzer.withShingle(searchAnalyzer.analyzer(), maxShingleSize); + 
// don't wrap the root field's search quote analyzer as prefix field doesn't support phrase queries + prefixFieldType.setIndexAnalyzer(new NamedAnalyzer(indexAnalyzer.name(), AnalyzerScope.INDEX, prefixIndexWrapper)); + prefixFieldType.setSearchAnalyzer(new NamedAnalyzer(searchAnalyzer.name(), AnalyzerScope.INDEX, prefixSearchWrapper)); + final PrefixFieldMapper prefixFieldMapper = new PrefixFieldMapper(prefixFieldType, context.indexSettings()); + + // set up the shingle fields + final ShingleFieldMapper[] shingleFieldMappers = new ShingleFieldMapper[maxShingleSize - 1]; + final ShingleFieldType[] shingleFieldTypes = new ShingleFieldType[maxShingleSize - 1]; + for (int i = 0; i < shingleFieldMappers.length; i++) { + final int shingleSize = i + 2; + final ShingleFieldType shingleFieldType = new ShingleFieldType(fieldType(), shingleSize); + shingleFieldType.setName(getShingleFieldName(name(), shingleSize)); + // wrap the root field's index, search, and search quote analyzers with shingles + final SearchAsYouTypeAnalyzer shingleIndexWrapper = + SearchAsYouTypeAnalyzer.withShingle(indexAnalyzer.analyzer(), shingleSize); + final SearchAsYouTypeAnalyzer shingleSearchWrapper = + SearchAsYouTypeAnalyzer.withShingle(searchAnalyzer.analyzer(), shingleSize); + final SearchAsYouTypeAnalyzer shingleSearchQuoteWrapper = + SearchAsYouTypeAnalyzer.withShingle(searchQuoteAnalyzer.analyzer(), shingleSize); + shingleFieldType.setIndexAnalyzer(new NamedAnalyzer(indexAnalyzer.name(), AnalyzerScope.INDEX, shingleIndexWrapper)); + shingleFieldType.setSearchAnalyzer(new NamedAnalyzer(searchAnalyzer.name(), AnalyzerScope.INDEX, shingleSearchWrapper)); + shingleFieldType.setSearchQuoteAnalyzer( + new NamedAnalyzer(searchQuoteAnalyzer.name(), AnalyzerScope.INDEX, shingleSearchQuoteWrapper)); + shingleFieldType.setPrefixFieldType(prefixFieldType); + shingleFieldTypes[i] = shingleFieldType; + shingleFieldMappers[i] = new ShingleFieldMapper(shingleFieldType, context.indexSettings()); + } + fieldType().setPrefixField(prefixFieldType); + fieldType().setShingleFields(shingleFieldTypes); + return new SearchAsYouTypeFieldMapper(name, fieldType(), context.indexSettings(), copyTo, + maxShingleSize, prefixFieldMapper, shingleFieldMappers); + } + } + + private static int countPosition(TokenStream stream) throws IOException { + assert stream instanceof CachingTokenFilter; + PositionIncrementAttribute posIncAtt = stream.getAttribute(PositionIncrementAttribute.class); + stream.reset(); + int positionCount = 0; + while (stream.incrementToken()) { + if (posIncAtt.getPositionIncrement() != 0) { + positionCount += posIncAtt.getPositionIncrement(); + } + } + return positionCount; + } + + /** + * The root field type, which most queries should target as it will delegate queries to subfields better optimized for the query. When + * handling phrase queries, it analyzes the query text to find the appropriate sized shingle subfield to delegate to. 
When handling + * prefix or phrase prefix queries, it delegates to the prefix subfield + */ + static class SearchAsYouTypeFieldType extends StringFieldType { + + PrefixFieldType prefixField; + ShingleFieldType[] shingleFields = new ShingleFieldType[0]; + + SearchAsYouTypeFieldType() { + setTokenized(true); + } + + SearchAsYouTypeFieldType(SearchAsYouTypeFieldType other) { + super(other); + + if (other.prefixField != null) { + this.prefixField = other.prefixField.clone(); + } + if (other.shingleFields != null) { + this.shingleFields = new ShingleFieldType[other.shingleFields.length]; + for (int i = 0; i < this.shingleFields.length; i++) { + if (other.shingleFields[i] != null) { + this.shingleFields[i] = other.shingleFields[i].clone(); + } + } + } + } + + public void setPrefixField(PrefixFieldType prefixField) { + checkIfFrozen(); + this.prefixField = prefixField; + } + + public void setShingleFields(ShingleFieldType[] shingleFields) { + checkIfFrozen(); + this.shingleFields = shingleFields; + } + + @Override + public MappedFieldType clone() { + return new SearchAsYouTypeFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + private ShingleFieldType shingleFieldForPositions(int positions) { + final int indexFromShingleSize = Math.max(positions - 2, 0); + return shingleFields[Math.min(indexFromShingleSize, shingleFields.length - 1)]; + } + + @Override + public Query existsQuery(QueryShardContext context) { + if (omitNorms()) { + return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); + } + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (prefixField == null || prefixField.termLengthWithinBounds(value.length()) == false) { + return super.prefixQuery(value, method, context); + } else { + final Query query = prefixField.prefixQuery(value, method, context); + if (method == null + || method == MultiTermQuery.CONSTANT_SCORE_REWRITE + || method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) { + return new ConstantScoreQuery(query); + } else { + return query; + } + } + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + int numPos = countPosition(stream); + if (shingleFields.length == 0 || slop > 0 || hasGaps(stream) || numPos <= 1) { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + final ShingleFieldType shingleField = shingleFieldForPositions(numPos); + stream = new FixedShingleFilter(stream, shingleField.shingleSize); + return shingleField.phraseQuery(stream, 0, true); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + int numPos = countPosition(stream); + if (shingleFields.length == 0 || slop > 0 || hasGaps(stream) || numPos <= 1) { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + final ShingleFieldType shingleField = shingleFieldForPositions(numPos); + stream = new FixedShingleFilter(stream, shingleField.shingleSize); + return shingleField.multiPhraseQuery(stream, 0, true); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { + int numPos = countPosition(stream); + if (shingleFields.length == 0 || slop > 0 || hasGaps(stream) || numPos <= 1) { + return 
TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions, + null, null); + } + final ShingleFieldType shingleField = shingleFieldForPositions(numPos); + stream = new FixedShingleFilter(stream, shingleField.shingleSize); + return shingleField.phrasePrefixQuery(stream, 0, maxExpansions); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + if (prefixField != null && prefixField.termLengthWithinBounds(value.length())) { + return new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixField.name(), indexedValueForSearch(value))), name()); + } else { + SpanMultiTermQueryWrapper spanMulti = + new SpanMultiTermQueryWrapper<>(new PrefixQuery(new Term(name(), indexedValueForSearch(value)))); + spanMulti.setRewriteMethod(method); + return spanMulti; + } + } + + @Override + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); + final SearchAsYouTypeFieldType otherFieldType = (SearchAsYouTypeFieldType) other; + if (this.shingleFields.length != otherFieldType.shingleFields.length) { + conflicts.add("mapper [" + name() + "] has a different [max_shingle_size]"); + } else if (Arrays.equals(this.shingleFields, otherFieldType.shingleFields) == false) { + conflicts.add("mapper [" + name() + "] has shingle subfields that are configured differently"); + } + + if (Objects.equals(this.prefixField, otherFieldType.prefixField) == false) { + conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings"); + } + } + + @Override + public boolean equals(Object otherObject) { + if (this == otherObject) { + return true; + } + if (otherObject == null || getClass() != otherObject.getClass()) { + return false; + } + if (!super.equals(otherObject)) { + return false; + } + final SearchAsYouTypeFieldType other = (SearchAsYouTypeFieldType) otherObject; + return Objects.equals(prefixField, other.prefixField) && + Arrays.equals(shingleFields, other.shingleFields); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), prefixField, Arrays.hashCode(shingleFields)); + } + } + + /** + * The prefix field type handles prefix and phrase prefix queries that are delegated to it by the other field types in a + * search_as_you_type structure + */ + static final class PrefixFieldType extends StringFieldType { + + final int minChars; + final int maxChars; + final String parentField; + + PrefixFieldType(String parentField, String name, int minChars, int maxChars) { + setTokenized(true); + setOmitNorms(true); + setStored(false); + setName(name); + this.minChars = minChars; + this.maxChars = maxChars; + this.parentField = parentField; + } + + PrefixFieldType(PrefixFieldType other) { + super(other); + this.minChars = other.minChars; + this.maxChars = other.maxChars; + this.parentField = other.parentField; + } + + boolean termLengthWithinBounds(int length) { + return length >= minChars - 1 && length <= maxChars; + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (value.length() >= minChars) { + return super.termQuery(value, context); + } + List automata = new ArrayList<>(); + automata.add(Automata.makeString(value)); + for (int i = value.length(); i < minChars; i++) { + automata.add(Automata.makeAnyChar()); + } + Automaton automaton = Operations.concatenate(automata); + AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), 
automaton); + query.setRewriteMethod(method); + return new BooleanQuery.Builder() + .add(query, BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(parentField, value)), BooleanClause.Occur.SHOULD) + .build(); + } + + @Override + public PrefixFieldType clone() { + return new PrefixFieldType(this); + } + + @Override + public String typeName() { + return "prefix"; + } + + @Override + public String toString() { + return super.toString() + ",prefixChars=" + minChars + ":" + maxChars; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + PrefixFieldType that = (PrefixFieldType) o; + return minChars == that.minChars && + maxChars == that.maxChars; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), minChars, maxChars); + } + } + + static final class PrefixFieldMapper extends FieldMapper { + + PrefixFieldMapper(PrefixFieldType fieldType, Settings indexSettings) { + super(fieldType.name(), fieldType, fieldType, indexSettings, MultiFields.empty(), CopyTo.empty()); + } + + @Override + public PrefixFieldType fieldType() { + return (PrefixFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) { + throw new UnsupportedOperationException(); + } + + @Override + protected String contentType() { + return "prefix"; + } + + @Override + public String toString() { + return fieldType().toString(); + } + } + + static final class ShingleFieldMapper extends FieldMapper { + + ShingleFieldMapper(ShingleFieldType fieldType, Settings indexSettings) { + super(fieldType.name(), fieldType, fieldType, indexSettings, MultiFields.empty(), CopyTo.empty()); + } + + @Override + public ShingleFieldType fieldType() { + return (ShingleFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) { + throw new UnsupportedOperationException(); + } + + @Override + protected String contentType() { + return "shingle"; + } + } + + /** + * The shingle field type handles phrase queries and delegates prefix and phrase prefix queries to the prefix field + */ + static class ShingleFieldType extends StringFieldType { + final int shingleSize; + PrefixFieldType prefixFieldType; + + ShingleFieldType(MappedFieldType other, int shingleSize) { + super(other); + this.shingleSize = shingleSize; + this.setStored(false); + } + + ShingleFieldType(ShingleFieldType other) { + super(other); + this.shingleSize = other.shingleSize; + if (other.prefixFieldType != null) { + this.prefixFieldType = other.prefixFieldType.clone(); + } + } + + void setPrefixFieldType(PrefixFieldType prefixFieldType) { + checkIfFrozen(); + this.prefixFieldType = prefixFieldType; + } + + @Override + public ShingleFieldType clone() { + return new ShingleFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + if (omitNorms()) { + return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); + } + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (prefixFieldType == null || 
prefixFieldType.termLengthWithinBounds(value.length()) == false) { + return super.prefixQuery(value, method, context); + } else { + final Query query = prefixFieldType.prefixQuery(value, method, context); + if (method == null + || method == MultiTermQuery.CONSTANT_SCORE_REWRITE + || method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) { + return new ConstantScoreQuery(query); + } else { + return query; + } + } + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { + final String prefixFieldName = slop > 0 + ? null + : prefixFieldType.name(); + return TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions, + prefixFieldName, prefixFieldType::termLengthWithinBounds); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + if (prefixFieldType != null && prefixFieldType.termLengthWithinBounds(value.length())) { + return new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixFieldType.name(), indexedValueForSearch(value))), name()); + } else { + SpanMultiTermQueryWrapper spanMulti = + new SpanMultiTermQueryWrapper<>(new PrefixQuery(new Term(name(), indexedValueForSearch(value)))); + spanMulti.setRewriteMethod(method); + return spanMulti; + } + } + + @Override + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); + ShingleFieldType ft = (ShingleFieldType) other; + if (ft.shingleSize != this.shingleSize) { + conflicts.add("mapper [" + name() + "] has different [shingle_size] values"); + } + if (Objects.equals(this.prefixFieldType, ft.prefixFieldType) == false) { + conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings"); + } + } + + @Override + public boolean equals(Object otherObject) { + if (this == otherObject) { + return true; + } + if (otherObject == null || getClass() != otherObject.getClass()) { + return false; + } + if (!super.equals(otherObject)) { + return false; + } + final ShingleFieldType other = (ShingleFieldType) otherObject; + return shingleSize == other.shingleSize + && Objects.equals(prefixFieldType, other.prefixFieldType); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shingleSize, prefixFieldType); + } + } + + private final int maxShingleSize; + private PrefixFieldMapper prefixField; + private final ShingleFieldMapper[] shingleFields; + + public SearchAsYouTypeFieldMapper(String simpleName, + SearchAsYouTypeFieldType fieldType, + Settings indexSettings, + CopyTo copyTo, + int maxShingleSize, + PrefixFieldMapper prefixField, + ShingleFieldMapper[] shingleFields) { + super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, MultiFields.empty(), copyTo); + this.prefixField = prefixField; + this.shingleFields = shingleFields; + this.maxShingleSize = maxShingleSize; + } + + @Override + public FieldMapper updateFieldType(Map fullNameToFieldType) { + SearchAsYouTypeFieldMapper fieldMapper = (SearchAsYouTypeFieldMapper) 
super.updateFieldType(fullNameToFieldType); + fieldMapper.prefixField = (PrefixFieldMapper) fieldMapper.prefixField.updateFieldType(fullNameToFieldType); + for (int i = 0; i < fieldMapper.shingleFields.length; i++) { + fieldMapper.shingleFields[i] = (ShingleFieldMapper) fieldMapper.shingleFields[i].updateFieldType(fullNameToFieldType); + } + return fieldMapper; + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + final String value = context.externalValueSet() ? context.externalValue().toString() : context.parser().textOrNull(); + if (value == null) { + return; + } + + List newFields = new ArrayList<>(); + newFields.add(new Field(fieldType().name(), value, fieldType())); + for (ShingleFieldMapper subFieldMapper : shingleFields) { + fields.add(new Field(subFieldMapper.fieldType().name(), value, subFieldMapper.fieldType())); + } + newFields.add(new Field(prefixField.fieldType().name(), value, prefixField.fieldType())); + if (fieldType().omitNorms()) { + createFieldNamesField(context, newFields); + } + fields.addAll(newFields); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); + SearchAsYouTypeFieldMapper mw = (SearchAsYouTypeFieldMapper) mergeWith; + if (mw.maxShingleSize != maxShingleSize) { + throw new IllegalArgumentException("mapper [" + name() + "] has different [max_shingle_size] setting, current [" + + this.maxShingleSize + "], merged [" + mw.maxShingleSize + "]"); + } + this.prefixField = (PrefixFieldMapper) this.prefixField.merge(mw.prefixField); + + ShingleFieldMapper[] shingleFieldMappers = new ShingleFieldMapper[mw.shingleFields.length]; + for (int i = 0; i < shingleFieldMappers.length; i++) { + this.shingleFields[i] = (ShingleFieldMapper) this.shingleFields[i].merge(mw.shingleFields[i]); + } + } + + public static String getShingleFieldName(String parentField, int shingleSize) { + return parentField + "._" + shingleSize + "gram"; + } + + @Override + public SearchAsYouTypeFieldType fieldType() { + return (SearchAsYouTypeFieldType) super.fieldType(); + } + + public int maxShingleSize() { + return maxShingleSize; + } + + public PrefixFieldMapper prefixField() { + return prefixField; + } + + public ShingleFieldMapper[] shingleFields() { + return shingleFields; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + doXContentAnalyzers(builder, includeDefaults); + builder.field("max_shingle_size", maxShingleSize); + } + + @Override + public Iterator iterator() { + List subIterators = new ArrayList<>(); + subIterators.add(prefixField); + subIterators.addAll(Arrays.asList(shingleFields)); + @SuppressWarnings("unchecked") Iterator concat = Iterators.concat(super.iterator(), subIterators.iterator()); + return concat; + } + + /** + * An analyzer wrapper to add a shingle token filter, an edge ngram token filter or both to its wrapped analyzer. 
When adding an edge + * ngrams token filter, it also adds a {@link TrailingShingleTokenFilter} to add extra position increments at the end of the stream + * to induce the shingle token filter to create tokens at the end of the stream smaller than the shingle size + */ + static class SearchAsYouTypeAnalyzer extends AnalyzerWrapper { + + private final Analyzer delegate; + private final int shingleSize; + private final boolean indexPrefixes; + + private SearchAsYouTypeAnalyzer(Analyzer delegate, + int shingleSize, + boolean indexPrefixes) { + + super(delegate.getReuseStrategy()); + this.delegate = Objects.requireNonNull(delegate); + this.shingleSize = shingleSize; + this.indexPrefixes = indexPrefixes; + } + + static SearchAsYouTypeAnalyzer withShingle(Analyzer delegate, int shingleSize) { + return new SearchAsYouTypeAnalyzer(delegate, shingleSize, false); + } + + static SearchAsYouTypeAnalyzer withShingleAndPrefix(Analyzer delegate, int shingleSize) { + return new SearchAsYouTypeAnalyzer(delegate, shingleSize, true); + } + + @Override + protected Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; + } + + @Override + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + TokenStream tokenStream = components.getTokenStream(); + if (indexPrefixes) { + tokenStream = new TrailingShingleTokenFilter(tokenStream, shingleSize - 1); + } + tokenStream = new FixedShingleFilter(tokenStream, shingleSize, " ", ""); + if (indexPrefixes) { + tokenStream = new EdgeNGramTokenFilter(tokenStream, Defaults.MIN_GRAM, Defaults.MAX_GRAM, true); + } + return new TokenStreamComponents(components.getSource(), tokenStream); + } + + public int shingleSize() { + return shingleSize; + } + + public boolean indexPrefixes() { + return indexPrefixes; + } + + @Override + public String toString() { + return "<" + getClass().getCanonicalName() + " shingleSize=[" + shingleSize + "] indexPrefixes=[" + indexPrefixes + "]>"; + } + + private static class TrailingShingleTokenFilter extends TokenFilter { + + private final int extraPositionIncrements; + private final PositionIncrementAttribute positionIncrementAttribute; + + TrailingShingleTokenFilter(TokenStream input, int extraPositionIncrements) { + super(input); + this.extraPositionIncrements = extraPositionIncrements; + this.positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class); + } + + @Override + public boolean incrementToken() throws IOException { + return input.incrementToken(); + } + + @Override + public void end() throws IOException { + super.end(); + positionIncrementAttribute.setPositionIncrement(extraPositionIncrements); + } + } + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java index adf46d6a60d2..931e27bc1c19 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java @@ -46,7 +46,7 @@ public class SparseVectorFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "sparse_vector"; - public static short MAX_DIMS_COUNT = 500; //maximum allowed number of dimensions + public static short MAX_DIMS_COUNT = 1024; //maximum allowed number of dimensions public static int MAX_DIMS_NUMBER = 65535; //maximum allowed dimension's number public static class Defaults { @@ 
-178,10 +178,9 @@ public void parse(ParseContext context) throws IOException { } dims[dimCount] = dim; values[dimCount] = value; - dimCount ++; - if (dimCount >= MAX_DIMS_COUNT) { + if (dimCount++ >= MAX_DIMS_COUNT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + - "] has exceeded the maximum allowed number of dimensions of :[" + MAX_DIMS_COUNT + "]"); + "] has exceeded the maximum allowed number of dimensions of [" + MAX_DIMS_COUNT + "]"); } } else { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java index 2239c99a310f..cf6fc9965775 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java @@ -30,18 +30,19 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.hamcrest.Matchers; +import org.junit.Before; +import java.io.IOException; import java.util.Collection; -public class DenseVectorFieldMapperTests extends ESSingleNodeTestCase { +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; - @Override - protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); - } +public class DenseVectorFieldMapperTests extends ESSingleNodeTestCase { + private DocumentMapper mapper; - public void testDefaults() throws Exception { + @Before + public void setUpMapper() throws Exception { IndexService indexService = createIndex("test-index"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder() @@ -53,10 +54,15 @@ public void testDefaults() throws Exception { .endObject() .endObject() .endObject()); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); + } - DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); - assertEquals(mapping, mapper.mappingSource().toString()); + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + public void testDefaults() throws Exception { float[] expectedArray = {-12.1f, 100.7f, -4}; ParsedDocument doc1 = mapper.parse(new SourceToParse("test-index", "_doc", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -66,7 +72,7 @@ public void testDefaults() throws Exception { XContentType.JSON)); IndexableField[] fields = doc1.rootDoc().getFields("my-dense-vector"); assertEquals(1, fields.length); - assertThat(fields[0], Matchers.instanceOf(BinaryDocValuesField.class)); + assertThat(fields[0], instanceOf(BinaryDocValuesField.class)); // assert that after decoding the indexed value is equal to expected BytesRef vectorBR = ((BinaryDocValuesField) fields[0]).binaryValue(); @@ -78,4 +84,22 @@ public void testDefaults() throws Exception { 0.001f ); } + + public void testDimensionLimit() throws IOException { + float[] validVector = new float[DenseVectorFieldMapper.MAX_DIMS_COUNT]; + BytesReference validDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .array("my-dense-vector", validVector) + .endObject()); + mapper.parse(new SourceToParse("test-index", "_doc", "1", validDoc, 
XContentType.JSON)); + + float[] invalidVector = new float[DenseVectorFieldMapper.MAX_DIMS_COUNT + 1]; + BytesReference invalidDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .array("my-dense-vector", invalidVector) + .endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse( + new SourceToParse("test-index", "_doc", "1", invalidDoc, XContentType.JSON))); + assertThat(e.getDetailedMessage(), containsString("has exceeded the maximum allowed number of dimensions")); + } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeAnalyzerTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeAnalyzerTests.java new file mode 100644 index 000000000000..6cf0dc83d907 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeAnalyzerTests.java @@ -0,0 +1,197 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.core.SimpleAnalyzer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeAnalyzer; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.equalTo; + +public class SearchAsYouTypeAnalyzerTests extends ESTestCase { + + private static final Analyzer SIMPLE = new SimpleAnalyzer(); + + public static List analyze(SearchAsYouTypeAnalyzer analyzer, String text) throws IOException { + final List tokens = new ArrayList<>(); + try (TokenStream tokenStream = analyzer.tokenStream("field", text)) { + final CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class); + tokenStream.reset(); + while (tokenStream.incrementToken()) { + tokens.add(charTermAttribute.toString()); + } + } + return tokens; + } + + private void testCase(String text, + Function analyzerFunction, + Function> expectedTokensFunction) throws IOException { + + for (int shingleSize = 2; shingleSize <= 4; shingleSize++) { + final SearchAsYouTypeAnalyzer analyzer = analyzerFunction.apply(shingleSize); + final List expectedTokens = expectedTokensFunction.apply(shingleSize); + final List actualTokens = analyze(analyzer, text); + assertThat("analyzed correctly with " + analyzer, actualTokens, 
equalTo(expectedTokens)); + } + } + + public void testSingleTermShingles() throws IOException { + testCase( + "quick", + shingleSize -> SearchAsYouTypeAnalyzer.withShingle(SIMPLE, shingleSize), + shingleSize -> emptyList() + ); + } + + public void testMultiTermShingles() throws IOException { + testCase( + "quick brown fox jump lazy", + shingleSize -> SearchAsYouTypeAnalyzer.withShingle(SIMPLE, shingleSize), + shingleSize -> { + if (shingleSize == 2) { + return asList("quick brown", "brown fox", "fox jump", "jump lazy"); + } else if (shingleSize == 3) { + return asList("quick brown fox", "brown fox jump", "fox jump lazy"); + } else if (shingleSize == 4) { + return asList("quick brown fox jump", "brown fox jump lazy"); + } + throw new IllegalArgumentException(); + } + ); + } + + public void testSingleTermPrefix() throws IOException { + testCase( + "quick", + shingleSize -> SearchAsYouTypeAnalyzer.withShingleAndPrefix(SIMPLE, shingleSize), + shingleSize -> { + final List tokens = new ArrayList<>(asList("q", "qu", "qui", "quic", "quick")); + tokens.addAll(tokenWithSpaces("quick", shingleSize)); + return tokens; + } + ); + } + + public void testMultiTermPrefix() throws IOException { + testCase( + //"quick red fox lazy brown", + "quick brown fox jump lazy", + shingleSize -> SearchAsYouTypeAnalyzer.withShingleAndPrefix(SIMPLE, shingleSize), + shingleSize -> { + if (shingleSize == 2) { + final List tokens = new ArrayList<>(); + tokens.addAll(asList( + "q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown" + )); + tokens.addAll(asList( + "b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox" + )); + tokens.addAll(asList( + "f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump" + )); + tokens.addAll(asList( + "j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy" + )); + tokens.addAll(asList( + "l", "la", "laz", "lazy" + )); + tokens.addAll(tokenWithSpaces("lazy", shingleSize)); + return tokens; + } else if (shingleSize == 3) { + final List tokens = new ArrayList<>(); + tokens.addAll(asList( + "q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown", + "quick brown ", "quick brown f", "quick brown fo", "quick brown fox" + )); + tokens.addAll(asList( + "b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox", "brown fox ", "brown fox j", + "brown fox ju", "brown fox jum", "brown fox jump" + )); + tokens.addAll(asList( + "f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump", "fox jump ", "fox jump l", "fox jump la", + "fox jump laz", "fox jump lazy" + )); + tokens.addAll(asList( + "j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy" + )); + tokens.addAll(tokenWithSpaces("jump lazy", shingleSize - 1)); + tokens.addAll(asList( + "l", "la", "laz", "lazy" + )); + tokens.addAll(tokenWithSpaces("lazy", shingleSize)); + return tokens; + } else if (shingleSize == 4) { + final List tokens = new ArrayList<>(); + tokens.addAll(asList( + "q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown", + "quick brown ", "quick brown f", "quick brown fo", "quick brown fox", "quick brown fox ", "quick brown fox j", + "quick brown fox ju", "quick brown fox jum", "quick brown fox jump" + )); + tokens.addAll(asList( + "b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox", "brown fox ", "brown fox j", + "brown fox ju", 
"brown fox jum", "brown fox jump", "brown fox jump ", "brown fox jump l", "brown fox jump la", + "brown fox jump laz", "brown fox jump lazy" + )); + tokens.addAll(asList( + "f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump", "fox jump ", "fox jump l", "fox jump la", + "fox jump laz", "fox jump lazy" + )); + tokens.addAll(tokenWithSpaces("fox jump lazy", shingleSize - 2)); + tokens.addAll(asList( + "j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy" + )); + tokens.addAll(tokenWithSpaces("jump lazy", shingleSize - 1)); + tokens.addAll(asList( + "l", "la", "laz", "lazy" + )); + tokens.addAll(tokenWithSpaces("lazy", shingleSize)); + return tokens; + } + + throw new IllegalArgumentException(); + } + ); + } + + private static List tokenWithSpaces(String text, int maxShingleSize) { + return IntStream.range(1, maxShingleSize).mapToObj(i -> text + spaces(i)).collect(toList()); + } + + private static String spaces(int count) { + final StringBuilder builder = new StringBuilder(); + for (int i = 0; i < count; i++) { + builder.append(" "); + } + return builder.toString(); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapperTests.java new file mode 100644 index 000000000000..4622b34ea151 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -0,0 +1,813 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.spans.SpanNearQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldMapper; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeAnalyzer; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.ShingleFieldMapper; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.ShingleFieldType; +import org.elasticsearch.index.query.MatchPhrasePrefixQueryBuilder; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.collection.IsArrayContainingInAnyOrder.arrayContainingInAnyOrder; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class SearchAsYouTypeFieldMapperTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + public void testIndexing() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + 
.endObject() + .endObject() + .endObject()); + + final DocumentMapper mapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + ParsedDocument doc = mapper.parse(new SourceToParse("test", "_doc", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("a_field", "new york city") + .endObject()), + XContentType.JSON)); + + for (String field : new String[] { "a_field", "a_field._index_prefix", "a_field._2gram", "a_field._3gram"}) { + IndexableField[] fields = doc.rootDoc().getFields(field); + assertEquals(1, fields.length); + assertEquals("new york city", fields[0].stringValue()); + } + } + + public void testDefaultConfiguration() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + final SearchAsYouTypeFieldMapper rootMapper = getRootFieldMapper(defaultMapper, "a_field"); + assertRootFieldMapper(rootMapper, 3, "default"); + + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertPrefixFieldType(prefixFieldMapper.fieldType(), 3, "default"); + + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._2gram").fieldType(), 2, "default", prefixFieldMapper.fieldType()); + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._3gram").fieldType(), 3, "default", prefixFieldMapper.fieldType()); + } + + public void testConfiguration() throws IOException { + final int maxShingleSize = 4; + final String analyzerName = "simple"; + + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("analyzer", analyzerName) + .field("max_shingle_size", maxShingleSize) + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + final SearchAsYouTypeFieldMapper rootMapper = getRootFieldMapper(defaultMapper, "a_field"); + assertRootFieldMapper(rootMapper, maxShingleSize, analyzerName); + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertPrefixFieldType(prefixFieldMapper.fieldType(), maxShingleSize, analyzerName); + + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._2gram").fieldType(), 2, analyzerName, prefixFieldMapper.fieldType()); + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._3gram").fieldType(), 3, analyzerName, prefixFieldMapper.fieldType()); + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._4gram").fieldType(), 4, analyzerName, prefixFieldMapper.fieldType()); + } + + public void testSimpleMerge() throws IOException { + MapperService mapperService = createIndex("test").mapperService(); + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", 
"search_as_you_type") + .field("analyzer", "standard") + .endObject() + .endObject() + .endObject().endObject()); + DocumentMapper mapper = mapperService.merge("_doc", + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + } + + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("analyzer", "standard") + .endObject() + .startObject("b_field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject()); + DocumentMapper mapper = mapperService.merge("_doc", + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + } + + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("analyzer", "standard") + .field("max_shingle_size", "4") + .endObject() + .startObject("b_field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> mapperService.merge("_doc", + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), containsString("different [max_shingle_size]")); + } + } + + public void testIndexOptions() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("index_options", "offsets") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertThat("for " + mapper.name(), + mapper.fieldType().indexOptions(), equalTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS))); + } + + public void testStore() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("store", "true") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + assertTrue(getRootFieldMapper(defaultMapper, "a_field").fieldType().stored()); + Stream.of( + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertFalse("for " + mapper.name(), mapper.fieldType().stored())); + } + + public void testIndex() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("index", "false") + .endObject() + .endObject() + .endObject() + .endObject()); 
+ + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertThat("for " + mapper.name(), mapper.fieldType().indexOptions(), equalTo(IndexOptions.NONE))); + } + + public void testTermVectors() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("term_vector", "yes") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertTrue("for " + mapper.name(), mapper.fieldType().storeTermVectors())); + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertFalse(prefixFieldMapper.fieldType().storeTermVectors()); + } + + public void testNorms() throws IOException { + // default setting + { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test-1") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertFalse("for " + mapper.name(), mapper.fieldType().omitNorms())); + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertTrue(prefixFieldMapper.fieldType().omitNorms()); + } + + // can disable them on shingle fields + { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("norms", "false") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test-2") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertTrue("for " + mapper.name(), mapper.fieldType().omitNorms())); + } + } + + + public void testDocumentParsingSingleValue() throws IOException { + documentParsingTestCase(Collections.singleton(randomAlphaOfLengthBetween(5, 20))); + } + + public void testDocumentParsingMultipleValues() throws IOException { + documentParsingTestCase(randomUnique(() -> 
randomAlphaOfLengthBetween(3, 20), randomIntBetween(2, 10))); + } + + public void testMatchPhrasePrefix() throws IOException { + IndexService indexService = createIndex("test", Settings.EMPTY); + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null); + + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject().endObject()); + + queryShardContext.getMapperService().merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").toQuery(queryShardContext); + Query expected = new SynonymQuery(new Term("field._index_prefix", "two words")); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "three words here").toQuery(queryShardContext); + Query expected = new SynonymQuery(new Term("field._index_prefix", "three words here")); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").slop(1).toQuery(queryShardContext); + MultiPhrasePrefixQuery mpq = new MultiPhrasePrefixQuery("field"); + mpq.setSlop(1); + mpq.add(new Term("field", "two")); + mpq.add(new Term("field", "words")); + assertThat(q, equalTo(mpq)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "more than three words").toQuery(queryShardContext); + Query expected = new SpanNearQuery.Builder("field._3gram", true) + .addClause(new SpanTermQuery(new Term("field._3gram", "more than three"))) + .addClause(new FieldMaskingSpanQuery( + new SpanTermQuery(new Term("field._index_prefix", "than three words")), "field._3gram") + ) + .build(); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field._3gram", "more than three words").toQuery(queryShardContext); + Query expected = new SpanNearQuery.Builder("field._3gram", true) + .addClause(new SpanTermQuery(new Term("field._3gram", "more than three"))) + .addClause(new FieldMaskingSpanQuery( + new SpanTermQuery(new Term("field._index_prefix", "than three words")), "field._3gram") + ) + .build(); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field._3gram", "two words").toQuery(queryShardContext); + Query expected = new MatchNoDocsQuery(); + assertThat(q, equalTo(expected)); + } + + { + Query actual = new MatchPhrasePrefixQueryBuilder("field._3gram", "one two three four") + .slop(1) + .toQuery(queryShardContext); + MultiPhrasePrefixQuery expected = new MultiPhrasePrefixQuery("field._3gram"); + expected.setSlop(1); + expected.add(new Term("field._3gram", "one two three")); + expected.add(new Term("field._3gram", "two three four")); + assertThat(actual, equalTo(expected)); + } + + } + + public void testMatchPhrase() throws IOException { + final IndexService indexService = createIndex("test", Settings.EMPTY); + final QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, + () -> { throw new UnsupportedOperationException(); }, null); + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + 
.endObject()); + + queryShardContext.getMapperService().merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field", "one")); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._2gram", "one two")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two three") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._3gram", "one two three")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two three four") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._3gram", "one two three")) + .add(new Term("a_field._3gram", "two three four")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two") + .slop(1) + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field", "one")) + .add(new Term("a_field", "two")) + .setSlop(1) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._2gram", "one two") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field._2gram", "one two")); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._2gram", "one two three") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._2gram", "one two")) + .add(new Term("a_field._2gram", "two three")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._3gram", "one two three") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field._3gram", "one two three")); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._3gram", "one two three four") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._3gram", "one two three")) + .add(new Term("a_field._3gram", "two three four")) + .build(); + assertThat(actual, equalTo(expected)); + } + + // todo are these queries generated for the prefix field right? 
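// (editor's note, not part of the original patch; one possible reading of the three cases below,
// based on the analyzers asserted in assertPrefixFieldType() later in this file)
// The prefix field's *search* analyzer only emits fixed shingles of max_shingle_size
// (3 with the default mapping used here) and no edge ngrams, so:
//   - "one two"            analyzes to no tokens     -> MatchNoDocsQuery
//   - "one two three"      analyzes to one shingle   -> TermQuery on a_field._index_prefix
//   - "one two three four" analyzes to two positions -> a true phrase query would be needed,
//     which the prefix field type does not seem to support, hence the IllegalArgumentException.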
+ { + final Query actual = new MatchPhraseQueryBuilder("a_field._index_prefix", "one two") + .toQuery(queryShardContext); + final Query expected = new MatchNoDocsQuery("Matching no documents because no terms present"); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._index_prefix", "one two three") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field._index_prefix", "one two three")); + assertThat(actual, equalTo(expected)); + } + + { + expectThrows(IllegalArgumentException.class, + () -> new MatchPhraseQueryBuilder("a_field._index_prefix", "one two three four").toQuery(queryShardContext)); + } + } + + private static BooleanQuery buildBoolPrefixQuery(String shingleFieldName, String prefixFieldName, List terms) { + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (int i = 0; i < terms.size() - 1; i++) { + final String term = terms.get(i); + builder.add(new BooleanClause(new TermQuery(new Term(shingleFieldName, term)), BooleanClause.Occur.SHOULD)); + } + final String finalTerm = terms.get(terms.size() - 1); + builder.add(new BooleanClause( + new ConstantScoreQuery(new TermQuery(new Term(prefixFieldName, finalTerm))), BooleanClause.Occur.SHOULD)); + return builder.build(); + } + + public void testMultiMatchBoolPrefix() throws IOException { + final IndexService indexService = createIndex("test", Settings.EMPTY); + final QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, + () -> { throw new UnsupportedOperationException(); }, null); + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("max_shingle_size", 4) + .endObject() + .endObject() + .endObject() + .endObject()); + + queryShardContext.getMapperService().merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + + final MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder( + "quick brown fox jump lazy dog", + "a_field", + "a_field._2gram", + "a_field._3gram", + "a_field._4gram" + ); + builder.type(MultiMatchQueryBuilder.Type.BOOL_PREFIX); + + final Query actual = builder.toQuery(queryShardContext); + assertThat(actual, instanceOf(DisjunctionMaxQuery.class)); + final DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) actual; + assertThat(disMaxQuery.getDisjuncts(), hasSize(4)); + assertThat(disMaxQuery.getDisjuncts(), containsInAnyOrder( + buildBoolPrefixQuery( + "a_field", "a_field._index_prefix", asList("quick", "brown", "fox", "jump", "lazy", "dog")), + buildBoolPrefixQuery("a_field._2gram", "a_field._index_prefix", + asList("quick brown", "brown fox", "fox jump", "jump lazy", "lazy dog")), + buildBoolPrefixQuery("a_field._3gram", "a_field._index_prefix", + asList("quick brown fox", "brown fox jump", "fox jump lazy", "jump lazy dog")), + buildBoolPrefixQuery("a_field._4gram", "a_field._index_prefix", + asList("quick brown fox jump", "brown fox jump lazy", "fox jump lazy dog")))); + } + + private void documentParsingTestCase(Collection values) throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = 
createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + if (values.size() > 1) { + builder.array("a_field", values.toArray(new String[0])); + } else { + builder.field("a_field", values.iterator().next()); + } + builder.endObject(); + final ParsedDocument parsedDocument = defaultMapper.parse( + new SourceToParse("test", "_doc", "1", BytesReference.bytes(builder), XContentType.JSON)); + + + final Set> rootFieldMatchers = values.stream() + .map(value -> indexableFieldMatcher(value, SearchAsYouTypeFieldType.class)) + .collect(Collectors.toSet()); + final Set> shingleFieldMatchers = values.stream() + .map(value -> indexableFieldMatcher(value, ShingleFieldType.class)) + .collect(Collectors.toSet()); + final Set> prefixFieldMatchers = values.stream() + .map(value -> indexableFieldMatcher(value, PrefixFieldType.class)) + .collect(Collectors.toSet()); + + // the use of new ArrayList<>() here is to avoid the varargs form of arrayContainingInAnyOrder + assertThat( + parsedDocument.rootDoc().getFields("a_field"), + arrayContainingInAnyOrder(new ArrayList<>(rootFieldMatchers))); + + assertThat( + parsedDocument.rootDoc().getFields("a_field._index_prefix"), + arrayContainingInAnyOrder(new ArrayList<>(prefixFieldMatchers))); + + for (String name : asList("a_field._2gram", "a_field._3gram")) { + assertThat(parsedDocument.rootDoc().getFields(name), arrayContainingInAnyOrder(new ArrayList<>(shingleFieldMatchers))); + } + } + + private static Matcher indexableFieldMatcher(String value, Class fieldTypeClass) { + return Matchers.allOf( + hasProperty(IndexableField::stringValue, equalTo(value)), + hasProperty(IndexableField::fieldType, instanceOf(fieldTypeClass)) + ); + } + + private static void assertRootFieldMapper(SearchAsYouTypeFieldMapper mapper, + int maxShingleSize, + String analyzerName) { + + assertThat(mapper.maxShingleSize(), equalTo(maxShingleSize)); + assertThat(mapper.fieldType(), notNullValue()); + assertSearchAsYouTypeFieldType(mapper.fieldType(), maxShingleSize, analyzerName, mapper.prefixField().fieldType()); + + assertThat(mapper.prefixField(), notNullValue()); + assertThat(mapper.prefixField().fieldType().parentField, equalTo(mapper.name())); + assertPrefixFieldType(mapper.prefixField().fieldType(), maxShingleSize, analyzerName); + + + for (int shingleSize = 2; shingleSize <= maxShingleSize; shingleSize++) { + final ShingleFieldMapper shingleFieldMapper = mapper.shingleFields()[shingleSize - 2]; + assertThat(shingleFieldMapper, notNullValue()); + assertShingleFieldType(shingleFieldMapper.fieldType(), shingleSize, analyzerName, mapper.prefixField().fieldType()); + } + + final int numberOfShingleSubfields = (maxShingleSize - 2) + 1; + assertThat(mapper.shingleFields().length, equalTo(numberOfShingleSubfields)); + } + + private static void assertSearchAsYouTypeFieldType(SearchAsYouTypeFieldType fieldType, int maxShingleSize, + String analyzerName, + PrefixFieldType prefixFieldType) { + + assertThat(fieldType.shingleFields.length, equalTo(maxShingleSize-1)); + for (NamedAnalyzer analyzer : asList(fieldType.indexAnalyzer(), fieldType.searchAnalyzer())) { + assertThat(analyzer.name(), equalTo(analyzerName)); + } + int shingleSize = 2; + for (ShingleFieldType shingleField : fieldType.shingleFields) { + assertShingleFieldType(shingleField, shingleSize++, analyzerName, prefixFieldType); + } + + assertThat(fieldType.prefixField, 
equalTo(prefixFieldType)); + } + + private static void assertShingleFieldType(ShingleFieldType fieldType, + int shingleSize, + String analyzerName, + PrefixFieldType prefixFieldType) { + + assertThat(fieldType.shingleSize, equalTo(shingleSize)); + + for (NamedAnalyzer analyzer : asList(fieldType.indexAnalyzer(), fieldType.searchAnalyzer())) { + assertThat(analyzer.name(), equalTo(analyzerName)); + if (shingleSize > 1) { + final SearchAsYouTypeAnalyzer wrappedAnalyzer = (SearchAsYouTypeAnalyzer) analyzer.analyzer(); + assertThat(wrappedAnalyzer.shingleSize(), equalTo(shingleSize)); + assertThat(wrappedAnalyzer.indexPrefixes(), equalTo(false)); + } + } + + assertThat(fieldType.prefixFieldType, equalTo(prefixFieldType)); + + } + + private static void assertPrefixFieldType(PrefixFieldType fieldType, int shingleSize, String analyzerName) { + for (NamedAnalyzer analyzer : asList(fieldType.indexAnalyzer(), fieldType.searchAnalyzer())) { + assertThat(analyzer.name(), equalTo(analyzerName)); + } + + final SearchAsYouTypeAnalyzer wrappedIndexAnalyzer = (SearchAsYouTypeAnalyzer) fieldType.indexAnalyzer().analyzer(); + final SearchAsYouTypeAnalyzer wrappedSearchAnalyzer = (SearchAsYouTypeAnalyzer) fieldType.searchAnalyzer().analyzer(); + for (SearchAsYouTypeAnalyzer analyzer : asList(wrappedIndexAnalyzer, wrappedSearchAnalyzer)) { + assertThat(analyzer.shingleSize(), equalTo(shingleSize)); + } + assertThat(wrappedIndexAnalyzer.indexPrefixes(), equalTo(true)); + assertThat(wrappedSearchAnalyzer.indexPrefixes(), equalTo(false)); + } + + private static SearchAsYouTypeFieldMapper getRootFieldMapper(DocumentMapper defaultMapper, String fieldName) { + final Mapper mapper = defaultMapper.mappers().getMapper(fieldName); + assertThat(mapper, instanceOf(SearchAsYouTypeFieldMapper.class)); + return (SearchAsYouTypeFieldMapper) mapper; + } + + private static ShingleFieldMapper getShingleFieldMapper(DocumentMapper defaultMapper, String fieldName) { + final Mapper mapper = defaultMapper.mappers().getMapper(fieldName); + assertThat(mapper, instanceOf(ShingleFieldMapper.class)); + return (ShingleFieldMapper) mapper; + } + + private static PrefixFieldMapper getPrefixFieldMapper(DocumentMapper defaultMapper, String fieldName) { + final Mapper mapper = defaultMapper.mappers().getMapper(fieldName); + assertThat(mapper, instanceOf(PrefixFieldMapper.class)); + return (PrefixFieldMapper) mapper; + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldTypeTests.java new file mode 100644 index 000000000000..523de9180914 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldTypeTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.Defaults; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.ShingleFieldType; +import org.junit.Before; + +import static java.util.Arrays.asList; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; +import static org.hamcrest.Matchers.equalTo; + +public class SearchAsYouTypeFieldTypeTests extends FieldTypeTestCase { + + private static final String NAME = "a_field"; + private static final String PREFIX_NAME = NAME + "._index_prefix"; + + @Before + public void setupProperties() { + addModifier(new Modifier("max_shingle_size", false) { + @Override + public void modify(MappedFieldType ft) { + SearchAsYouTypeFieldType fieldType = (SearchAsYouTypeFieldType) ft; + fieldType.setShingleFields(new ShingleFieldType[] { + new ShingleFieldType(fieldType, 2), + new ShingleFieldType(fieldType, 3) + }); + } + }); + addModifier(new Modifier("index_prefixes", false) { + @Override + public void modify(MappedFieldType ft) { + SearchAsYouTypeFieldType fieldType = (SearchAsYouTypeFieldType) ft; + fieldType.setPrefixField(new PrefixFieldType(NAME, PREFIX_NAME, 1, 10)); + } + }); + } + + @Override + protected SearchAsYouTypeFieldType createDefaultFieldType() { + final SearchAsYouTypeFieldType fieldType = new SearchAsYouTypeFieldType(); + fieldType.setName(NAME); + fieldType.setPrefixField(new PrefixFieldType(NAME, PREFIX_NAME, Defaults.MIN_GRAM, Defaults.MAX_GRAM)); + fieldType.setShingleFields(new ShingleFieldType[] { new ShingleFieldType(fieldType, 2) }); + return fieldType; + } + + public void testTermQuery() { + final MappedFieldType fieldType = createDefaultFieldType(); + + fieldType.setIndexOptions(IndexOptions.DOCS); + assertThat(fieldType.termQuery("foo", null), equalTo(new TermQuery(new Term(NAME, "foo")))); + + fieldType.setIndexOptions(IndexOptions.NONE); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> fieldType.termQuery("foo", null)); + assertThat(e.getMessage(), equalTo("Cannot search on field [" + NAME + "] since it is not indexed.")); + } + + public void testTermsQuery() { + final MappedFieldType fieldType = createDefaultFieldType(); + + fieldType.setIndexOptions(IndexOptions.DOCS); + assertThat(fieldType.termsQuery(asList("foo", "bar"), null), + equalTo(new TermInSetQuery(NAME, asList(new BytesRef("foo"), new BytesRef("bar"))))); + + fieldType.setIndexOptions(IndexOptions.NONE); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> fieldType.termsQuery(asList("foo", "bar"), null)); + assertThat(e.getMessage(), equalTo("Cannot search on field [" + NAME + "] since it is not indexed.")); + } + + public void testPrefixQuery() { + final SearchAsYouTypeFieldType fieldType = createDefaultFieldType(); + + // this term should be a length that can be rewriteable to a term query on the 
prefix field + final String withinBoundsTerm = "foo"; + assertThat(fieldType.prefixQuery(withinBoundsTerm, CONSTANT_SCORE_REWRITE, null), + equalTo(new ConstantScoreQuery(new TermQuery(new Term(PREFIX_NAME, withinBoundsTerm))))); + + // our defaults don't allow a situation where a term can be too small + + // this term should be too long to be rewriteable to a term query on the prefix field + final String longTerm = "toolongforourprefixfieldthistermis"; + assertThat(fieldType.prefixQuery(longTerm, CONSTANT_SCORE_REWRITE, null), + equalTo(new PrefixQuery(new Term(NAME, longTerm)))); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java index 06710e39592c..754a6f1a3180 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java @@ -33,7 +33,12 @@ import org.hamcrest.Matchers; import org.junit.Before; +import java.io.IOException; import java.util.Collection; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -42,7 +47,7 @@ public class SparseVectorFieldMapperTests extends ESSingleNodeTestCase { private DocumentMapper mapper; @Before - public void setup() throws Exception { + public void setUpMapper() throws Exception { IndexService indexService = createIndex("test-index"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder() @@ -100,7 +105,7 @@ public void testDefaults() throws Exception { ); } - public void testErrors() { + public void testDimensionNumberValidation() { // 1. 
test for an error on negative dimension MapperParsingException e = expectThrows(MapperParsingException.class, () -> { mapper.parse(new SourceToParse("test-index", "_doc", "1", BytesReference @@ -161,4 +166,28 @@ public void testErrors() { assertThat(e.getCause().getMessage(), containsString( "takes an object that maps a dimension number to a float, but got unexpected token [START_ARRAY]")); } + + public void testDimensionLimit() throws IOException { + Map validVector = IntStream.range(0, SparseVectorFieldMapper.MAX_DIMS_COUNT) + .boxed() + .collect(Collectors.toMap(String::valueOf, Function.identity())); + + BytesReference validDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .field("my-sparse-vector", validVector) + .endObject()); + mapper.parse(new SourceToParse("test-index", "_doc", "1", validDoc, XContentType.JSON)); + + Map invalidVector = IntStream.range(0, SparseVectorFieldMapper.MAX_DIMS_COUNT + 1) + .boxed() + .collect(Collectors.toMap(String::valueOf, Function.identity())); + + BytesReference invalidDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .field("my-sparse-vector", invalidVector) + .endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse( + new SourceToParse("test-index", "_doc", "1", invalidDoc, XContentType.JSON))); + assertThat(e.getDetailedMessage(), containsString("has exceeded the maximum allowed number of dimensions")); + } } diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/10_basic.yml new file mode 100644 index 000000000000..3ddcd89347fc --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/10_basic.yml @@ -0,0 +1,1249 @@ +setup: + - skip: + version: " - 7.0.99" + reason: "added in 7.1.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + a_field: + type: search_as_you_type + analyzer: simple + max_shingle_size: 4 + + - do: + index: + index: test + type: _doc + id: 1 + body: + a_field: "quick brown fox jump lazy dog" + + # this document should not be matched + - do: + index: + index: test + type: _doc + id: 2 + body: + a_field: "xylophone xylophone xylophone" + + - do: + indices.refresh: {} + +--- +"get document": + - do: + get: + index: test + type: _doc + id: 1 + + - is_true: found + - match: { _source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on root field": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field: "quick" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + +# these "search on Xgram" tests repeat the same search for each term we expect to generate +--- +"term query on 2gram": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "quick brown" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "fox jump" + + - match: { hits.total: 1 } + - match: 
{ hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on 3gram": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "quick brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "jump lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on 4gram": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._4gram: "quick brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._4gram: "brown fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._4gram: "fox jump lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +# we won't check all the terms that this field generates because there are many +--- +"term query on prefix field with prefix term": + + # search term as prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._index_prefix: "quick br" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on prefix field with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._index_prefix: "jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on prefix field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._index_prefix: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with infix term": + + - do: + search: + rest_total_hits_as_int: 
true + index: test + body: + query: + prefix: + a_field: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 2gram with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._2gram: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 2gram with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._2gram: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 3gram with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._3gram: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 3gram with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._3gram: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 4gram with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._4gram: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 4gram with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._4gram: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quick b" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quick brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quick brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fox jum" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 3 infix 
terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fox jump lazy do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 5 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: 
+ a_field: "brown fox jump lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "qui" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick b" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "br" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 5 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown fox jump lazy d" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with trailing term": + + - do: + search: + 
rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "qui" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick b" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "br" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fox j" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 5 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fox jump lazy d" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field out of order partial trailing 
term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "fox jump brown do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field out of order partial leading term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "fox jump brown qui" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "qui" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick br" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick brown f" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "br" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown f" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" 
] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "do" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query out of order with partial trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "fox jump brown do" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query out of order with partial leading term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "fox jump lazy qui" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml new file mode 100644 index 000000000000..82a599ce686c --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml @@ -0,0 +1,202 @@ +setup: + - skip: + version: " - 7.0.99" + reason: "added in 7.1.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + a_field: + type: search_as_you_type + analyzer: simple + max_shingle_size: 4 + text_field: + type: text + analyzer: simple + + - do: + index: + index: test + type: _doc + id: 1 + body: + a_field: "quick brown fox jump lazy dog" + text_field: "quick brown fox jump lazy dog" + + - do: + indices.refresh: {} + +--- +"phrase query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick brown fox jump lazy dog" } + +--- +"bool prefix query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fo" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick brown fox jump lazy dog" } + +--- +"multi match bool 
prefix query 1 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fo" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: null } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 2 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 3 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 4 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy d" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick brown fox jump lazy 
dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: ["quick brown fox jump lazy dog"] } diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index db4a716af651..0826c91676de 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -33,6 +33,3 @@ dependencyLicenses { it.group.startsWith('org.elasticsearch') == false } - project.configurations.compileOnly } - -compileJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes" -compileTestJava.options.compilerArgs << "-Xlint:-deprecation,-rawtypes" diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 92b2180f4da6..bdedc65b7a6d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -276,7 +276,7 @@ public void testBulkResponseSetsLotsOfStatus() { versionConflicts++; responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()), new Failure(shardId.getIndexName(), "type", "id" + i, - new VersionConflictEngineException(shardId, "type", "id", "test"))); + new VersionConflictEngineException(shardId, "id", "test"))); continue; } boolean createdResponse; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index c077c992beb6..917d196b6e9f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -81,7 +81,7 @@ public void testAbortOnVersionConflict() throws Exception { BulkByScrollResponse response = copy.get(); assertThat(response, matcher().batches(1).versionConflicts(1).failures(1).created(99)); for (Failure failure: response.getBulkFailures()) { - assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[_doc][")); + assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[")); } } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml index dd29e7701ba1..d11f160bcf57 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -129,7 +129,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. 
- - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } @@ -185,7 +185,7 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml index 312a88ace5e9..9ef6c1a90c40 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml @@ -160,7 +160,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the version isn't always 1. Sometimes it comes out 2. - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: dest} - gte: { took: 0 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml index 15bc62214ebf..08c8465c4096 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -109,7 +109,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. 
- - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } @@ -151,7 +151,7 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 73135c2a1456..69d84dfb78fa 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -20,12 +20,11 @@ package org.elasticsearch.http.netty4; import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpResponse; +import org.elasticsearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; @@ -36,38 +35,12 @@ public class Netty4HttpChannel implements HttpChannel { Netty4HttpChannel(Channel channel) { this.channel = channel; - this.channel.closeFuture().addListener(f -> { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); - } else { - closeContext.completeExceptionally((Exception) cause); - } - } - }); + Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @Override public void sendResponse(HttpResponse response, ActionListener listener) { - ChannelPromise writePromise = channel.newPromise(); - writePromise.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - final Throwable cause = f.cause(); - ExceptionsHelper.maybeDieOnAnotherThread(cause); - if (cause instanceof Error) { - listener.onFailure(new Exception(cause)); - } else { - listener.onFailure((Exception) cause); - } - } - }); - channel.writeAndFlush(response, writePromise); + channel.writeAndFlush(response, Netty4TcpChannel.addPromise(listener, channel)); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 472e34d09fc4..cad95d262708 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -38,7 +38,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler msg) throws Exception { + protected void 
channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest msg) { Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get(); FullHttpRequest request = msg.getRequest(); @@ -72,7 +72,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); - } else { - closeContext.completeExceptionally((Exception) cause); - } - } - }); + Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java index 38527151695d..4eca1803b638 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java @@ -22,7 +22,6 @@ import io.netty.util.internal.logging.AbstractInternalLogger; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.SuppressLoggerChecks; @SuppressLoggerChecks(reason = "safely delegates to logger") diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java index ef96f75be89c..4c68466efc4d 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java @@ -46,33 +46,54 @@ public class Netty4TcpChannel implements TcpChannel { this.isServer = isServer; this.profile = profile; this.connectContext = new CompletableContext<>(); - this.channel.closeFuture().addListener(f -> { + addListener(this.channel.closeFuture(), closeContext); + addListener(connectFuture, connectContext); + } + + /** + * Adds a listener that completes the given {@link CompletableContext} to the given {@link ChannelFuture}. + * @param channelFuture Channel future + * @param context Context to complete + */ + public static void addListener(ChannelFuture channelFuture, CompletableContext context) { + channelFuture.addListener(f -> { if (f.isSuccess()) { - closeContext.complete(null); + context.complete(null); } else { Throwable cause = f.cause(); if (cause instanceof Error) { ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); + context.completeExceptionally(new Exception(cause)); } else { - closeContext.completeExceptionally((Exception) cause); + context.completeExceptionally((Exception) cause); } } }); + } - connectFuture.addListener(f -> { + /** + * Creates a {@link ChannelPromise} for the given {@link Channel} and adds a listener that invokes the given {@link ActionListener} + * on its completion. 
+ * @param listener listener to invoke + * @param channel channel + * @return write promise + */ + public static ChannelPromise addPromise(ActionListener listener, Channel channel) { + ChannelPromise writePromise = channel.newPromise(); + writePromise.addListener(f -> { if (f.isSuccess()) { - connectContext.complete(null); + listener.onResponse(null); } else { - Throwable cause = f.cause(); + final Throwable cause = f.cause(); + ExceptionsHelper.maybeDieOnAnotherThread(cause); if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - connectContext.completeExceptionally(new Exception(cause)); + listener.onFailure(new Exception(cause)); } else { - connectContext.completeExceptionally((Exception) cause); + listener.onFailure((Exception) cause); } } }); + return writePromise; } @Override @@ -122,21 +143,7 @@ public InetSocketAddress getRemoteAddress() { @Override public void sendMessage(BytesReference reference, ActionListener listener) { - ChannelPromise writePromise = channel.newPromise(); - writePromise.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - final Throwable cause = f.cause(); - ExceptionsHelper.maybeDieOnAnotherThread(cause); - if (cause instanceof Error) { - listener.onFailure(new Exception(cause)); - } else { - listener.onFailure((Exception) cause); - } - } - }); - channel.writeAndFlush(Netty4Utils.toByteBuf(reference), writePromise); + channel.writeAndFlush(Netty4Utils.toByteBuf(reference), addPromise(listener, channel)); if (channel.eventLoop().isShutdown()) { listener.onFailure(new TransportException("Cannot send message, event loop is shutting down.")); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java index 9ef3f296f060..830b0a8c203a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport.netty4; import io.netty.channel.Channel; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.transport.TcpServerChannel; @@ -36,19 +35,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel { Netty4TcpServerChannel(Channel channel, String profile) { this.channel = channel; this.profile = profile; - this.channel.closeFuture().addListener(f -> { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); - } else { - closeContext.completeExceptionally((Exception) cause); - } - } - }); + Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java deleted file mode 100644 index cf9791ce85a4..000000000000 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - *
license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.transport.netty4; - -import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.InboundMessage; -import org.elasticsearch.transport.TcpChannel; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; - -@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) -public class Netty4TransportIT extends ESNetty4IntegTestCase { - // static so we can use it in anonymous classes - private static String channelProfileName = null; - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); - } - - @Override - protected Collection> nodePlugins() { - List> list = new ArrayList<>(); - list.add(ExceptionThrowingNetty4Transport.TestPlugin.class); - list.addAll(super.nodePlugins()); - return Collections.unmodifiableCollection(list); - } - - public void testThatConnectionFailsAsIntended() throws Exception { - Client transportClient = internalCluster().transportClient(); - ClusterHealthResponse clusterIndexHealths = transportClient.admin().cluster().prepareHealth().get(); - assertThat(clusterIndexHealths.getStatus(), is(ClusterHealthStatus.GREEN)); - try { - transportClient.filterWithHeader(Collections.singletonMap("ERROR", "MY MESSAGE")).admin().cluster().prepareHealth().get(); - fail("Expected exception, but didn't happen"); - } catch (ElasticsearchException e) { - assertThat(e.getMessage(), containsString("MY 
MESSAGE")); - assertThat(channelProfileName, is(TransportSettings.DEFAULT_PROFILE)); - } - } - - public static final class ExceptionThrowingNetty4Transport extends Netty4Transport { - - public static class TestPlugin extends Plugin implements NetworkPlugin { - - @Override - public Map> getTransports(Settings settings, ThreadPool threadPool, - PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { - return Collections.singletonMap("exception-throwing", - () -> new ExceptionThrowingNetty4Transport(settings, threadPool, networkService, pageCacheRecycler, - namedWriteableRegistry, circuitBreakerService)); - } - } - - public ExceptionThrowingNetty4Transport( - Settings settings, - ThreadPool threadPool, - NetworkService networkService, - PageCacheRecycler recycler, - NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService) { - super(settings, Version.CURRENT, threadPool, networkService, recycler, namedWriteableRegistry, circuitBreakerService); - } - - @Override - protected void handleRequest(TcpChannel channel, InboundMessage.Request request, int messageLengthBytes) throws IOException { - super.handleRequest(channel, request, messageLengthBytes); - channelProfileName = TransportSettings.DEFAULT_PROFILE; - } - - @Override - protected void validateRequest(StreamInput buffer, long requestId, String action) - throws IOException { - super.validateRequest(buffer, requestId, action); - String error = threadPool.getThreadContext().getHeader("ERROR"); - if (error != null) { - throw new ElasticsearchException(error); - } - } - - } - -} diff --git a/plugins/build.gradle b/plugins/build.gradle index 5b7d5f5faf26..585f26c3780f 100644 --- a/plugins/build.gradle +++ b/plugins/build.gradle @@ -21,7 +21,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // TODO exclude some plugins as they require features not yet supproted by testclusters - if (false == name in ['repository-azure', 'repository-hdfs', 'repository-s3']) { + if (false == name in ['repository-hdfs']) { apply plugin: 'elasticsearch.testclusters' } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 977717456362..7b195bdc7b43 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -591,7 +591,7 @@ public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositi @Override public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { - return TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions); + return TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions, null, null); } } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 27597e94976f..a7c1af412d94 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -65,7 +65,9 @@ check { dependsOn 'qa:microsoft-azure-storage:check' } -integTestCluster { - keystoreSetting 'azure.client.integration_test.account', 
'azure_account' - keystoreSetting 'azure.client.integration_test.key', 'azure_key' +testClusters { + integTest { + keystore 'azure.client.integration_test.account', 'azure_account' + keystore 'azure.client.integration_test.key', 'azure_key' + } } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 34323fb930fc..946b377491d2 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -24,18 +24,19 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths - +apply plugin: 'elasticsearch.test.fixtures' + esplugin { description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' classname 'org.elasticsearch.repositories.hdfs.HdfsPlugin' } -apply plugin: 'elasticsearch.vagrantsupport' - versions << [ 'hadoop2': '2.8.1' ] +testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" + configurations { hdfsFixture } @@ -68,67 +69,27 @@ dependencyLicenses { mapping from: /hadoop-.*/, to: 'hadoop' } -// MIT Kerberos Vagrant Testing Fixture -String box = "krb5kdc" -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}" -] - -task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion" -} - -task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcUpdate -} - -task krb5AddPrincipals { - dependsOn krb5kdcFixture -} -List principals = [ "elasticsearch", "hdfs/hdfs.build.elastic.co" ] String realm = "BUILD.ELASTIC.CO" -for (String principal : principals) { - Task create = project.tasks.create("addPrincipal#${principal}".replace('/', '_'), org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture - } - krb5AddPrincipals.dependsOn(create) -} // Create HDFS File System Testing Fixtures for HA/Secure combinations for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) { project.tasks.create(fixtureName, org.elasticsearch.gradle.test.AntFixture) { - dependsOn project.configurations.hdfsFixture + dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }" waitCondition = { fixture, ant -> // the hdfs.MiniHDFS fixture writes the ports file when // it's ready, so we can just wait for the file to exist return fixture.portsFile.exists() - } + } final List miniHDFSArgs = [] // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { - dependsOn krb5kdcFixture, krb5AddPrincipals - Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf") - 
miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5Config}"); + miniHDFSArgs.add("-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")}"); if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED') } @@ -145,9 +106,11 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', // If it's a secure fixture, then set the principal name and keytab locations to use for auth. if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { - Path keytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab") miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}") - miniHDFSArgs.add("${keytabPath}") + miniHDFSArgs.add( + project(':test:fixtures:krb5kdc-fixture') + .ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab") + ) } args miniHDFSArgs.toArray() @@ -170,10 +133,11 @@ project.afterEvaluate { // If it's a secure cluster, add the keytab as an extra config, and set the krb5 conf in the JVM options. if (integTestTaskName.equals('integTestSecure') || integTestTaskName.equals('integTestSecureHa')) { - Path elasticsearchKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("elasticsearch.keytab").toAbsolutePath() - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - - restIntegTestTask.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}") + String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs") + restIntegTestTask.clusterConfig.extraConfigFile( + "repository-hdfs/krb5.keytab", + "${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}" + ) jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}" if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' @@ -189,9 +153,10 @@ project.afterEvaluate { if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { restIntegTestTaskRunner.jvmArg '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' } - - Path hdfsKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab").toAbsolutePath() - restIntegTestTaskRunner.systemProperty "test.krb5.keytab.hdfs", "${hdfsKT}" + restIntegTestTaskRunner.systemProperty ( + "test.krb5.keytab.hdfs", + project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab") + ) } } @@ -269,41 +234,25 @@ if (fixtureSupported) { integTestHa.setEnabled(false) } -// Secure HDFS testing relies on the Vagrant based Kerberos fixture. 
-boolean secureFixtureSupported = false -if (fixtureSupported) { - secureFixtureSupported = project.rootProject.vagrantSupported -} - -if (secureFixtureSupported) { - project.check.dependsOn(integTestSecure) - project.check.dependsOn(integTestSecureHa) +check.dependsOn(integTestSecure, integTestSecureHa) - // Fixture dependencies - integTestSecureCluster.dependsOn secureHdfsFixture, krb5kdcFixture - integTestSecureHaCluster.dependsOn secureHaHdfsFixture, krb5kdcFixture +// Fixture dependencies +integTestSecureCluster.dependsOn secureHdfsFixture +integTestSecureHaCluster.dependsOn secureHaHdfsFixture - // Set the keytab files in the classpath so that we can access them from test code without the security manager - // freaking out. - Path hdfsKeytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs") - project.dependencies { - testRuntime fileTree(dir: hdfsKeytabPath.toString(), include: ['*.keytab']) - } - - // Run just the secure hdfs rest test suite. - integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository' - // Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner. - integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') - - // Only include the HA integration tests for the HA test task - integTestSecureHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) -} else { - // Security tests unsupported. Don't run these tests. - integTestSecure.enabled = false - integTestSecureHa.enabled = false - testingConventions.enabled = false +// Set the keytab files in the classpath so that we can access them from test code without the security manager +// freaking out. +project.dependencies { + testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab']) } +// Run just the secure hdfs rest test suite. +integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository' +// Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner. 
+integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') +// Only include the HA integration tests for the HA test task +integTestSecureHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) + thirdPartyAudit { ignoreMissingClasses() ignoreViolations ( diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index b41174096e49..8a2edeb78c50 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,7 +1,6 @@ import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture -import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.RestIntegTestTask import com.carrotsearch.gradle.junit4.RandomizedTestingTask @@ -71,7 +70,7 @@ task testRepositoryCreds(type: RandomizedTestingTask) { include '**/S3BlobStoreRepositoryTests.class' systemProperty 'es.allow_insecure_settings', 'true' } -project.check.dependsOn(testRepositoryCreds) +check.dependsOn(testRepositoryCreds) unitTest { // these are tested explicitly in separate test tasks @@ -136,78 +135,61 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } -buildscript { - repositories { - maven { - url 'https://plugins.gradle.org/m2/' - } - } - dependencies { - classpath 'de.undercouch:gradle-download-task:3.4.3' - } -} - if (useFixture) { - apply plugin: 'elasticsearch.test.fixtures' - - RestIntegTestTask integTestMinio = project.tasks.create('integTestMinio', RestIntegTestTask.class) { - description = "Runs REST tests using the Minio repository." - } - - Task writeDockerFile = project.tasks.create('writeDockerFile') { + task writeDockerFile { File minioDockerfile = new File("${project.buildDir}/minio-docker/Dockerfile") outputs.file(minioDockerfile) doLast { minioDockerfile.parentFile.mkdirs() minioDockerfile.text = "FROM minio/minio:RELEASE.2019-01-23T23-18-58Z\n" + - "RUN mkdir -p /minio/data/${s3PermanentBucket}\n" + - "ENV MINIO_ACCESS_KEY ${s3PermanentAccessKey}\n" + - "ENV MINIO_SECRET_KEY ${s3PermanentSecretKey}" + "RUN mkdir -p /minio/data/${s3PermanentBucket}\n" + + "ENV MINIO_ACCESS_KEY ${s3PermanentAccessKey}\n" + + "ENV MINIO_SECRET_KEY ${s3PermanentSecretKey}" } } + preProcessFixture { + dependsOn(writeDockerFile) + } - preProcessFixture.dependsOn(writeDockerFile) - // The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: - project.afterEvaluate { - // Only configure the Minio tests if postProcessFixture is configured to skip them if Docker is not available - // or fixtures have been disabled - if (postProcessFixture.enabled) { - ClusterConfiguration cluster = project.extensions.getByName('integTestMinioCluster') as ClusterConfiguration - cluster.dependsOn(project.bundlePlugin) - cluster.dependsOn(postProcessFixture) - cluster.keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - cluster.keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - - Closure minioAddressAndPort = { - int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" - assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort - } - cluster.setting 's3.client.integration_test_permanent.endpoint', "${-> minioAddressAndPort.call()}" - - Task restIntegTestTask = project.tasks.getByName('integTestMinio') - 
restIntegTestTask.clusterConfig.plugin(project.path) - - // Default jvm arguments for all test clusters - String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + - " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + - " " + System.getProperty('tests.jvm.argline', '') - - restIntegTestTask.clusterConfig.jvmArgs = jvmArgs - project.check.dependsOn(integTestMinio) + task integTestMinio(type: RestIntegTestTask) { + description = "Runs REST tests using the Minio repository." + dependsOn tasks.bundlePlugin, tasks.postProcessFixture + runner { + // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 + systemProperty 'tests.rest.blacklist', [ + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/50_repository_ecs_credentials/*' + ].join(",") } } + check.dependsOn(integTestMinio) + BuildPlugin.requireDocker(tasks.integTestMinio) + + testClusters.integTestMinio { + keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey + setting 's3.client.integration_test_permanent.endpoint', { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort + } + plugin file(tasks.bundlePlugin.archiveFile) + } - integTestMinioRunner.dependsOn(postProcessFixture) - // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 - integTestMinioRunner.systemProperty 'tests.rest.blacklist', [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' - ].join(",") - - BuildPlugin.requireDocker(integTestMinio) + integTest.runner { + systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*' + } +} else { + integTest.runner { + systemProperty 'tests.rest.blacklist', + [ + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/50_repository_ecs_credentials/*' + ].join(",") + } } File parentFixtures = new File(project.buildDir, "fixtures") @@ -242,82 +224,65 @@ task s3Fixture(type: AntFixture) { args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3FixtureFile.getAbsolutePath() } -Map expansions = [ - 'permanent_bucket': s3PermanentBucket, - 'permanent_base_path': s3PermanentBasePath, - 'temporary_bucket': s3TemporaryBucket, - 'temporary_base_path': s3TemporaryBasePath, - 'ec2_bucket': s3EC2Bucket, - 'ec2_base_path': s3EC2BasePath, - 'ecs_bucket': s3ECSBucket, - 'ecs_base_path': s3ECSBasePath -] - processTestResources { + Map expansions = [ + 'permanent_bucket': s3PermanentBucket, + 'permanent_base_path': s3PermanentBasePath, + 'temporary_bucket': s3TemporaryBucket, + 'temporary_base_path': s3TemporaryBasePath, + 'ec2_bucket': s3EC2Bucket, + 'ec2_base_path': s3EC2BasePath, + 'ecs_bucket': s3ECSBucket, + 'ecs_base_path': s3ECSBasePath + ] inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) } -project.afterEvaluate { - if (useFixture == false) { - // temporary_credentials, ec2_credentials and ecs_credentials are not ready for third-party-tests yet - integTestRunner.systemProperty 'tests.rest.blacklist', - [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 
'repository_s3/50_repository_ecs_credentials/*' - ].join(",") - } +integTest { + dependsOn s3Fixture } -integTestCluster { - keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey +testClusters.integTest { + keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - keystoreSetting 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey - keystoreSetting 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey - keystoreSetting 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken + keystore 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey + keystore 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey + keystore 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken if (useFixture) { - dependsOn s3Fixture - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 's3.client.integration_test_permanent.endpoint', "http://${-> s3Fixture.addressAndPort}" - setting 's3.client.integration_test_temporary.endpoint', "http://${-> s3Fixture.addressAndPort}" - setting 's3.client.integration_test_ec2.endpoint', "http://${-> s3Fixture.addressAndPort}" + setting 's3.client.integration_test_permanent.endpoint', { "http://${s3Fixture.addressAndPort}" } + setting 's3.client.integration_test_temporary.endpoint', { "http://${s3Fixture.addressAndPort}" } + setting 's3.client.integration_test_ec2.endpoint', { "http://${s3Fixture.addressAndPort}" } // to redirect InstanceProfileCredentialsProvider to custom auth point - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", "http://${-> s3Fixture.addressAndPort}" + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${s3Fixture.addressAndPort}" } } else { println "Using an external service to test the repository-s3 plugin" } } -integTestRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*' - if (useFixture) { - RestIntegTestTask integTestECS = project.tasks.create('integTestECS', RestIntegTestTask.class) { + task integTestECS(type: RestIntegTestTask.class) { description = "Runs tests using the ECS repository." 
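+    // The AmazonS3Fixture started by the s3Fixture task also serves the ECS credentials endpoint
+    // (/ecs_credentials_endpoint), which is handed to the cluster below via AWS_CONTAINER_CREDENTIALS_FULL_URI.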
+ dependsOn(project.s3Fixture) + runner { + systemProperty 'tests.rest.blacklist', [ + 'repository_s3/10_basic/*', + 'repository_s3/20_repository_permanent_credentials/*', + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*' + ].join(",") + } } + check.dependsOn(integTestECS) -// The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: - project.afterEvaluate { - ClusterConfiguration cluster = project.extensions.getByName('integTestECSCluster') as ClusterConfiguration - cluster.dependsOn(project.s3Fixture) - - cluster.setting 's3.client.integration_test_ecs.endpoint', "http://${-> s3Fixture.addressAndPort}" - - Task integTestECSTask = project.tasks.getByName('integTestECS') - integTestECSTask.clusterConfig.plugin(project.path) - integTestECSTask.clusterConfig.environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', - "http://${-> s3Fixture.addressAndPort}/ecs_credentials_endpoint" - integTestECSRunner.systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*' - ].join(",") + testClusters.integTestECS { + setting 's3.client.integration_test_ecs.endpoint', { "http://${s3Fixture.addressAndPort}" } + plugin file(tasks.bundlePlugin.archiveFile) + environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "http://${s3Fixture.addressAndPort}/ecs_credentials_endpoint" } } - project.check.dependsOn(integTestECS) } thirdPartyAudit.ignoreMissingClasses ( @@ -446,8 +411,3 @@ if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { } else { thirdPartyAudit.ignoreMissingClasses 'javax.activation.DataHandler' } - -// AWS SDK is exposing some deprecated methods which we call using a delegate: -// * setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) -// * changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) -compileTestJava.options.compilerArgs << "-Xlint:-deprecation" diff --git a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java index 9bf6daa13fb9..c2a3af360935 100644 --- a/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java +++ b/plugins/store-smb/src/main/java/org/elasticsearch/index/store/SmbDirectoryWrapper.java @@ -19,17 +19,19 @@ package org.elasticsearch.index.store; -import java.io.FilterOutputStream; -import java.io.IOException; -import java.nio.channels.Channels; -import java.nio.file.Files; -import java.nio.file.StandardOpenOption; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.OutputStreamIndexOutput; +import java.io.FilterOutputStream; +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.file.Files; +import java.nio.file.StandardOpenOption; +import java.util.Set; + /** * This class is used to wrap an existing {@link org.apache.lucene.store.FSDirectory} so that * the new shard segment files will be opened for Read and Write access. 
@@ -78,4 +80,10 @@ public void write(byte[] b, int offset, int length) throws IOException { CHUNK_SIZE); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java deleted file mode 100644 index d02be2cff9e7..000000000000 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.transport.nio; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.NioIntegTestCase; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.InboundMessage; -import org.elasticsearch.transport.TcpChannel; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportSettings; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.is; - -@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) -public class NioTransportIT extends NioIntegTestCase { - - // static so we can use it in anonymous classes - private static String channelProfileName = null; - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); - } - - @Override - protected Collection> 
nodePlugins() { - List> list = new ArrayList<>(); - list.add(ExceptionThrowingNioTransport.TestPlugin.class); - list.addAll(super.nodePlugins()); - return Collections.unmodifiableCollection(list); - } - - public void testThatConnectionFailsAsIntended() throws Exception { - Client transportClient = internalCluster().transportClient(); - ClusterHealthResponse clusterIndexHealths = transportClient.admin().cluster().prepareHealth().get(); - assertThat(clusterIndexHealths.getStatus(), is(ClusterHealthStatus.GREEN)); - try { - transportClient.filterWithHeader(Collections.singletonMap("ERROR", "MY MESSAGE")).admin().cluster().prepareHealth().get(); - fail("Expected exception, but didn't happen"); - } catch (ElasticsearchException e) { - assertThat(e.getMessage(), containsString("MY MESSAGE")); - assertThat(channelProfileName, is(TransportSettings.DEFAULT_PROFILE)); - } - } - - public static final class ExceptionThrowingNioTransport extends NioTransport { - - private static final Logger logger = LogManager.getLogger(ExceptionThrowingNioTransport.class); - - public static class TestPlugin extends Plugin implements NetworkPlugin { - - @Override - public Map> getTransports(Settings settings, ThreadPool threadPool, - PageCacheRecycler pageCacheRecycler, - CircuitBreakerService circuitBreakerService, - NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService) { - return Collections.singletonMap("exception-throwing", - () -> new ExceptionThrowingNioTransport(settings, threadPool, networkService, pageCacheRecycler, - namedWriteableRegistry, circuitBreakerService)); - } - } - - ExceptionThrowingNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, - PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService) { - super(settings, Version.CURRENT, threadPool, networkService, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - new NioGroupFactory(settings, logger)); - } - - @Override - protected void handleRequest(TcpChannel channel, InboundMessage.Request request, int messageLengthBytes) throws IOException { - super.handleRequest(channel, request, messageLengthBytes); - channelProfileName = TransportSettings.DEFAULT_PROFILE; - } - - @Override - protected void validateRequest(StreamInput buffer, long requestId, String action) - throws IOException { - super.validateRequest(buffer, requestId, action); - String error = threadPool.getThreadContext().getHeader("ERROR"); - if (error != null) { - throw new ElasticsearchException(error); - } - } - - } -} diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 6942331c97c2..0835945499d3 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -21,6 +21,10 @@ import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-test' +dependencies { + testCompile "org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}" +} + task remoteClusterTest(type: RestIntegTestTask) { mustRunAfter(precommit) } @@ -53,6 +57,6 @@ task integTest { dependsOn = [mixedClusterTest] } -unitTest.enabled = false // no unit tests for multi-cluster-search, only the rest integration test +unitTest.enabled = false // no unit tests for multi-cluster-search, only integration tests check.dependsOn(integTest) diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java 
b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java new file mode 100644 index 000000000000..4a18ddbe1b69 --- /dev/null +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -0,0 +1,847 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search; + +import com.carrotsearch.randomizedtesting.RandomizedContext; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.TimeUnits; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.indices.CreateIndexResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.query.InnerHitBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.join.query.HasChildQueryBuilder; +import org.elasticsearch.join.query.HasParentQueryBuilder; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import 
org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.collapse.CollapseBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.rescore.QueryRescoreMode; +import org.elasticsearch.search.rescore.QueryRescorerBuilder; +import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBuilder; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestion; +import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.elasticsearch.search.suggest.term.TermSuggestion; +import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.Before; + +import java.io.IOException; +import java.time.LocalDate; +import java.time.format.DateTimeFormatter; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +/** + * This test class executes twice, first against the remote cluster, and then against another cluster that has the remote cluster + * registered. Given that each test gets executed against both clusters, {@link #assumeMultiClusterSetup()} needs to be used to run a test + * against the multi cluster setup only, which is required for testing cross-cluster search. + * The goal of this test is not to test correctness of CCS responses, but rather to verify that CCS returns the same responses when + * minimizeRoundTrips is set to either true or false. In fact the execution differs depending on + * such parameter, hence we want to verify that results are the same in both scenarios. 
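+ * Each test builds a single {@code SearchRequest} and hands it to {@code duelSearch}, which runs the request twice,
+ * once with {@code ccsMinimizeRoundtrips} set to true and once set to false, and fails if the two responses still differ
+ * after fields that legitimately vary (took time, number of reduce phases, profile timings) have been normalized.
+ * A typical test therefore looks roughly like:
+ * <pre>
+ * SearchRequest searchRequest = initSearchRequest();
+ * SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+ * sourceBuilder.query(QueryBuilders.matchQuery("tags", "php"));
+ * searchRequest.source(sourceBuilder);
+ * duelSearch(searchRequest, CCSDuelIT::assertHits);
+ * </pre>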
+ */ +@TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs +public class CCSDuelIT extends ESRestTestCase { + + private static final String INDEX_NAME = "ccs_duel_index"; + private static final String REMOTE_INDEX_NAME = "my_remote_cluster:" + INDEX_NAME; + private static final String[] TAGS = new String[]{"java", "xml", "sql", "html", "php", "ruby", "python", "perl"}; + + private static RestHighLevelClient restHighLevelClient; + + @Before + public void init() throws Exception { + super.initClient(); + if (restHighLevelClient == null) { + restHighLevelClient = new HighLevelClient(client()); + String destinationCluster = System.getProperty("tests.rest.suite"); + //we index docs with private randomness otherwise the two clusters end up with exactly the same documents + //given that this test class is run twice with same seed. + RandomizedContext.current().runWithPrivateRandomness(random().nextLong() + destinationCluster.hashCode(), + (Callable) () -> { + indexDocuments(destinationCluster + "-"); + return null; + }); + } + } + + private static class HighLevelClient extends RestHighLevelClient { + private HighLevelClient(RestClient restClient) { + super(restClient, (client) -> {}, Collections.emptyList()); + } + } + + @AfterClass + public static void cleanupClient() throws IOException { + IOUtils.close(restHighLevelClient); + restHighLevelClient = null; + } + + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + private static void indexDocuments(String idPrefix) throws IOException, InterruptedException { + //this index with a single document is used to test partial failures + IndexRequest indexRequest = new IndexRequest(INDEX_NAME + "_err"); + indexRequest.id("id"); + indexRequest.source("creationDate", "err"); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); + IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); + assertEquals(201, indexResponse.status().getStatus()); + + CreateIndexRequest createEmptyIndexRequest = new CreateIndexRequest(INDEX_NAME + "_empty"); + CreateIndexResponse response = restHighLevelClient.indices().create(createEmptyIndexRequest, RequestOptions.DEFAULT); + assertTrue(response.isAcknowledged()); + + int numShards = randomIntBetween(1, 5); + CreateIndexRequest createIndexRequest = new CreateIndexRequest(INDEX_NAME); + createIndexRequest.settings(Settings.builder().put("index.number_of_shards", numShards).put("index.number_of_replicas", 0)); + createIndexRequest.mapping("{\"properties\":{" + + "\"suggest\":{\"type\":\"completion\"}," + + "\"join\":{\"type\":\"join\", \"relations\": {\"question\":\"answer\"}}}}", XContentType.JSON); + CreateIndexResponse createIndexResponse = restHighLevelClient.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertTrue(createIndexResponse.isAcknowledged()); + + BulkProcessor bulkProcessor = BulkProcessor.builder((r, l) -> restHighLevelClient.bulkAsync(r, RequestOptions.DEFAULT, l), + new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + assertFalse(response.hasFailures()); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + throw new AssertionError("Failed to execute bulk", failure); + } + }).build(); + + int numQuestions = randomIntBetween(50, 100); + for (int i = 0; i < 
numQuestions; i++) { + bulkProcessor.add(buildIndexRequest(idPrefix + i, "question", null)); + } + int numAnswers = randomIntBetween(100, 150); + for (int i = 0; i < numAnswers; i++) { + bulkProcessor.add(buildIndexRequest(idPrefix + (i + 1000), "answer", idPrefix + randomIntBetween(0, numQuestions - 1))); + } + assertTrue(bulkProcessor.awaitClose(30, TimeUnit.SECONDS)); + + RefreshResponse refreshResponse = restHighLevelClient.indices().refresh(new RefreshRequest(INDEX_NAME), RequestOptions.DEFAULT); + assertEquals(0, refreshResponse.getFailedShards()); + assertEquals(numShards, refreshResponse.getSuccessfulShards()); + } + + private static IndexRequest buildIndexRequest(String id, String type, String questionId) { + IndexRequest indexRequest = new IndexRequest(INDEX_NAME); + indexRequest.id(id); + if (questionId != null) { + indexRequest.routing(questionId); + } + indexRequest.create(true); + int numTags = randomIntBetween(1, 3); + Set tags = new HashSet<>(); + if (questionId == null) { + for (int i = 0; i < numTags; i++) { + tags.add(randomFrom(TAGS)); + } + } + String[] tagsArray = tags.toArray(new String[0]); + String date = LocalDate.of(2019, 1, randomIntBetween(1, 31)).format(DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT)); + Map joinField = new HashMap<>(); + joinField.put("name", type); + if (questionId != null) { + joinField.put("parent", questionId); + } + indexRequest.source(XContentType.JSON, + "type", type, + "votes", randomIntBetween(0, 30), + "questionId", questionId, + "tags", tagsArray, + "user", "user" + randomIntBetween(1, 10), + "suggest", Collections.singletonMap("input", tagsArray), + "creationDate", date, + "join", joinField); + return indexRequest; + } + + public void testMatchAll() throws Exception { + assumeMultiClusterSetup(); + //verify that the order in which documents are returned when they all have the same score is the same + SearchRequest searchRequest = initSearchRequest(); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testMatchQuery() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "php")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testTrackTotalHitsUpTo() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.trackTotalHitsUpTo(5); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "sql")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testTerminateAfter() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.terminateAfter(10); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "perl")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testPagination() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.from(10); + sourceBuilder.size(20); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "python")); + searchRequest.source(sourceBuilder); + 
duelSearch(searchRequest, response -> assertHits(response, 10)); + } + + public void testHighlighting() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.highlighter(new HighlightBuilder().field("tags")); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + assertFalse(response.getHits().getHits()[0].getHighlightFields().isEmpty()); + }); + } + + public void testFetchSource() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.fetchSource(new String[]{"tags"}, Strings.EMPTY_ARRAY); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + assertEquals(1, response.getHits().getHits()[0].getSourceAsMap().size()); + }); + } + + public void testDocValueFields() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.docValueField("user.keyword"); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + assertEquals(1, response.getHits().getHits()[0].getFields().size()); + assertNotNull(response.getHits().getHits()[0].getFields().get("user.keyword")); + }); + } + + public void testScriptFields() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.scriptField("parent", new Script(ScriptType.INLINE, "painless", "doc['join#question']", Collections.emptyMap())); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + assertEquals(1, response.getHits().getHits()[0].getFields().size()); + assertNotNull(response.getHits().getHits()[0].getFields().get("parent")); + }); + } + + public void testExplain() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.explain(true); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "sql")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + assertNotNull(response.getHits().getHits()[0].getExplanation()); + }); + } + + public void testRescore() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "xml")); + QueryRescorerBuilder rescorerBuilder = new QueryRescorerBuilder(new MatchQueryBuilder("tags", "java")); + rescorerBuilder.setScoreMode(QueryRescoreMode.Multiply); + rescorerBuilder.setRescoreQueryWeight(5); + sourceBuilder.addRescorer(rescorerBuilder); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testHasParentWithInnerHit() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder 
sourceBuilder = new SearchSourceBuilder(); + HasParentQueryBuilder hasParentQueryBuilder = new HasParentQueryBuilder("question", QueryBuilders.matchQuery("tags", "xml"), true); + hasParentQueryBuilder.innerHit(new InnerHitBuilder("inner")); + sourceBuilder.query(hasParentQueryBuilder); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testHasChildWithInnerHit() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("creationDate").gte("2019/01/01").lte("2019/01/31"); + HasChildQueryBuilder query = new HasChildQueryBuilder("answer", rangeQueryBuilder, ScoreMode.Total); + query.innerHit(new InnerHitBuilder("inner")); + sourceBuilder.query(query); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testProfile() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.profile(true); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "html")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + assertFalse(response.getProfileResults().isEmpty()); + }); + } + + public void testSortByField() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.from(30); + sourceBuilder.size(25); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "php")); + sourceBuilder.sort("type.keyword", SortOrder.ASC); + sourceBuilder.sort("creationDate", SortOrder.DESC); + sourceBuilder.sort("user.keyword", SortOrder.ASC); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response, 30); + if (response.getHits().getTotalHits().value > 30) { + assertEquals(3, response.getHits().getHits()[0].getSortValues().length); + } + }); + } + + public void testSortByFieldOneClusterHasNoResults() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + boolean onlyRemote = randomBoolean(); + sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); + sourceBuilder.sort("type.keyword", SortOrder.ASC); + sourceBuilder.sort("creationDate", SortOrder.DESC); + sourceBuilder.sort("user.keyword", SortOrder.ASC); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + SearchHit[] hits = response.getHits().getHits(); + for (SearchHit hit : hits) { + assertEquals(3, hit.getSortValues().length); + assertEquals(INDEX_NAME, hit.getIndex()); + if (onlyRemote) { + assertEquals("my_remote_cluster", hit.getClusterAlias()); + } else { + assertNull(hit.getClusterAlias()); + } + } + }); + } + + public void testFieldCollapsingOneClusterHasNoResults() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + boolean onlyRemote = randomBoolean(); + sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? 
REMOTE_INDEX_NAME : INDEX_NAME)); + sourceBuilder.collapse(new CollapseBuilder("user.keyword")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertHits(response); + for (SearchHit hit : response.getHits().getHits()) { + assertEquals(INDEX_NAME, hit.getIndex()); + if (onlyRemote) { + assertEquals("my_remote_cluster", hit.getClusterAlias()); + } else { + assertNull(hit.getClusterAlias()); + } + } + }); + } + + public void testFieldCollapsingSortByScore() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + searchRequest.source(sourceBuilder); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); + sourceBuilder.collapse(new CollapseBuilder("user.keyword")); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testFieldCollapsingSortByField() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + searchRequest.source(sourceBuilder); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); + sourceBuilder.sort("creationDate", SortOrder.DESC); + sourceBuilder.sort(new ScoreSortBuilder()); + sourceBuilder.collapse(new CollapseBuilder("user.keyword")); + duelSearch(searchRequest, response -> { + assertHits(response); + assertEquals(2, response.getHits().getHits()[0].getSortValues().length); + }); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40005") + public void testTermsAggs() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + searchRequest.source(buildTermsAggsSource()); + duelSearch(searchRequest, CCSDuelIT::assertAggs); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40005") + public void testTermsAggsWithProfile() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + searchRequest.source(buildTermsAggsSource().profile(true)); + duelSearch(searchRequest, CCSDuelIT::assertAggs); + } + + private static SearchSourceBuilder buildTermsAggsSource() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(0); + TermsAggregationBuilder cluster = new TermsAggregationBuilder("cluster123", ValueType.STRING); + cluster.field("_index"); + TermsAggregationBuilder type = new TermsAggregationBuilder("type", ValueType.STRING); + type.field("type.keyword"); + type.showTermDocCountError(true); + type.order(BucketOrder.key(true)); + cluster.subAggregation(type); + sourceBuilder.aggregation(cluster); + + TermsAggregationBuilder tags = new TermsAggregationBuilder("tags", ValueType.STRING); + tags.field("tags.keyword"); + tags.showTermDocCountError(true); + tags.size(100); + sourceBuilder.aggregation(tags); + + TermsAggregationBuilder tags2 = new TermsAggregationBuilder("tags", ValueType.STRING); + tags2.field("tags.keyword"); + tags.subAggregation(tags2); + + FilterAggregationBuilder answers = new FilterAggregationBuilder("answers", new TermQueryBuilder("type", "answer")); + TermsAggregationBuilder answerPerQuestion = new TermsAggregationBuilder("answer_per_question", ValueType.STRING); + answerPerQuestion.showTermDocCountError(true); + answerPerQuestion.field("questionId.keyword"); + answers.subAggregation(answerPerQuestion); + TermsAggregationBuilder answerPerUser = new 
TermsAggregationBuilder("answer_per_user", ValueType.STRING); + answerPerUser.field("user.keyword"); + answerPerUser.size(30); + answerPerUser.showTermDocCountError(true); + answers.subAggregation(answerPerUser); + sourceBuilder.aggregation(answers); + return sourceBuilder; + } + + public void testDateHistogram() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(0); + searchRequest.source(sourceBuilder); + TermsAggregationBuilder tags = new TermsAggregationBuilder("tags", ValueType.STRING); + tags.field("tags.keyword"); + tags.showTermDocCountError(true); + DateHistogramAggregationBuilder creation = new DateHistogramAggregationBuilder("creation"); + creation.field("creationDate"); + creation.dateHistogramInterval(DateHistogramInterval.QUARTER); + creation.subAggregation(tags); + sourceBuilder.aggregation(creation); + duelSearch(searchRequest, CCSDuelIT::assertAggs); + } + + public void testCardinalityAgg() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(0); + searchRequest.source(sourceBuilder); + CardinalityAggregationBuilder tags = new CardinalityAggregationBuilder("tags", ValueType.STRING); + tags.field("tags.keyword"); + sourceBuilder.aggregation(tags); + duelSearch(searchRequest, CCSDuelIT::assertAggs); + } + + public void testPipelineAggs() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(new TermQueryBuilder("type", "answer")); + searchRequest.source(sourceBuilder); + sourceBuilder.size(0); + DateHistogramAggregationBuilder daily = new DateHistogramAggregationBuilder("daily"); + daily.field("creationDate"); + daily.dateHistogramInterval(DateHistogramInterval.DAY); + sourceBuilder.aggregation(daily); + daily.subAggregation(new DerivativePipelineAggregationBuilder("derivative", "_count")); + sourceBuilder.aggregation(new MaxBucketPipelineAggregationBuilder("biggest_day", "daily._count")); + daily.subAggregation(new SumAggregationBuilder("votes").field("votes")); + sourceBuilder.aggregation(new MaxBucketPipelineAggregationBuilder("most_voted", "daily>votes")); + duelSearch(searchRequest, response -> { + assertAggs(response); + assertNotNull(response.getAggregations().get("most_voted")); + }); + duelSearch(searchRequest, CCSDuelIT::assertAggs); + } + + public void testTopHits() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + searchRequest.source(sourceBuilder); + sourceBuilder.size(0); + TopHitsAggregationBuilder topHits = new TopHitsAggregationBuilder("top"); + topHits.from(10); + topHits.size(10); + topHits.sort("creationDate", SortOrder.DESC); + topHits.sort("_id", SortOrder.ASC); + TermsAggregationBuilder tags = new TermsAggregationBuilder("tags", ValueType.STRING); + tags.field("tags.keyword"); + tags.size(10); + tags.subAggregation(topHits); + sourceBuilder.aggregation(tags); + duelSearch(searchRequest, CCSDuelIT::assertAggs); + } + + public void testTermsLookup() throws Exception { + assumeMultiClusterSetup(); + IndexRequest indexRequest = new IndexRequest("lookup_index"); + indexRequest.id("id"); + indexRequest.source("tags", new String[]{"java", "sql", 
"html", "jax-ws"}); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); + IndexResponse indexResponse = restHighLevelClient.index(indexRequest, RequestOptions.DEFAULT); + assertEquals(201, indexResponse.status().getStatus()); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + TermsQueryBuilder termsQueryBuilder = new TermsQueryBuilder("tags", new TermsLookup("lookup_index", "id", "tags")); + sourceBuilder.query(termsQueryBuilder); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, CCSDuelIT::assertHits); + } + + public void testShardFailures() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = new SearchRequest(INDEX_NAME + "*", REMOTE_INDEX_NAME + "*"); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(QueryBuilders.matchQuery("creationDate", "err")); + searchRequest.source(sourceBuilder); + duelSearch(searchRequest, response -> { + assertMultiClusterSearchResponse(response); + assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); + assertNull(response.getAggregations()); + assertNull(response.getSuggest()); + assertThat(response.getHits().getHits().length, greaterThan(0)); + assertThat(response.getFailedShards(), greaterThanOrEqualTo(2)); + }); + } + + public void testTermSuggester() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + searchRequest.source(sourceBuilder); + SuggestBuilder suggestBuilder = new SuggestBuilder(); + suggestBuilder.setGlobalText("jva hml"); + suggestBuilder.addSuggestion("tags", new TermSuggestionBuilder("tags") + .suggestMode(TermSuggestionBuilder.SuggestMode.POPULAR)); + sourceBuilder.suggest(suggestBuilder); + duelSearch(searchRequest, response -> { + assertMultiClusterSearchResponse(response); + assertEquals(1, response.getSuggest().size()); + TermSuggestion tags = response.getSuggest().getSuggestion("tags"); + assertThat(tags.getEntries().size(), greaterThan(0)); + }); + } + + public void testPhraseSuggester() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + searchRequest.source(sourceBuilder); + SuggestBuilder suggestBuilder = new SuggestBuilder(); + suggestBuilder.setGlobalText("jva and hml"); + suggestBuilder.addSuggestion("tags", new PhraseSuggestionBuilder("tags").addCandidateGenerator( + new DirectCandidateGeneratorBuilder("tags").suggestMode("always")).highlight("", "")); + sourceBuilder.suggest(suggestBuilder); + duelSearch(searchRequest, response -> { + assertMultiClusterSearchResponse(response); + assertEquals(1, response.getSuggest().size()); + PhraseSuggestion tags = response.getSuggest().getSuggestion("tags"); + assertThat(tags.getEntries().size(), greaterThan(0)); + }); + } + + public void testCompletionSuggester() throws Exception { + assumeMultiClusterSetup(); + SearchRequest searchRequest = initSearchRequest(); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + searchRequest.source(sourceBuilder); + SuggestBuilder suggestBuilder = new SuggestBuilder(); + suggestBuilder.addSuggestion("python", new CompletionSuggestionBuilder("suggest").size(10).text("pyth")); + suggestBuilder.addSuggestion("java", new CompletionSuggestionBuilder("suggest").size(20).text("jav")); + suggestBuilder.addSuggestion("ruby", new 
CompletionSuggestionBuilder("suggest").size(30).text("rub")); + sourceBuilder.suggest(suggestBuilder); + duelSearch(searchRequest, response -> { + assertMultiClusterSearchResponse(response); + assertEquals(Strings.toString(response, true, true), 3, response.getSuggest().size()); + assertThat(response.getSuggest().getSuggestion("python").getEntries().size(), greaterThan(0)); + assertThat(response.getSuggest().getSuggestion("java").getEntries().size(), greaterThan(0)); + assertThat(response.getSuggest().getSuggestion("ruby").getEntries().size(), greaterThan(0)); + }); + } + + private static void assumeMultiClusterSetup() { + assumeTrue("must run only against the multi_cluster setup", "multi_cluster".equals(System.getProperty("tests.rest.suite"))); + } + + private static SearchRequest initSearchRequest() { + List<String> indices = Arrays.asList(INDEX_NAME, "my_remote_cluster:" + INDEX_NAME); + Collections.shuffle(indices, random()); + return new SearchRequest(indices.toArray(new String[0])); + } + + private static void duelSearch(SearchRequest searchRequest, Consumer<SearchResponse> responseChecker) throws Exception { + CountDownLatch latch = new CountDownLatch(2); + AtomicReference<Exception> exception1 = new AtomicReference<>(); + AtomicReference<SearchResponse> minimizeRoundtripsResponse = new AtomicReference<>(); + searchRequest.setCcsMinimizeRoundtrips(true); + restHighLevelClient.searchAsync(searchRequest, RequestOptions.DEFAULT, + new LatchedActionListener<>(ActionListener.wrap(minimizeRoundtripsResponse::set, exception1::set), latch)); + + AtomicReference<Exception> exception2 = new AtomicReference<>(); + AtomicReference<SearchResponse> fanOutResponse = new AtomicReference<>(); + searchRequest.setCcsMinimizeRoundtrips(false); + restHighLevelClient.searchAsync(searchRequest, RequestOptions.DEFAULT, + new LatchedActionListener<>(ActionListener.wrap(fanOutResponse::set, exception2::set), latch)); + + latch.await(); + + if (exception1.get() != null && exception2.get() != null) { + exception1.get().addSuppressed(exception2.get()); + throw new AssertionError("both requests returned an exception", exception1.get()); + } else { + if (exception1.get() != null) { + throw new AssertionError("one of the two requests returned an exception", exception1.get()); + } + if (exception2.get() != null) { + throw new AssertionError("one of the two requests returned an exception", exception2.get()); + } + SearchResponse minimizeRoundtripsSearchResponse = minimizeRoundtripsResponse.get(); + responseChecker.accept(minimizeRoundtripsSearchResponse); + assertEquals(3, minimizeRoundtripsSearchResponse.getNumReducePhases()); + SearchResponse fanOutSearchResponse = fanOutResponse.get(); + responseChecker.accept(fanOutSearchResponse); + assertEquals(1, fanOutSearchResponse.getNumReducePhases()); + Map<String, Object> minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); + Map<String, Object> fanOutResponseMap = responseToMap(fanOutSearchResponse); + if (minimizeRoundtripsResponseMap.equals(fanOutResponseMap) == false) { + NotEqualMessageBuilder message = new NotEqualMessageBuilder(); + message.compareMaps(minimizeRoundtripsResponseMap, fanOutResponseMap); + throw new AssertionError("Didn't match expected value:\n" + message); + } + } + } + + private static void assertMultiClusterSearchResponse(SearchResponse searchResponse) { + assertEquals(2, searchResponse.getClusters().getTotal()); + assertEquals(2, searchResponse.getClusters().getSuccessful()); + assertThat(searchResponse.getTotalShards(), greaterThan(1)); + assertThat(searchResponse.getSuccessfulShards(), greaterThan(1)); + } + + private static void assertHits(SearchResponse response) { + assertHits(response, 0); + } + + private static void assertHits(SearchResponse response, int from) { + assertMultiClusterSearchResponse(response); + assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); + assertEquals(0, response.getFailedShards()); + assertNull(response.getAggregations()); + assertNull(response.getSuggest()); + if (response.getHits().getTotalHits().value > from) { + assertThat(response.getHits().getHits().length, greaterThan(0)); + } else { + assertThat(response.getHits().getHits().length, equalTo(0)); + } + } + + private static void assertAggs(SearchResponse response) { + assertMultiClusterSearchResponse(response); + assertThat(response.getHits().getTotalHits().value, greaterThan(0L)); + assertEquals(0, response.getHits().getHits().length); + assertNull(response.getSuggest()); + assertNotNull(response.getAggregations()); + List<Aggregation> aggregations = response.getAggregations().asList(); + for (Aggregation aggregation : aggregations) { + if (aggregation instanceof MultiBucketsAggregation) { + MultiBucketsAggregation multiBucketsAggregation = (MultiBucketsAggregation) aggregation; + assertThat("agg " + multiBucketsAggregation.getName() + " has 0 buckets", + multiBucketsAggregation.getBuckets().size(), greaterThan(0)); + } + } + } + + @SuppressWarnings("unchecked") + private static Map<String, Object> responseToMap(SearchResponse response) throws IOException { + BytesReference bytesReference = XContentHelper.toXContent(response, XContentType.JSON, false); + Map<String, Object> responseMap = XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2(); + assertNotNull(responseMap.put("took", -1)); + responseMap.remove("num_reduce_phases"); + Map<String, Object> profile = (Map<String, Object>) responseMap.get("profile"); + if (profile != null) { + List<Map<String, Object>> shards = (List<Map<String, Object>>) profile.get("shards"); + for (Map<String, Object> shard : shards) { + replaceProfileTime(shard); + } + } + return responseMap; + } + + @SuppressWarnings("unchecked") + private static void replaceProfileTime(Map<String, Object> map) { + for (Map.Entry<String, Object> entry : map.entrySet()) { + if (entry.getKey().contains("time")) { + assertThat(entry.getValue(), instanceOf(Number.class)); + assertNotNull(entry.setValue(-1)); + } + if (entry.getKey().equals("breakdown")) { + Map<String, Object> breakdown = (Map<String, Object>) entry.getValue(); + for (String key : breakdown.keySet()) { + assertNotNull(breakdown.put(key, -1L)); + } + } + if (entry.getValue() instanceof Map) { + replaceProfileTime((Map<String, Object>) entry.getValue()); + } + if (entry.getValue() instanceof List) { + List<Object> list = (List<Object>) entry.getValue(); + for (Object obj : list) { + if (obj instanceof Map) { + replaceProfileTime((Map<String, Object>) obj); + } + } + } + } + } +} diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/upgrades/MultiClusterSearchYamlTestSuiteIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java similarity index 97% rename from qa/multi-cluster-search/src/test/java/org/elasticsearch/upgrades/MultiClusterSearchYamlTestSuiteIT.java rename to qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java index fe3a90988318..eb4f9a8e6a91 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/upgrades/MultiClusterSearchYamlTestSuiteIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java @@ -17,7 +17,7 @@ * under the License.
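As an illustrative aside, not part of the change itself: the new duelSearch helper runs the same request twice, once with CCS minimize-roundtrips enabled and once with the regular fan-out mode, and only then compares the two responses as maps after masking the fields that legitimately differ, namely "took", "num_reduce_phases" and the per-shard profile timings. The standalone sketch below shows the shape of that normalize-then-compare idea; the class name, field values and helper method are hypothetical.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ResponseNormalizationSketch {

    // Mask fields that always differ between the two execution modes so that
    // two otherwise equivalent responses compare equal.
    static Map<String, Object> normalize(Map<String, Object> response) {
        Map<String, Object> copy = new HashMap<>(response);
        copy.put("took", -1);              // wall-clock time differs on every run
        copy.remove("num_reduce_phases");  // 3 with minimized roundtrips, 1 with fan-out
        copy.replaceAll((key, value) -> key.contains("time") ? -1 : value);
        return copy;
    }

    public static void main(String[] args) {
        Map<String, Object> minimized = new HashMap<>(Map.of("took", 42, "num_reduce_phases", 3, "hits", List.of("doc1")));
        Map<String, Object> fannedOut = new HashMap<>(Map.of("took", 7, "num_reduce_phases", 1, "hits", List.of("doc1")));
        // After normalization the two modes must agree on everything that matters.
        System.out.println(normalize(minimized).equals(normalize(fannedOut))); // prints true
    }
}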
*/ -package org.elasticsearch.upgrades; +package org.elasticsearch.search; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -42,5 +42,4 @@ public MultiClusterSearchYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate t public static Iterable parameters() throws Exception { return createParameters(); } - } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index e306e7c63ce5..458359b299e7 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -20,31 +20,45 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; +import org.apache.http.client.fluent.Request; +import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; +import org.hamcrest.CoreMatchers; import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardOpenOption; import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.elasticsearch.packaging.util.FileUtils.append; import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist; +import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist; +import static org.elasticsearch.packaging.util.FileUtils.cp; +import static org.elasticsearch.packaging.util.FileUtils.fileWithGlobExist; +import static org.elasticsearch.packaging.util.FileUtils.mkdir; import static org.elasticsearch.packaging.util.FileUtils.mv; +import static org.elasticsearch.packaging.util.FileUtils.rm; +import static org.elasticsearch.packaging.util.FileUtils.slurp; import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; import static org.elasticsearch.packaging.util.Packages.install; import static org.elasticsearch.packaging.util.Packages.remove; +import static org.elasticsearch.packaging.util.Packages.restartElasticsearch; import static org.elasticsearch.packaging.util.Packages.startElasticsearch; import static org.elasticsearch.packaging.util.Packages.stopElasticsearch; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.getOsRelease; import static org.elasticsearch.packaging.util.Platforms.isSystemd; +import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; import static org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.containsString; @@ -55,42 +69,50 @@ @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public abstract class PackageTestCase extends PackagingTestCase { + private Shell sh; @Before public void onlyCompatibleDistributions() { assumeTrue("only compatible distributions", distribution().packaging.compatible); + sh = newShell(); } public void test10InstallPackage() 
throws IOException { assertRemoved(distribution()); installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution(), newShell()); + verifyPackageInstallation(installation, distribution(), sh); } public void test20PluginsCommandWhenNoPlugins() { assumeThat(installation, is(notNullValue())); - assertThat(newShell().run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString()); + assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString()); } - public void test30InstallDoesNotStartServer() { + public void test30DaemonIsNotEnabledOnRestart() { + if (isSystemd()) { + sh.run("systemctl daemon-reload"); + String isEnabledOutput = sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").stdout.trim(); + assertThat(isEnabledOutput, equalTo("disabled")); + } + } + + public void test31InstallDoesNotStartServer() { assumeThat(installation, is(notNullValue())); - assertThat(newShell().run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); + assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); } public void assertRunsWithJavaHome() throws IOException { - Shell sh = newShell(); - String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); byte[] originalEnvFile = Files.readAllBytes(installation.envFile); try { Files.write(installation.envFile, ("JAVA_HOME=" + systemJavaHome + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND); - startElasticsearch(); + startElasticsearch(sh); runElasticsearchTests(); - stopElasticsearch(); + stopElasticsearch(sh); } finally { Files.write(installation.envFile, originalEnvFile); } @@ -99,7 +121,7 @@ public void assertRunsWithJavaHome() throws IOException { assertThat(new String(Files.readAllBytes(log), StandardCharsets.UTF_8), containsString(systemJavaHome)); } - public void test31JavaHomeOverride() throws IOException { + public void test32JavaHomeOverride() throws IOException { assumeThat(installation, is(notNullValue())); // we always run with java home when no bundled jdk is included, so this test would be repetitive assumeThat(distribution().hasJdk, is(true)); @@ -121,11 +143,20 @@ public void test42BundledJdkRemoved() throws IOException { } public void test40StartServer() throws IOException { + String start = sh.runIgnoreExitCode("date ").stdout.trim(); assumeThat(installation, is(notNullValue())); - startElasticsearch(); + startElasticsearch(sh); + + String journalEntries = sh.runIgnoreExitCode("journalctl _SYSTEMD_UNIT=elasticsearch.service " + + "--since \"" + start + "\" --output cat | wc -l").stdout.trim(); + assertThat(journalEntries, equalTo("0")); + + assertPathsExist(installation.pidDir.resolve("elasticsearch.pid")); + assertPathsExist(installation.logs.resolve("elasticsearch_server.json")); + runElasticsearchTests(); - verifyPackageInstallation(installation, distribution(), newShell()); // check startup script didn't change permissions + verifyPackageInstallation(installation, distribution(), sh); // check startup script didn't change permissions } public void test50Remove() { @@ -134,7 +165,6 @@ public void test50Remove() { remove(distribution()); // removing must stop the service - final Shell sh = newShell(); assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); if (isSystemd()) { @@ -184,9 +214,160 @@ public void test60Reinstall() throws IOException { installation = 
install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution(), newShell()); + verifyPackageInstallation(installation, distribution(), sh); remove(distribution()); assertRemoved(distribution()); } + + public void test70RestartServer() throws IOException { + try { + installation = install(distribution()); + assertInstalled(distribution()); + + startElasticsearch(sh); + restartElasticsearch(sh); + runElasticsearchTests(); + stopElasticsearch(sh); + } finally { + cleanup(); + } + } + + + public void test72TestRuntimeDirectory() throws IOException { + try { + installation = install(distribution()); + FileUtils.rm(installation.pidDir); + startElasticsearch(sh); + assertPathsExist(installation.pidDir); + stopElasticsearch(sh); + } finally { + cleanup(); + } + } + + public void test73gcLogsExist() throws IOException { + installation = install(distribution()); + startElasticsearch(sh); + // it can be gc.log or gc.log.0.current + assertThat(installation.logs, fileWithGlobExist("gc.log*")); + stopElasticsearch(sh); + } + + // TEST CASES FOR SYSTEMD ONLY + + + /** + * # Simulates the behavior of a system restart: + * # the PID directory is deleted by the operating system + * # but it should not block ES from starting + * # see https://github.com/elastic/elasticsearch/issues/11594 + */ + public void test80DeletePID_DIRandRestart() throws IOException { + assumeTrue(isSystemd()); + + rm(installation.pidDir); + + sh.run("systemd-tmpfiles --create"); + + startElasticsearch(sh); + + final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); + + assertTrue(Files.exists(pidFile)); + + stopElasticsearch(sh); + } + + public void test81CustomPathConfAndJvmOptions() throws IOException { + assumeTrue(isSystemd()); + + assumeThat(installation, is(notNullValue())); + assertPathsExist(installation.envFile); + + stopElasticsearch(sh); + + // The custom config directory is not under /tmp or /var/tmp because + // systemd's private temp directory functionally means different + // processes can have different views of what's in these directories + String temp = sh.runIgnoreExitCode("mktemp -p /etc -d").stdout.trim(); + final Path tempConf = Paths.get(temp); + + try { + mkdir(tempConf); + cp(installation.config("elasticsearch.yml"), tempConf.resolve("elasticsearch.yml")); + cp(installation.config("log4j2.properties"), tempConf.resolve("log4j2.properties")); + + // we have to disable Log4j from using JMX lest it will hit a security + // manager exception before we have configured logging; this will fail + // startup since we detect usages of logging before it is configured + final String jvmOptions = + "-Xms512m\n" + + "-Xmx512m\n" + + "-Dlog4j2.disable.jmx=true\n"; + append(tempConf.resolve("jvm.options"), jvmOptions); + + sh.runIgnoreExitCode("chown -R elasticsearch:elasticsearch " + tempConf); + + final Shell serverShell = newShell(); + cp(installation.envFile, tempConf.resolve("elasticsearch.bk"));//backup + append(installation.envFile, "ES_PATH_CONF=" + tempConf + "\n"); + append(installation.envFile, "ES_JAVA_OPTS=-XX:-UseCompressedOops"); + + startElasticsearch(serverShell); + + final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes")); + assertThat(nodesResponse, CoreMatchers.containsString("\"heap_init_in_bytes\":536870912")); + assertThat(nodesResponse, CoreMatchers.containsString("\"using_compressed_ordinary_object_pointers\":\"false\"")); + + stopElasticsearch(serverShell); + + } finally { + 
rm(installation.envFile); + cp(tempConf.resolve("elasticsearch.bk"), installation.envFile); + rm(tempConf); + cleanup(); + } + } + + public void test82SystemdMask() throws IOException { + try { + assumeTrue(isSystemd()); + + sh.run("systemctl mask systemd-sysctl.service"); + + installation = install(distribution()); + + sh.run("systemctl unmask systemd-sysctl.service"); + } finally { + cleanup(); + } + } + + public void test83serviceFileSetsLimits() throws IOException { + // Limits are changed on systemd platforms only + assumeTrue(isSystemd()); + + installation = install(distribution()); + + startElasticsearch(sh); + + final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); + assertTrue(Files.exists(pidFile)); + String pid = slurp(pidFile).trim(); + String maxFileSize = sh.run("cat /proc/%s/limits | grep \"Max file size\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxFileSize, equalTo("unlimited")); + + String maxProcesses = sh.run("cat /proc/%s/limits | grep \"Max processes\" | awk '{ print $3 }'", pid).stdout.trim(); + assertThat(maxProcesses, equalTo("4096")); + + String maxOpenFiles = sh.run("cat /proc/%s/limits | grep \"Max open files\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxOpenFiles, equalTo("65535")); + + String maxAddressSpace = sh.run("cat /proc/%s/limits | grep \"Max address space\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxAddressSpace, equalTo("unlimited")); + + stopElasticsearch(sh); + } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java index b9536f86184f..08f54096e073 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/WindowsServiceTestCase.java @@ -39,7 +39,7 @@ import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; import static org.elasticsearch.packaging.util.FileUtils.mv; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.CoreMatchers.equalTo; public abstract class WindowsServiceTestCase extends PackagingTestCase { @@ -64,18 +64,6 @@ public void uninstallService() { sh.runIgnoreExitCode(serviceScript + " remove"); } - private Result runWithoutJava(String script) { - final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); - - try { - mv(installation.bundledJdk, relocatedJdk); - // ask for elasticsearch version to quickly exit if java is actually found (ie test failure) - return sh.runIgnoreExitCode(script); - } finally { - mv(relocatedJdk, installation.bundledJdk); - } - } - private void assertService(String id, String status, String displayName) { Result result = sh.run("Get-Service " + id + " | Format-List -Property Name, Status, DisplayName"); assertThat(result.stdout, containsString("Name : " + id)); @@ -84,16 +72,27 @@ private void assertService(String id, String status, String displayName) { } // runs the service command, dumping all log files on failure - private void assertCommand(String script) { + private Result assertCommand(String script) { + Result result = sh.runIgnoreExitCode(script); + assertExit(result, script, 0); + return result; + } + + private Result assertFailure(String script, int exitCode) { Result result = sh.runIgnoreExitCode(script); - if (result.exitCode != 0) { - logger.error("---- Failed to run 
script: " + script); + assertExit(result, script, exitCode); + return result; + } + + private void assertExit(Result result, String script, int exitCode) { + if (result.exitCode != exitCode) { + logger.error("---- Unexpected exit code (expected " + exitCode + ", got " + result.exitCode + ") for script: " + script); logger.error(result); logger.error("Dumping log files\n"); Result logs = sh.run("$files = Get-ChildItem \"" + installation.logs + "\\elasticsearch.log\"; " + "Write-Output $files; " + "foreach ($file in $files) {" + - "Write-Output \"$file\"; " + + "Write-Output \"$file\"; " + "Get-Content \"$file\" " + "}"); logger.error(logs.stdout); @@ -126,14 +125,20 @@ public void test12InstallService() { } public void test13InstallMissingJava() throws IOException { - Result result = runWithoutJava(serviceScript + " install"); - assertThat(result.exitCode, equalTo(1)); - assertThat(result.stderr, containsString("could not find java in JAVA_HOME or bundled")); + final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + + try { + mv(installation.bundledJdk, relocatedJdk); + Result result = sh.runIgnoreExitCode(serviceScript + " install"); + assertThat(result.exitCode, equalTo(1)); + assertThat(result.stderr, containsString("could not find java in JAVA_HOME or bundled")); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } } public void test14RemoveNotInstalled() { - Result result = sh.runIgnoreExitCode(serviceScript + " remove"); - assertThat(result.stdout, result.exitCode, equalTo(1)); + Result result = assertFailure(serviceScript + " remove", 1); assertThat(result.stdout, containsString("Failed removing '" + DEFAULT_ID + "' service")); } @@ -155,10 +160,7 @@ public void test21CustomizeServiceDisplayName() { } // NOTE: service description is not attainable through any powershell api, so checking it is not possible... 
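The assertCommand, assertFailure and assertExit helpers introduced above all encode one pattern: run a script, compare its exit code against an expectation, and dump the server logs only when that expectation is violated. A rough standalone sketch of the same pattern, using a plain ProcessBuilder instead of the packaging Shell wrapper, might look like the following; the class name, the demo command and the log path are assumptions for illustration only.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class ExitCodeAssertionSketch {

    // Run a command and fail loudly, with diagnostics, when the exit code is unexpected.
    static void assertExit(String[] command, int expectedExitCode, Path logFile) throws IOException, InterruptedException {
        Process process = new ProcessBuilder(command).inheritIO().start();
        int actual = process.waitFor();
        if (actual != expectedExitCode) {
            // Only pay the cost of dumping logs when the assertion is about to fail.
            String logs = Files.exists(logFile) ? Files.readString(logFile) : "<no log file>";
            throw new AssertionError("expected exit " + expectedExitCode + " but got " + actual
                + " for " + String.join(" ", command) + "\n---- logs ----\n" + logs);
        }
    }

    public static void main(String[] args) throws Exception {
        // Succeeds: "true" exits with 0; assumes a Unix-like environment for the demo command.
        assertExit(new String[] {"true"}, 0, Path.of("/var/log/elasticsearch/elasticsearch.log"));
    }
}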
- - public void test30StartStop() throws IOException { - sh.run(serviceScript + " install"); - assertCommand(serviceScript + " start"); + public void assertStartedAndStop() throws IOException { ServerUtils.waitForElasticsearch(); ServerUtils.runElasticsearchTests(); @@ -189,6 +191,12 @@ public void test30StartStop() throws IOException { "}"); } + public void test30StartStop() throws IOException { + sh.run(serviceScript + " install"); + assertCommand(serviceScript + " start"); + assertStartedAndStop(); + } + public void test31StartNotInstalled() throws IOException { Result result = sh.runIgnoreExitCode(serviceScript + " start"); assertThat(result.stdout, result.exitCode, equalTo(1)); @@ -201,17 +209,20 @@ public void test32StopNotStarted() throws IOException { assertThat(result.stdout, containsString("The service '" + DEFAULT_ID + "' has been stopped")); } - /* - // TODO: need to make JAVA_HOME resolve at install time for this to work - // see https://github.com/elastic/elasticsearch/issues/23097 public void test33JavaChanged() throws IOException { - sh.run(serviceScript + " install"); - runWithoutJava(serviceScript + "start"); - ServerUtils.waitForElasticsearch(); - sh.run(serviceScript + " stop"); - sh.runIgnoreExitCode("Wait-Process -Name \"elasticsearch-service-x64\" -Timeout 10"); - sh.run(serviceScript + " remove"); - }*/ + final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); + + try { + mv(installation.bundledJdk, relocatedJdk); + sh.getEnv().put("JAVA_HOME", relocatedJdk.toString()); + assertCommand(serviceScript + " install"); + sh.getEnv().remove("JAVA_HOME"); + assertCommand(serviceScript + " start"); + assertStartedAndStop(); + } finally { + mv(relocatedJdk, installation.bundledJdk); + } + } public void test60Manager() throws IOException { Path serviceMgr = installation.bin("elasticsearch-service-mgr.exe"); diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java index 10d1b3ee6b6d..efbf0bd74a35 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java @@ -20,6 +20,8 @@ package org.elasticsearch.packaging.util; import org.elasticsearch.core.internal.io.IOUtils; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; import java.io.BufferedWriter; import java.io.IOException; @@ -34,9 +36,11 @@ import java.nio.file.attribute.PosixFileAttributes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.core.IsNot.not; import static org.hamcrest.text.IsEmptyString.isEmptyOrNullString; import static org.junit.Assert.assertFalse; @@ -69,6 +73,15 @@ public static void rm(Path... paths) { } } + public static Path mktempDir(Path path) { + try { + return Files.createTempDirectory(path,"tmp"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static Path mkdir(Path path) { try { return Files.createDirectories(path); @@ -176,6 +189,20 @@ public static void assertPathsExist(Path... 
paths) { Arrays.stream(paths).forEach(path -> assertTrue(path + " should exist", Files.exists(path))); } + public static Matcher<Path> fileWithGlobExist(String glob) throws IOException { + return new FeatureMatcher<Path, Iterable<Path>>(not(emptyIterable()), "File with pattern exist", "file with pattern") { + + @Override + protected Iterable<Path> featureValueOf(Path actual) { + try { + return Files.newDirectoryStream(actual, glob); + } catch (IOException e) { + return Collections.emptyList(); + } + } + }; + } + public static void assertPathsDontExist(Path... paths) { Arrays.stream(paths).forEach(path -> assertFalse(path + " should not exist", Files.exists(path))); } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index 7014a627a7ae..afa7e371c2c5 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -270,8 +270,7 @@ private static void verifyDefaultInstallation(Installation es) { ).forEach(configFile -> assertThat(es.config(configFile), file(File, "root", "elasticsearch", p660))); } - public static void startElasticsearch() throws IOException { - final Shell sh = new Shell(); + public static void startElasticsearch(Shell sh) throws IOException { if (isSystemd()) { sh.run("systemctl daemon-reload"); sh.run("systemctl enable elasticsearch.service"); @@ -281,6 +280,10 @@ public static void startElasticsearch() throws IOException { sh.run("service elasticsearch start"); } + assertElasticsearchStarted(sh); + } + + public static void assertElasticsearchStarted(Shell sh) throws IOException { waitForElasticsearch(); if (isSystemd()) { @@ -291,12 +294,21 @@ public static void startElasticsearch() throws IOException { } } - public static void stopElasticsearch() throws IOException { - final Shell sh = new Shell(); + public static void stopElasticsearch(Shell sh) throws IOException { if (isSystemd()) { sh.run("systemctl stop elasticsearch.service"); } else { sh.run("service elasticsearch stop"); } } + + public static void restartElasticsearch(Shell sh) throws IOException { + if (isSystemd()) { + sh.run("systemctl restart elasticsearch.service"); + } else { + sh.run("service elasticsearch restart"); + } + + waitForElasticsearch(); + } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java index b43743813055..dc490de05b9c 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java @@ -27,6 +27,7 @@ import java.io.InputStreamReader; import java.nio.file.Path; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.stream.Stream; @@ -67,6 +68,10 @@ public Result runIgnoreExitCode(String script) { return runScriptIgnoreExitCode(getScriptCommand(script)); } + public Result run(String command, Object...
args) { + String formattedCommand = String.format(Locale.ROOT, command, args); + return run(formattedCommand); + } private String[] getScriptCommand(String script) { if (Platforms.WINDOWS) { return powershellCommand(script); diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats deleted file mode 100644 index 8baa75f38f5b..000000000000 --- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the elasticsearch Systemd setup. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It should only be executed -# in a throw-away VM like those made by the Vagrantfile at -# the root of the Elasticsearch source code. This should -# cause the script to fail if it is executed any other way: -[ -f /etc/is_vagrant_vm ] || { - >&2 echo "must be run on a vagrant VM" - exit 1 -} - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Load test utilities -load $BATS_UTILS/utils.bash -load $BATS_UTILS/packages.bash -load $BATS_UTILS/plugins.bash - -# Cleans everything for the 1st execution -setup() { - skip_not_systemd - skip_not_dpkg_or_rpm - export_elasticsearch_paths -} - -@test "[SYSTEMD] install elasticsearch" { - clean_before_test - install_package -} - -@test "[SYSTEMD] daemon reload after install" { - systemctl daemon-reload -} - -@test "[SYSTEMD] daemon isn't enabled on restart" { - # Rather than restart the VM we just ask systemd if it plans on starting - # elasticsearch on restart. Not as strong as a restart but much much - # faster. - run systemctl is-enabled elasticsearch.service - [ "$output" = "disabled" ] -} - -@test "[SYSTEMD] enable" { - systemctl enable elasticsearch.service - - systemctl is-enabled elasticsearch.service -} - -@test "[SYSTEMD] start" { - # Capture the current epoch in millis - run date +%s - epoch="$output" - - # The OpenJDK packaged for CentOS and OEL both override the default value (false) for the JVM option "AssumeMP". - # - # Because it is forced to "true" by default for these packages, the following warning message is printed to the - # standard output when the Vagrant box has only 1 CPU: - # OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure - # the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N - # - # This message will then fail the next test where we check if no entries have been added to the journal. 
- # - # This message appears since with java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64 because of the commit: - # 2016-10-10 - Andrew Hughes - 1:1.8.0.111-1.b15 - Turn debug builds on for all JIT architectures. - # Always AssumeMP on RHEL. - # - Resolves: rhbz#1381990 - # - if [ -x "$(command -v lsb_release)" ]; then - # Here we set the "-XX:-AssumeMP" option to false again: - lsb_release=$(lsb_release -i) - if [[ "$lsb_release" =~ "CentOS" ]] || [[ "$lsb_release" =~ "OracleServer" ]]; then - echo "-XX:-AssumeMP" >> $ESCONFIG/jvm.options - fi - fi - - systemctl start elasticsearch.service - wait_for_elasticsearch_status - assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json" - - # Converts the epoch back in a human readable format - run date --date=@$epoch "+%Y-%m-%d %H:%M:%S" - since="$output" - - # Verifies that no new entries in journald have been added - # since the last start - result="$(journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" --output cat | wc -l)" - [ "$result" -eq "0" ] || { - echo "Expected no entries in journalctl for the Elasticsearch service but found:" - journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" - false - } -} - -@test "[SYSTEMD] start (running)" { - systemctl start elasticsearch.service -} - -@test "[SYSTEMD] is active (running)" { - run systemctl is-active elasticsearch.service - [ "$status" -eq 0 ] - [ "$output" = "active" ] -} - -@test "[SYSTEMD] status (running)" { - systemctl status elasticsearch.service -} - -################################## -# Check that Elasticsearch is working -################################## -@test "[SYSTEMD] test elasticsearch" { - run_elasticsearch_tests -} - -@test "[SYSTEMD] restart" { - systemctl restart elasticsearch.service - - wait_for_elasticsearch_status - - service elasticsearch status -} - -@test "[SYSTEMD] stop (running)" { - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] status (stopping)" { - run systemctl status elasticsearch.service - # I'm not sure why suse exits 0 here, but it does - if [ ! -e /etc/SuSE-release ]; then - [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped but got $status" - fi - echo "$output" | grep "Active:" | grep "inactive" -} - -@test "[SYSTEMD] stop (stopped)" { - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] status (stopped)" { - run systemctl status elasticsearch.service - # I'm not sure why suse exits 0 here, but it does - if [ ! 
-e /etc/SuSE-release ]; then - [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped but got $status" - fi - echo "$output" | grep "Active:" | grep "inactive" -} - -# Simulates the behavior of a system restart: -# the PID directory is deleted by the operating system -# but it should not block ES from starting -# see https://github.com/elastic/elasticsearch/issues/11594 -@test "[SYSTEMD] delete PID_DIR and restart" { - rm -rf /var/run/elasticsearch - - systemd-tmpfiles --create - - systemctl start elasticsearch.service - - wait_for_elasticsearch_status - - assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] start Elasticsearch with custom JVM options" { - assert_file_exist $ESENVFILE - # The custom config directory is not under /tmp or /var/tmp because - # systemd's private temp directory functionally means different - # processes can have different views of what's in these directories - local temp=`mktemp -p /etc -d` - cp "$ESCONFIG"/elasticsearch.yml "$temp" - cp "$ESCONFIG"/log4j2.properties "$temp" - touch "$temp/jvm.options" - chown -R elasticsearch:elasticsearch "$temp" - echo "-Xms512m" >> "$temp/jvm.options" - echo "-Xmx512m" >> "$temp/jvm.options" - # we have to disable Log4j from using JMX lest it will hit a security - # manager exception before we have configured logging; this will fail - # startup since we detect usages of logging before it is configured - echo "-Dlog4j2.disable.jmx=true" >> "$temp/jvm.options" - cp $ESENVFILE "$temp/elasticsearch" - echo "ES_PATH_CONF=\"$temp\"" >> $ESENVFILE - echo "ES_JAVA_OPTS=\"-XX:-UseCompressedOops\"" >> $ESENVFILE - service elasticsearch start - wait_for_elasticsearch_status - curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912' - curl -s -XGET localhost:9200/_nodes | fgrep '"using_compressed_ordinary_object_pointers":"false"' - service elasticsearch stop - cp "$temp/elasticsearch" $ESENVFILE -} - -@test "[SYSTEMD] masking systemd-sysctl" { - clean_before_test - - systemctl mask systemd-sysctl.service - install_package - - systemctl unmask systemd-sysctl.service -} - -@test "[SYSTEMD] service file sets limits" { - clean_before_test - install_package - systemctl start elasticsearch.service - wait_for_elasticsearch_status - local pid=$(cat /var/run/elasticsearch/elasticsearch.pid) - local max_file_size=$(cat /proc/$pid/limits | grep "Max file size" | awk '{ print $4 }') - [ "$max_file_size" == "unlimited" ] - local max_processes=$(cat /proc/$pid/limits | grep "Max processes" | awk '{ print $3 }') - [ "$max_processes" == "4096" ] - local max_open_files=$(cat /proc/$pid/limits | grep "Max open files" | awk '{ print $4 }') - [ "$max_open_files" == "65535" ] - local max_address_space=$(cat /proc/$pid/limits | grep "Max address space" | awk '{ print $4 }') - [ "$max_address_space" == "unlimited" ] - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] test runtime directory" { - clean_before_test - install_package - sudo rm -rf /var/run/elasticsearch - systemctl start elasticsearch.service - wait_for_elasticsearch_status - [ -d /var/run/elasticsearch ] - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] GC logs exist" { - start_elasticsearch_service - assert_file_exist /var/log/elasticsearch/gc.log.0.current - stop_elasticsearch_service -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 
adb459860ef3..884cf45c84ce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,6 +1,8 @@ --- "Help": - + - skip: + version: " - 7.0.99" + reason: external refresh stats were added in 7.1.0 - do: cat.shards: help: true @@ -52,6 +54,8 @@ merges.total_time .+ \n refresh.total .+ \n refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n refresh.listeners .+ \n search.fetch_current .+ \n search.fetch_time .+ \n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml index b8c922c98c15..732a53aeea4f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml @@ -12,10 +12,6 @@ - match: { acknowledged: true } - - do: - cluster.state: - metric: [ master_node ] - - do: cluster.allocation_explain: body: { "index": "test", "shard": 0, "primary": true } @@ -37,10 +33,6 @@ index: test body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 9 } } - - do: - cluster.state: - metric: [ master_node ] - - do: cluster.allocation_explain: include_disk_info: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml index 5419acb9321f..248b47d07a71 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml @@ -25,12 +25,14 @@ setup: --- "Explain API for non-existent node & shard": + - skip: + features: [arbitrary_key] - do: - cluster.state: - metric: [ master_node ] - - - set: {master_node: node_id} + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: cluster.reroute: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index a6d6bb073054..41c851b71cc6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -3,18 +3,20 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" - # creates an index with one document solely allocated on the master node + features: [warnings, arbitrary_key] + + # creates an index with one document solely allocated on a particular data node # and shrinks it into a new index with a single shard # we don't do the relocation to a single node after the index is created # here since in a mixed version cluster we can't identify # which node is the one with the highest version and that is the only one that can safely # be used to shrink the index. 
- - do: - cluster.state: {} - # Get master node id - - set: { master_node: master } + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -22,8 +24,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on a single node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same data node + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index f12864236d7b..dec0760fc6b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [warnings, arbitrary_key] - do: - cluster.state: {} - # Get master node id - - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id # create index - do: @@ -19,7 +19,7 @@ body: settings: # ensure everything is allocated on a single node - index.routing.allocation.include._id: $master + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 3add4b100d81..eda095ff91f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [warnings, arbitrary_key] - do: - cluster.state: {} - - # get master node id - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -17,8 +17,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on the master node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same node + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 index.merge.scheduler.max_merge_count: 4 @@ -60,4 +60,4 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 8cf932b1c115..df9eae0adf34 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -3,13 +3,13 @@ - skip: version: " - 
6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [arbitrary_key, warnings] - do: - cluster.state: {} - - # get master node id - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -17,8 +17,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on the master node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same node + index.routing.allocation.include._id: $node_id index.number_of_replicas: 0 index.number_of_shards: 1 index.number_of_routing_shards: 4 @@ -62,4 +62,4 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml index 47f6c3e21141..5821117f4c00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml @@ -1,14 +1,13 @@ +setup: + - skip: + features: [arbitrary_key] --- "node_info test": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - do: nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - is_true: nodes - is_true: cluster_name - - is_true: nodes.$master.roles + - is_true: nodes.$node_id.roles diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml index efd2260356a2..09102157bcb9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml @@ -2,15 +2,15 @@ "node_info test profile is empty": - skip: - features: stash_in_path + features: [stash_in_path, arbitrary_key] - do: - cluster.state: {} - - - set: {master_node: master} + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.info: metric: [ transport ] - - is_true: nodes.$master.transport.profiles + - is_true: nodes.$node_id.transport.profiles diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml index a63c246b6033..99b8b6f361a4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml @@ -1,19 +1,22 @@ --- "node_info test flat_settings": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.info: metric: [ settings ] - - match : { nodes.$master.settings.client.type: node } + - match : { nodes.$node_id.settings.client.type: node } - do: nodes.info: metric: [ settings ] flat_settings: true - - match : { nodes.$master.settings.client\.type: node } + - match : { nodes.$node_id.settings.client\.type: node } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml index 61614e7f8e1b..099483be9ade 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml @@ -9,17 +9,20 @@ --- "Nodes stats level": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: metric: [ indices ] level: "indices" - - is_true: nodes.$master.indices.indices + - is_true: nodes.$node_id.indices.indices --- "Nodes stats unrecognized parameter": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 998909dd9cf1..a09619b7255c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -1,211 +1,227 @@ --- "Metric - blank": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: {} - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - _all": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: _all } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: 
nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - indices _all": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: _all } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - one": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: docs } - - is_true: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: 
nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - multi": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: [ store, get, merge ] } - - is_false: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery + - is_false: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - recovery": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: [ recovery ] } - - is_false: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - _all include_segment_file_sizes": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: 
{ master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery - - is_true: nodes.$master.indices.segments.file_sizes + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery + - is_true: nodes.$node_id.indices.segments.file_sizes --- "Metric - segments include_segment_file_sizes": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true } - - is_false: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery - - is_true: nodes.$master.indices.segments.file_sizes + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery + - is_true: nodes.$node_id.indices.segments.file_sizes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml index 
432e5d8c207e..a478fd7d3f23 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml @@ -1,10 +1,11 @@ --- "Nodes Stats with response filtering": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id # Nodes Stats with no filtering - do: @@ -12,18 +13,18 @@ - is_true: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 } + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 } # Nodes Stats with only "cluster_name" field - do: @@ -32,9 +33,9 @@ - is_true: cluster_name - is_false: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes" field and sub-fields - do: @@ -43,18 +44,18 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 } + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 } # Nodes Stats with "nodes.*.indices" field and sub-fields - do: @@ -63,13 +64,13 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - 
gte: { nodes.$master.indices.segments.count: 0 } - - is_false: nodes.$master.jvm + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes.*.name" and "nodes.*.indices.docs.count" fields - do: @@ -78,12 +79,12 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.jvm + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.jvm # Nodes Stats with all "count" fields - do: @@ -92,18 +93,18 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes # Nodes Stats with all "count" fields in sub-fields of "jvm" field - do: @@ -112,16 +113,16 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.indices.docs.count - - is_false: nodes.$master.indices.segments.count - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.indices.docs.count + - is_false: nodes.$node_id.indices.segments.count + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes # Nodes Stats with "nodes.*.fs.data" fields - do: @@ -130,13 +131,13 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_true: 
nodes.$master.fs.data.0.path - - is_true: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_true: nodes.$node_id.fs.data.0.path + - is_true: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes # Nodes Stats with "nodes.*.fs.data.t*" fields - do: @@ -145,21 +146,22 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_false: nodes.$master.fs.data.0.path - - is_true: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_false: nodes.$node_id.fs.data.0.path + - is_true: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes --- "Nodes Stats filtered using both includes and excludes filters": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id # Nodes Stats with "nodes" field but no JVM stats - do: @@ -168,10 +170,10 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.os - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.os + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments - do: @@ -180,10 +182,10 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - is_false: nodes.$master.indices.segments + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.segments # Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field - do: @@ -192,9 +194,9 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_false: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_false: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml index ad8058876ae4..a6b7f29a183c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml @@ -1,13 +1,13 @@ --- "Discovery stats": - skip: - version: " - 6.0.99" - reason: "published_cluster_states_received arrived in 6.1.0" - - do: - cluster.state: {} + features: [arbitrary_key] - # Get master node id - - set: { master_node: master } + - do: + nodes.info: + node_id: _master + - set: + nodes._arbitrary_key_: master - do: nodes.stats: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml new file mode 100644 index 000000000000..bcc28c785342 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml @@ -0,0 +1,363 @@ +setup: + - skip: + version: " - 7.0.99" + reason: "added in 7.1.0" + + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field1: + type: text + my_field2: + type: text + + - do: + index: + index: test + id: 1 + body: + my_field1: "brown fox jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 2 + body: + my_field1: "brown emu jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 3 + body: + my_field1: "jumparound" + my_field2: "emu" + + - do: + index: + index: test + id: 4 + body: + my_field1: "dog" + my_field2: "brown fox jump lazy" + + - do: + indices.refresh: {} + +--- +"scoring complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: "brown fox jump" + + - match: { hits.total: 3 } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"scoring partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: "brown fox ju" + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"minimum should match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + minimum_should_match: 3 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "BROWN dog" + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + +--- +"operator": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + operator: AND + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field2: + query: "xylophoen foo" + fuzziness: 1 + prefix_length: 1 + max_expansions: 10 + fuzzy_transpositions: true + fuzzy_rewrite: constant_score + + - match: { hits.total: 2 } + - match: { hits.hits.0._source.my_field2: "xylophone" } + - match: { hits.hits.1._source.my_field2: "xylophone" } + +--- +"multi_match single field complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { 
hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"multi_match single field partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"multi_match multiple fields complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._source.my_field1: "brown fox jump" } + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2._source.my_field1: "brown emu jump" } + +--- +"multi_match multiple fields partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump laz" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._source.my_field1: "brown fox jump" } + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2._source.my_field1: "brown emu jump" } + +--- +"multi_match multiple fields with analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "BROWN FOX JUMP dog" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with minimum_should_match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + minimum_should_match: 4 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "dob nomatch" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + fuzziness: 1 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with boost": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + 
multi_match: + query: "brown emu" + type: bool_prefix + fields: [ "my_field1", "my_field2^10" ] + fuzziness: 1 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._source.my_field2: "emu" } + +--- +"multi_match multiple fields with slop throws exception": + + - do: + catch: /\[slop\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + slop: 1 + +--- +"multi_match multiple fields with cutoff_frequency throws exception": + + - do: + catch: /\[cutoff_frequency\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + cutoff_frequency: 0.001 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml index caf97b302f13..addeb3226c57 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml @@ -1,9 +1,6 @@ --- "get task test": # Note that this gets much better testing in reindex's tests because it actually saves the task - - do: - cluster.state: {} - - do: catch: missing tasks.get: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index 4fdfc378bee2..1742134af2b7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -1,16 +1,18 @@ --- "tasks_list test": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - # Get master node id - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: tasks.list: {} - is_true: nodes - - is_true: nodes.$master.roles + - is_true: nodes.$node_id.roles - do: tasks.list: diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index dbc8246aa7d7..bcc651eb83a6 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -122,10 +122,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_6_2_ID = 6060299; public static final Version V_6_6_2 = new Version(V_6_6_2_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); - public static final int V_6_6_3_ID = 6060399; - public static final Version V_6_6_3 = new Version(V_6_6_3_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_7_0_ID = 6070099; public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_7_1_ID = 6070199; + public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; @@ -152,10 +152,10 @@ public static Version fromId(int id) { return V_7_1_0; case V_7_0_0_ID: return V_7_0_0; + case 
V_6_7_1_ID: + return V_6_7_1; case V_6_7_0_ID: return V_6_7_0; - case V_6_6_3_ID: - return V_6_6_3; case V_6_6_2_ID: return V_6_6_2; case V_6_6_1_ID: diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 83e1e0161443..2cfe66372115 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -289,6 +289,7 @@ import org.elasticsearch.rest.action.cat.RestAliasAction; import org.elasticsearch.rest.action.cat.RestAllocationAction; import org.elasticsearch.rest.action.cat.RestCatAction; +import org.elasticsearch.rest.action.cat.RestCatRecoveryAction; import org.elasticsearch.rest.action.cat.RestFielddataAction; import org.elasticsearch.rest.action.cat.RestHealthAction; import org.elasticsearch.rest.action.cat.RestIndicesAction; @@ -665,7 +666,7 @@ public void initRestHandlers(Supplier nodesInCluster) { // Fully qualified to prevent interference with rest.action.count.RestCountAction registerHandler.accept(new org.elasticsearch.rest.action.cat.RestCountAction(settings, restController)); // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction - registerHandler.accept(new org.elasticsearch.rest.action.cat.RestRecoveryAction(settings, restController)); + registerHandler.accept(new RestCatRecoveryAction(settings, restController)); registerHandler.accept(new RestHealthAction(settings, restController)); registerHandler.accept(new org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction(settings, restController)); registerHandler.accept(new RestAliasAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 469c14f49bd4..04901cbe256e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -69,18 +69,17 @@ protected ClusterBlockException checkBlock(DeleteRepositoryRequest request, Clus protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.unregisterRepository( - new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name()) - .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()), - new ActionListener() { - @Override - public void onResponse(ClusterStateUpdateResponse unregisterRepositoryResponse) { - listener.onResponse(new AcknowledgedResponse(unregisterRepositoryResponse.isAcknowledged())); - } + request, + new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse unregisterRepositoryResponse) { + listener.onResponse(new AcknowledgedResponse(unregisterRepositoryResponse.isAcknowledged())); + } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index a495ba72f35b..4a58edf64616 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -68,13 +68,7 @@ protected ClusterBlockException checkBlock(PutRepositoryRequest request, Cluster @Override protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener listener) { - - repositoriesService.registerRepository( - new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]", - request.name(), request.type(), request.verify()) - .settings(request.settings()) - .masterNodeTimeout(request.masterNodeTimeout()) - .ackTimeout(request.timeout()), new ActionListener() { + repositoriesService.registerRepository(request, new ActionListener() { @Override public void onResponse(ClusterStateUpdateResponse response) { @@ -87,5 +81,4 @@ public void onFailure(Exception e) { } }); } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 19fa4cbde15c..aa973d4797a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -26,13 +26,15 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.repositories.RepositoriesService; -import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; + /** * Transport action for verifying repository operation */ @@ -68,14 +70,10 @@ protected ClusterBlockException checkBlock(VerifyRepositoryRequest request, Clus @Override protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener listener) { - repositoriesService.verifyRepository(request.name(), new ActionListener() { + repositoriesService.verifyRepository(request.name(), new ActionListener>() { @Override - public void onResponse(RepositoriesService.VerifyResponse verifyResponse) { - if (verifyResponse.failed()) { - listener.onFailure(new RepositoryVerificationException(request.name(), verifyResponse.failureDescription())); - } else { - listener.onResponse(new VerifyRepositoryResponse(verifyResponse.nodes())); - } + public void onResponse(List verifyResponse) { + listener.onResponse(new VerifyRepositoryResponse(verifyResponse.toArray(new DiscoveryNode[0]))); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java index 892daae4bb27..cc97b6237e30 100644 --- 
a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java @@ -23,19 +23,12 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.action.support.replication.TransportWriteAction; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -45,68 +38,21 @@ public abstract class TransportSingleItemBulkWriteAction< Request extends ReplicatedWriteRequest, Response extends ReplicationResponse & WriteResponse - > extends TransportWriteAction { + > extends HandledTransportAction { private final TransportBulkAction bulkAction; - private final TransportShardBulkAction shardBulkAction; - - protected TransportSingleItemBulkWriteAction(Settings settings, String actionName, TransportService transportService, - ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, - ShardStateAction shardStateAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor, - TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { - super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, request, replicaRequest, executor); + protected TransportSingleItemBulkWriteAction(String actionName, TransportService transportService, ActionFilters actionFilters, + Supplier request, TransportBulkAction bulkAction) { + super(actionName, transportService, actionFilters, request); this.bulkAction = bulkAction; - this.shardBulkAction = shardBulkAction; } - @Override protected void doExecute(Task task, final Request request, final ActionListener listener) { bulkAction.execute(task, toSingleItemBulkRequest(request), wrapBulkResponse(listener)); } - @Override - protected WritePrimaryResult shardOperationOnPrimary( - Request request, final IndexShard primary) throws Exception { - BulkItemRequest[] itemRequests = new BulkItemRequest[1]; - WriteRequest.RefreshPolicy refreshPolicy = request.getRefreshPolicy(); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); - itemRequests[0] = new BulkItemRequest(0, ((DocWriteRequest) request)); - BulkShardRequest bulkShardRequest = new BulkShardRequest(request.shardId(), refreshPolicy, itemRequests); - WritePrimaryResult bulkResult = - shardBulkAction.shardOperationOnPrimary(bulkShardRequest, primary); - assert bulkResult.finalResponseIfSuccessful.getResponses().length == 1 : "expected only one bulk shard response"; - 
BulkItemResponse itemResponse = bulkResult.finalResponseIfSuccessful.getResponses()[0]; - final Response response; - final Exception failure; - if (itemResponse.isFailed()) { - failure = itemResponse.getFailure().getCause(); - response = null; - } else { - response = (Response) itemResponse.getResponse(); - failure = null; - } - return new WritePrimaryResult<>(request, response, bulkResult.location, failure, primary, logger); - } - - @Override - protected WriteReplicaResult shardOperationOnReplica( - Request replicaRequest, IndexShard replica) throws Exception { - BulkItemRequest[] itemRequests = new BulkItemRequest[1]; - WriteRequest.RefreshPolicy refreshPolicy = replicaRequest.getRefreshPolicy(); - itemRequests[0] = new BulkItemRequest(0, ((DocWriteRequest) replicaRequest)); - BulkShardRequest bulkShardRequest = new BulkShardRequest(replicaRequest.shardId(), refreshPolicy, itemRequests); - WriteReplicaResult result = shardBulkAction.shardOperationOnReplica(bulkShardRequest, replica); - // a replica operation can never throw a document-level failure, - // as the same document has been already indexed successfully in the primary - return new WriteReplicaResult<>(replicaRequest, result.location, null, replica, logger); - } - - public static ActionListener wrapBulkResponse(ActionListener listener) { return ActionListener.wrap(bulkItemResponses -> { diff --git a/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 32c599a9f580..5b85f2f90851 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -20,16 +20,9 @@ package org.elasticsearch.action.delete; import org.elasticsearch.action.bulk.TransportBulkAction; -import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** @@ -41,17 +34,7 @@ public class TransportDeleteAction extends TransportSingleItemBulkWriteAction { @Inject - public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, - IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.WRITE, - bulkAction, shardBulkAction); - } - - @Override - protected DeleteResponse newResponseInstance() { - return new DeleteResponse(); + public TransportDeleteAction(TransportService transportService, ActionFilters actionFilters, TransportBulkAction bulkAction) { + super(DeleteAction.NAME, transportService, actionFilters, 
DeleteRequest::new, bulkAction); } } diff --git a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 8480c7be3bb6..b8e3b9b89b3b 100644 --- a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -20,16 +20,9 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.bulk.TransportBulkAction; -import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** @@ -48,18 +41,7 @@ public class TransportIndexAction extends TransportSingleItemBulkWriteAction { @Inject - public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, - IndicesService indicesService, - ThreadPool threadPool, ShardStateAction shardStateAction, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { - super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.WRITE, - bulkAction, shardBulkAction); - } - - @Override - protected IndexResponse newResponseInstance() { - return new IndexResponse(); + public TransportIndexAction(ActionFilters actionFilters, TransportService transportService, TransportBulkAction bulkAction) { + super(IndexAction.NAME, transportService, actionFilters, IndexRequest::new, bulkAction); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index f54f101041d1..0125084c3709 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -486,7 +486,7 @@ private ReducedQueryPhase reducedQueryPhase(Collection listener) { + assert request.shardId() != null : "request shardId must be set"; new ReroutePhase((ReplicationTask) task, request, listener).run(); } @@ -779,7 +780,6 @@ protected void doRun() { // resolve all derived request fields, so we can route and apply it resolveRequest(indexMetaData, request); - assert request.shardId() != null : "request shardId must be set in resolveRequest"; assert request.waitForActiveShards() != ActiveShardCount.DEFAULT : "request waitForActiveShards must be set in resolveRequest"; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index c4643771fb79..7271013cb363 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -29,10 +29,13 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.Node; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; @@ -75,15 +78,28 @@ public class ClusterBootstrapService { public ClusterBootstrapService(Settings settings, TransportService transportService, Supplier> discoveredNodesSupplier, BooleanSupplier isBootstrappedSupplier, Consumer votingConfigurationConsumer) { - - final List initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); - bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); - if (bootstrapRequirements.size() != initialMasterNodes.size()) { - throw new IllegalArgumentException( - "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + if (DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE.equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) { + if (INITIAL_MASTER_NODES_SETTING.exists(settings)) { + throw new IllegalArgumentException("setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + + "] is not allowed when [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] is set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "]"); + } + if (DiscoveryNode.isMasterNode(settings) == false) { + throw new IllegalArgumentException("node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] must be master-eligible"); + } + bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings)); + unconfiguredBootstrapTimeout = null; + } else { + final List initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); + if (bootstrapRequirements.size() != initialMasterNodes.size()) { + throw new IllegalArgumentException( + "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + } + unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); } - unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? 
null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); this.transportService = transportService; this.discoveredNodesSupplier = discoveredNodesSupplier; this.isBootstrappedSupplier = isBootstrappedSupplier; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 71cd2fbb121e..154f4ab162d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -51,23 +51,27 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.discovery.HandshakingTransportAddressConnector; import org.elasticsearch.discovery.PeerFinder; import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.SeedHostsResolver; +import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportResponse.Empty; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Optional; @@ -93,6 +97,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery TimeValue.timeValueMillis(30000), TimeValue.timeValueMillis(1), Setting.Property.NodeScope); private final Settings settings; + private final boolean singleNodeDiscovery; private final TransportService transportService; private final MasterService masterService; private final AllocationService allocationService; @@ -142,6 +147,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.masterService = masterService; this.allocationService = allocationService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); + this.singleNodeDiscovery = DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE.equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)); this.joinHelper = new JoinHelper(settings, allocationService, masterService, transportService, this::getCurrentTerm, this::getStateForMasterService, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators); this.persistedStateSupplier = persistedStateSupplier; @@ -423,6 +429,13 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback assert Thread.holdsLock(mutex) == false; assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible"; logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest); + + if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) { + joinCallback.onFailure(new IllegalStateException("cannot join node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + "] set to [" + 
DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] discovery")); + return; + } + transportService.connectToNode(joinRequest.getSourceNode()); final ClusterState stateForJoinValidation = getStateForMasterService(); @@ -635,6 +648,14 @@ protected void doStart() { coordinationState.set(new CoordinationState(settings, getLocalNode(), persistedState)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); + VotingConfiguration votingConfiguration = coordinationState.get().getLastAcceptedState().getLastCommittedConfiguration(); + if (singleNodeDiscovery && + votingConfiguration.isEmpty() == false && + votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) { + throw new IllegalStateException("cannot start with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] when local node " + getLocalNode() + + " does not have quorum in voting configuration " + votingConfiguration); + } ClusterState initialState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) .blocks(ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) @@ -983,20 +1004,6 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) new ListenableFuture<>(), ackListener, publishListener); currentPublication = Optional.of(publication); - transportService.getThreadPool().schedule(new Runnable() { - @Override - public void run() { - synchronized (mutex) { - publication.cancel("timed out after " + publishTimeout); - } - } - - @Override - public String toString() { - return "scheduled timeout for " + publication; - } - }, publishTimeout, Names.GENERIC); - final DiscoveryNodes publishNodes = publishRequest.getAcceptedState().nodes(); leaderChecker.setCurrentNodes(publishNodes); followersChecker.setCurrentNodes(publishNodes); @@ -1062,7 +1069,8 @@ private class CoordinatorPeerFinder extends PeerFinder { CoordinatorPeerFinder(Settings settings, TransportService transportService, TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver) { - super(settings, transportService, transportAddressConnector, configuredHostsResolver); + super(settings, transportService, transportAddressConnector, + singleNodeDiscovery ? hostsResolver -> Collections.emptyList() : configuredHostsResolver); } @Override @@ -1073,6 +1081,13 @@ protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { } } + @Override + protected void startProbe(TransportAddress transportAddress) { + if (singleNodeDiscovery == false) { + super.startProbe(transportAddress); + } + } + @Override protected void onFoundPeersUpdated() { synchronized (mutex) { @@ -1161,6 +1176,7 @@ class CoordinatorPublication extends Publication { private final AckListener ackListener; private final ActionListener publishListener; private final PublicationTransportHandler.PublicationContext publicationContext; + private final Scheduler.ScheduledCancellable scheduledCancellable; // We may not have accepted our own state before receiving a join from another node, causing its join to be rejected (we cannot // safely accept a join whose last-accepted term/version is ahead of ours), so store them up and process them at the end. 
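The single-node discovery handling introduced above reduces to a few rules: when discovery.type is single-node, cluster.initial_master_nodes must not be set, the node must be master-eligible, the only bootstrap requirement is the local node itself, and join requests from any other node are rejected. As a rough, self-contained sketch of the bootstrap-requirement decision (class, method, and parameter names here are illustrative only; the real code reads these values from Settings under the discovery.type and cluster.initial_master_nodes keys):

    import java.util.Collections;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    final class SingleNodeBootstrapSketch {

        // Illustrative stand-in for the constructor logic in ClusterBootstrapService above.
        static Set<String> bootstrapRequirements(String discoveryType,
                                                 List<String> initialMasterNodes,
                                                 boolean masterEligible,
                                                 String localNodeName) {
            if ("single-node".equals(discoveryType)) {
                if (initialMasterNodes.isEmpty() == false) {
                    throw new IllegalArgumentException(
                            "cluster.initial_master_nodes is not allowed when discovery.type is single-node");
                }
                if (masterEligible == false) {
                    throw new IllegalArgumentException(
                            "a node using single-node discovery must be master-eligible");
                }
                // the node bootstraps around itself and never waits for other nodes
                return Collections.singleton(localNodeName);
            }
            Set<String> requirements = new LinkedHashSet<>(initialMasterNodes);
            if (requirements.size() != initialMasterNodes.size()) {
                throw new IllegalArgumentException(
                        "cluster.initial_master_nodes contains duplicates: " + initialMasterNodes);
            }
            return Collections.unmodifiableSet(requirements);
        }

        public static void main(String[] args) {
            // a single-node cluster's only bootstrap requirement is its own node name
            System.out.println(bootstrapRequirements(
                    "single-node", Collections.<String>emptyList(), true, "node-0")); // prints [node-0]
        }
    }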
@@ -1201,6 +1217,19 @@ public void onNodeAck(DiscoveryNode node, Exception e) { this.localNodeAckEvent = localNodeAckEvent; this.ackListener = ackListener; this.publishListener = publishListener; + this.scheduledCancellable = transportService.getThreadPool().schedule(new Runnable() { + @Override + public void run() { + synchronized (mutex) { + cancel("timed out after " + publishTimeout); + } + } + + @Override + public String toString() { + return "scheduled timeout for " + this; + } + }, publishTimeout, Names.GENERIC); } private void removePublicationAndPossiblyBecomeCandidate(String reason) { @@ -1242,6 +1271,7 @@ public void onFailure(String source, Exception e) { synchronized (mutex) { removePublicationAndPossiblyBecomeCandidate("clusterApplier#onNewClusterState"); } + scheduledCancellable.cancel(); ackListener.onNodeAck(getLocalNode(), e); publishListener.onFailure(e); } @@ -1271,6 +1301,7 @@ public void onSuccess(String source) { } lagDetector.startLagDetector(publishRequest.getAcceptedState().version()); } + scheduledCancellable.cancel(); ackListener.onNodeAck(getLocalNode(), null); publishListener.onResponse(null); } @@ -1281,6 +1312,7 @@ public void onSuccess(String source) { public void onFailure(Exception e) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; removePublicationAndPossiblyBecomeCandidate("Publication.onCompletion(false)"); + scheduledCancellable.cancel(); final FailedToCommitClusterStateException exception = new FailedToCommitClusterStateException("publication failed", e); ackListener.onNodeAck(getLocalNode(), exception); // other nodes have acked, but not the master. diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 050d97ba54cf..03fa790a8717 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -157,10 +157,19 @@ Index[] concreteIndices(Context context, String... indexExpressions) { for (ExpressionResolver expressionResolver : expressionResolvers) { expressions = expressionResolver.resolve(context, expressions); } - + if (expressions.isEmpty()) { if (!options.allowNoIndices()) { - IndexNotFoundException infe = new IndexNotFoundException((String)null); + IndexNotFoundException infe; + if (indexExpressions.length == 1) { + if (indexExpressions[0].equals(MetaData.ALL)) { + infe = new IndexNotFoundException("no indices exist", (String)null); + } else { + infe = new IndexNotFoundException((String)null); + } + } else { + infe = new IndexNotFoundException((String)null); + } infe.setResources("index_expression", indexExpressions); throw infe; } else { @@ -173,7 +182,12 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression); if (aliasOrIndex == null ) { if (failNoIndices) { - IndexNotFoundException infe = new IndexNotFoundException(expression); + IndexNotFoundException infe; + if (expression.equals(MetaData.ALL)) { + infe = new IndexNotFoundException("no indices exist", expression); + } else { + infe = new IndexNotFoundException(expression); + } infe.setResources("index_expression", expression); throw infe; } else { diff --git a/server/src/main/java/org/elasticsearch/common/Numbers.java b/server/src/main/java/org/elasticsearch/common/Numbers.java index 27c1dd18e97b..51aecb5e19c9 100644 --- a/server/src/main/java/org/elasticsearch/common/Numbers.java +++ b/server/src/main/java/org/elasticsearch/common/Numbers.java @@ -125,6 +125,10 @@ public static long toLongExact(Number n) { } } + // weak bounds on the BigDecimal representation to allow for coercion + private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same * value and {@code coerce} is false. */ @@ -138,6 +142,10 @@ public static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { BigDecimal bigDecimalValue = new BigDecimal(stringValue); + if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 || + bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index a45667b908d7..6dcaaaa7d6a2 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentSubParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; @@ -435,51 +436,52 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina NumberFormatException numberFormatException = null; if(parser.currentToken() == Token.START_OBJECT) { - while(parser.nextToken() != Token.END_OBJECT) { - if(parser.currentToken() == Token.FIELD_NAME) { - String field = parser.currentName(); - if(LATITUDE.equals(field)) { - parser.nextToken(); - switch (parser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lat = parser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new ElasticsearchParseException("latitude must be a number"); - } - } else if (LONGITUDE.equals(field)) { - parser.nextToken(); - switch (parser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lon = parser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new ElasticsearchParseException("longitude must be a number"); - } - } else if (GEOHASH.equals(field)) { - if(parser.nextToken() == Token.VALUE_STRING) { - geohash = parser.text(); + try (XContentSubParser subParser = new XContentSubParser(parser)) { + while (subParser.nextToken() != Token.END_OBJECT) { + if (subParser.currentToken() == Token.FIELD_NAME) { + String field = subParser.currentName(); + if (LATITUDE.equals(field)) { + subParser.nextToken(); + switch (subParser.currentToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + lat = subParser.doubleValue(true); + } catch (NumberFormatException e) { + numberFormatException = e; + } + break; + default: + throw new ElasticsearchParseException("latitude must be a number"); + } + } else if (LONGITUDE.equals(field)) { + subParser.nextToken(); + switch (subParser.currentToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + lon = subParser.doubleValue(true); + } catch (NumberFormatException e) { + numberFormatException = e; + } + break; + default: + throw new ElasticsearchParseException("longitude must be a number"); + } + } else if (GEOHASH.equals(field)) { + if (subParser.nextToken() == Token.VALUE_STRING) { + geohash = subParser.text(); + } else { + throw new ElasticsearchParseException("geohash must be a string"); + } } else { - throw new ElasticsearchParseException("geohash must be a string"); + throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); } } else { - throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); + throw new 
ElasticsearchParseException("token [{}] not allowed", subParser.currentToken()); } - } else { - throw new ElasticsearchParseException("token [{}] not allowed", parser.currentToken()); } } - if (geohash != null) { if(!Double.isNaN(lat) || !Double.isNaN(lon)) { throw new ElasticsearchParseException("field must be either lat/lon or geohash"); @@ -498,19 +500,21 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina } } else if(parser.currentToken() == Token.START_ARRAY) { - int element = 0; - while(parser.nextToken() != Token.END_ARRAY) { - if(parser.currentToken() == Token.VALUE_NUMBER) { - element++; - if(element == 1) { - lon = parser.doubleValue(); - } else if(element == 2) { - lat = parser.doubleValue(); + try (XContentSubParser subParser = new XContentSubParser(parser)) { + int element = 0; + while (subParser.nextToken() != Token.END_ARRAY) { + if (subParser.currentToken() == Token.VALUE_NUMBER) { + element++; + if (element == 1) { + lon = subParser.doubleValue(); + } else if (element == 2) { + lat = subParser.doubleValue(); + } else { + GeoPoint.assertZValue(ignoreZValue, subParser.doubleValue()); + } } else { - GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + throw new ElasticsearchParseException("numeric value expected"); } - } else { - throw new ElasticsearchParseException("numeric value expected"); } } return point.reset(lat, lon); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 8e51bc5951d5..960df44a6251 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -50,11 +50,24 @@ public float score() { private final ScoreScript.LeafFactory script; + private final int shardId; + private final String indexName; + public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script) { super(CombineFunction.REPLACE); this.sScript = sScript; this.script = script; + this.indexName = null; + this.shardId = -1; + } + + public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script, String indexName, int shardId) { + super(CombineFunction.REPLACE); + this.sScript = sScript; + this.script = script; + this.indexName = indexName; + this.shardId = shardId; } @Override @@ -62,6 +75,8 @@ public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOEx final ScoreScript leafScript = script.newInstance(ctx); final CannedScorer scorer = new CannedScorer(); leafScript.setScorer(scorer); + leafScript._setIndexName(indexName); + leafScript._setShard(shardId); return new LeafScoreFunction() { @Override public double score(int docId, float subQueryScore) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index e8930afadfe3..bbb824a49799 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -154,6 +154,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING, MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING, MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING, + 
MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index a8dce661e1c9..2379b4f00c2b 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -1585,7 +1585,7 @@ static JavaDateFormatter merge(String pattern, List formatters) { if (printer == null) { printer = javaDateFormatter.getPrinter(); } - dateTimeFormatters.add(javaDateFormatter.getParser()); + dateTimeFormatters.addAll(javaDateFormatter.getParsers()); roundupBuilder.appendOptional(javaDateFormatter.getRoundupParser()); } DateTimeFormatter roundUpParser = roundupBuilder.toFormatter(Locale.ROOT); @@ -1632,7 +1632,7 @@ public static ZonedDateTime from(TemporalAccessor accessor) { if (zoneId == null) { zoneId = ZoneOffset.UTC; } - + LocalDate localDate = accessor.query(TemporalQueries.localDate()); LocalTime localTime = accessor.query(TemporalQueries.localTime()); boolean isLocalDateSet = localDate != null; diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index c3adcc84b578..d0f4200b3baf 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; +import java.text.ParsePosition; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -29,7 +30,10 @@ import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalField; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -39,6 +43,7 @@ class JavaDateFormatter implements DateFormatter { // base fields which should be used for default parsing, when we round up for date math private static final Map ROUND_UP_BASE_FIELDS = new HashMap<>(6); + { ROUND_UP_BASE_FIELDS.put(ChronoField.MONTH_OF_YEAR, 1L); ROUND_UP_BASE_FIELDS.put(ChronoField.DAY_OF_MONTH, 1L); @@ -50,22 +55,15 @@ class JavaDateFormatter implements DateFormatter { private final String format; private final DateTimeFormatter printer; - private final DateTimeFormatter parser; + private final List parsers; private final DateTimeFormatter roundupParser; - private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter roundupParser, DateTimeFormatter parser) { - this.format = format; - this.printer = printer; - this.roundupParser = roundupParser; - this.parser = parser; - } - JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { this(format, printer, builder -> ROUND_UP_BASE_FIELDS.forEach(builder::parseDefaulting), parsers); } JavaDateFormatter(String format, DateTimeFormatter printer, Consumer roundupParserConsumer, - DateTimeFormatter... parsers) { + DateTimeFormatter... 
parsers) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } @@ -79,26 +77,21 @@ private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeForm } this.printer = printer; this.format = format; + if (parsers.length == 0) { - this.parser = printer; - } else if (parsers.length == 1) { - this.parser = parsers[0]; + this.parsers = Collections.singletonList(printer); } else { - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - for (DateTimeFormatter parser : parsers) { - builder.appendOptional(parser); - } - this.parser = builder.toFormatter(Locale.ROOT); + this.parsers = Arrays.asList(parsers); } DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); if (format.contains("||") == false) { - builder.append(this.parser); + builder.append(this.parsers.get(0)); } roundupParserConsumer.accept(builder); - DateTimeFormatter roundupFormatter = builder.toFormatter(parser.getLocale()); + DateTimeFormatter roundupFormatter = builder.toFormatter(locale()); if (printer.getZone() != null) { - roundupFormatter = roundupFormatter.withZone(printer.getZone()); + roundupFormatter = roundupFormatter.withZone(zone()); } this.roundupParser = roundupFormatter; } @@ -107,10 +100,6 @@ DateTimeFormatter getRoundupParser() { return roundupParser; } - DateTimeFormatter getParser() { - return parser; - } - DateTimeFormatter getPrinter() { return printer; } @@ -122,30 +111,64 @@ public TemporalAccessor parse(String input) { } try { - return parser.parse(input); + return doParse(input); } catch (DateTimeParseException e) { throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); } } + /** + * Attempt parsing the input without throwing exception. If multiple parsers are provided, + * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. + * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject + * which does not throw exceptions when parsing failed. + * + * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) + * patterns. Joda does not suffer from this. + * https://bugs.openjdk.java.net/browse/JDK-8188771 + * + * @param input An arbitrary string resembling the string representation of a date or time + * @return a TemporalAccessor if parsing was successful. 
+ * @throws DateTimeParseException when unable to parse with any parsers + */ + private TemporalAccessor doParse(String input) { + if (parsers.size() > 1) { + for (DateTimeFormatter formatter : parsers) { + ParsePosition pos = new ParsePosition(0); + Object object = formatter.toFormat().parseObject(input, pos); + if (parsingSucceeded(object, input, pos) == true) { + return (TemporalAccessor) object; + } + } + throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); + } + return this.parsers.get(0).parse(input); + } + + private boolean parsingSucceeded(Object object, String input, ParsePosition pos) { + return object != null && pos.getIndex() == input.length(); + } + @Override public DateFormatter withZone(ZoneId zoneId) { // shortcurt to not create new objects unnecessarily - if (zoneId.equals(parser.getZone())) { + if (zoneId.equals(zone())) { return this; } - return new JavaDateFormatter(format, printer.withZone(zoneId), roundupParser.withZone(zoneId), parser.withZone(zoneId)); + return new JavaDateFormatter(format, printer.withZone(zoneId), + parsers.stream().map(p -> p.withZone(zoneId)).toArray(size -> new DateTimeFormatter[size])); } @Override public DateFormatter withLocale(Locale locale) { // shortcurt to not create new objects unnecessarily - if (locale.equals(parser.getLocale())) { + if (locale.equals(locale())) { return this; } - return new JavaDateFormatter(format, printer.withLocale(locale), roundupParser.withLocale(locale), parser.withLocale(locale)); + return new JavaDateFormatter(format, printer.withLocale(locale), + parsers.stream().map(p -> p.withLocale(locale)).toArray(size -> new DateTimeFormatter[size])); } @Override @@ -170,7 +193,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(format, parser, roundupParser); + return new JavaDateMathParser(format, this, getRoundupParser()); } @Override @@ -186,12 +209,16 @@ public boolean equals(Object obj) { JavaDateFormatter other = (JavaDateFormatter) obj; return Objects.equals(format, other.format) && - Objects.equals(locale(), other.locale()) && - Objects.equals(this.printer.getZone(), other.printer.getZone()); + Objects.equals(locale(), other.locale()) && + Objects.equals(this.printer.getZone(), other.printer.getZone()); } @Override public String toString() { return String.format(Locale.ROOT, "format[%s] locale[%s]", format, locale()); } + + Collection getParsers() { + return parsers; + } } diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 05e1e75efca3..dc7c195e2fd6 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -35,6 +35,7 @@ import java.time.temporal.TemporalAdjusters; import java.time.temporal.TemporalQueries; import java.util.Objects; +import java.util.function.Function; import java.util.function.LongSupplier; /** @@ -46,11 +47,11 @@ */ public class JavaDateMathParser implements DateMathParser { - private final DateTimeFormatter formatter; + private final JavaDateFormatter formatter; private final DateTimeFormatter roundUpFormatter; private final String format; - JavaDateMathParser(String format, DateTimeFormatter formatter, DateTimeFormatter roundUpFormatter) { + JavaDateMathParser(String format, JavaDateFormatter formatter, DateTimeFormatter roundUpFormatter) { this.format = format; 
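// --- Hedged illustration, not part of the change ----------------------------------
// The core of the multi-parser strategy described in the javadoc above, reduced to
// plain java.time: Format.parseObject with a ParsePosition avoids exceptions on
// failure, and the position check enforces that the whole input was consumed.
// Class, method and variable names here are invented.
import java.text.ParsePosition;
import java.time.format.DateTimeFormatter;
import java.time.temporal.TemporalAccessor;
import java.util.List;
import java.util.Locale;

class LenientMultiParserSketch {
    static TemporalAccessor parse(String input, List<DateTimeFormatter> parsers) {
        for (DateTimeFormatter candidate : parsers) {
            ParsePosition pos = new ParsePosition(0);
            // returns null instead of throwing when the pattern does not match
            Object parsed = candidate.toFormat().parseObject(input, pos);
            if (parsed != null && pos.getIndex() == input.length()) {
                return (TemporalAccessor) parsed;
            }
        }
        throw new IllegalArgumentException("no parser matched [" + input + "]");
    }

    public static void main(String[] args) {
        // the first pattern fails, the second consumes the whole input and wins
        System.out.println(parse("2019-03-20", List.of(
                DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT),
                DateTimeFormatter.ISO_LOCAL_DATE)));
    }
}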
Objects.requireNonNull(formatter); this.formatter = formatter; @@ -215,12 +216,12 @@ private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNo throw new ElasticsearchParseException("cannot parse empty date"); } - DateTimeFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; + Function formatter = roundUpIfNoTime ? this.roundUpFormatter::parse : this.formatter::parse; try { if (timeZone == null) { - return DateFormatters.from(formatter.parse(value)).toInstant(); + return DateFormatters.from(formatter.apply(value)).toInstant(); } else { - TemporalAccessor accessor = formatter.parse(value); + TemporalAccessor accessor = formatter.apply(value); ZoneId zoneId = TemporalQueries.zone().queryFrom(accessor); if (zoneId != null) { timeZone = zoneId; @@ -228,7 +229,7 @@ private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNo return DateFormatters.from(accessor).withZoneSameLocal(timeZone).toInstant(); } - } catch (DateTimeParseException e) { + } catch (IllegalArgumentException | DateTimeParseException e) { throw new ElasticsearchParseException("failed to parse date field [{}] with format [{}]: [{}]", e, value, format, e.getMessage()); } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index ab95f2a43049..a14def8fa86b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.single.SingleNodeDiscovery; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.threadpool.ThreadPool; @@ -50,7 +49,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.function.BiConsumer; @@ -68,6 +66,8 @@ public class DiscoveryModule { public static final String ZEN2_DISCOVERY_TYPE = "zen"; + public static final String SINGLE_NODE_DISCOVERY_TYPE = "single-node"; + public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", ZEN2_DISCOVERY_TYPE, Function.identity(), Property.NodeScope); public static final Setting> DISCOVERY_SEED_PROVIDERS_SETTING = @@ -114,6 +114,8 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic List filteredSeedProviders = seedProviderNames.stream() .map(hostProviders::get).map(Supplier::get).collect(Collectors.toList()); + String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); + final SeedHostsProvider seedHostsProvider = hostsResolver -> { final List addresses = new ArrayList<>(); for (SeedHostsProvider provider : filteredSeedProviders) { @@ -122,20 +124,17 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic return Collections.unmodifiableList(addresses); }; - Map> discoveryTypes = new HashMap<>(); - discoveryTypes.put(ZEN2_DISCOVERY_TYPE, () -> new Coordinator(NODE_NAME_SETTING.get(settings), settings, clusterSettings, - transportService, namedWriteableRegistry, allocationService, masterService, - () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, clusterApplier, - 
joinValidators, new Random(Randomness.get().nextLong()))); - discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier, - gatewayMetaState)); - String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - Supplier discoverySupplier = discoveryTypes.get(discoveryType); - if (discoverySupplier == null) { + if (ZEN2_DISCOVERY_TYPE.equals(discoveryType) || SINGLE_NODE_DISCOVERY_TYPE.equals(discoveryType)) { + discovery = new Coordinator(NODE_NAME_SETTING.get(settings), + settings, clusterSettings, + transportService, namedWriteableRegistry, allocationService, masterService, + () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, + clusterApplier, joinValidators, new Random(Randomness.get().nextLong())); + } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } + logger.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames); - discovery = Objects.requireNonNull(discoverySupplier.get()); } public Discovery getDiscovery() { diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index 9bcdd5b54426..f3e52e8df561 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -287,7 +287,7 @@ public String toString() { return peersRemoved; } - private void startProbe(TransportAddress transportAddress) { + protected void startProbe(TransportAddress transportAddress) { assert holdsLock() : "PeerFinder mutex not held"; if (active == false) { logger.trace("startProbe({}) not running", transportAddress); diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java deleted file mode 100644 index 2a415a74cd0c..000000000000 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
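// --- Hedged sketch, not part of the change -----------------------------------------
// With this change both "zen" and "single-node" are served by the same Coordinator
// path, so a node configured as below no longer instantiates the removed
// SingleNodeDiscovery. Only the setting key and constant come from this change;
// building Settings directly like this is purely illustrative.
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryModule;

class SingleNodeDiscoverySettingsSketch {
    static Settings singleNodeSettings() {
        return Settings.builder()
            .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE)
            .build();
    }
}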
- */ - -package org.elasticsearch.discovery.single; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.elasticsearch.cluster.service.ClusterApplierService; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.gateway.GatewayMetaState; -import org.elasticsearch.transport.TransportService; - -import java.util.Objects; - -import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; - -/** - * A discovery implementation where the only member of the cluster is the local node. - */ -public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery { - private static final Logger logger = LogManager.getLogger(SingleNodeDiscovery.class); - - private final ClusterName clusterName; - protected final TransportService transportService; - private final ClusterApplier clusterApplier; - private volatile ClusterState clusterState; - - public SingleNodeDiscovery(final Settings settings, final TransportService transportService, - final MasterService masterService, final ClusterApplier clusterApplier, - final GatewayMetaState gatewayMetaState) { - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - this.transportService = Objects.requireNonNull(transportService); - masterService.setClusterStateSupplier(() -> clusterState); - this.clusterApplier = clusterApplier; - - if (clusterApplier instanceof ClusterApplierService) { - ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState); - } - } - - @Override - public synchronized void publish(final ClusterChangedEvent event, ActionListener publishListener, - final AckListener ackListener) { - clusterState = event.state(); - ackListener.onCommit(TimeValue.ZERO); - - clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, new ClusterApplyListener() { - @Override - public void onSuccess(String source) { - publishListener.onResponse(null); - ackListener.onNodeAck(transportService.getLocalNode(), null); - } - - @Override - public void onFailure(String source, Exception e) { - publishListener.onFailure(e); - ackListener.onNodeAck(transportService.getLocalNode(), e); - logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e); - } - }); - } - - @Override - public DiscoveryStats stats() { - return new DiscoveryStats(null, null); - } - - @Override - public synchronized void startInitialJoin() { - if (lifecycle.started() == false) { - throw new IllegalStateException("can't start initial join when not started"); - } - // apply a fresh cluster state just so that state recovery gets triggered by 
GatewayService - // TODO: give discovery module control over GatewayService - clusterState = ClusterState.builder(clusterState).build(); - clusterApplier.onNewClusterState("single-node-start-initial-join", () -> clusterState, (source, e) -> {}); - } - - @Override - protected synchronized void doStart() { - // set initial state - DiscoveryNode localNode = transportService.getLocalNode(); - clusterState = createInitialState(localNode); - clusterApplier.setInitialState(clusterState); - } - - protected ClusterState createInitialState(DiscoveryNode localNode) { - ClusterState.Builder builder = ClusterState.builder(clusterName); - return builder.nodes(DiscoveryNodes.builder().add(localNode) - .localNodeId(localNode.getId()) - .masterNodeId(localNode.getId()) - .build()) - .blocks(ClusterBlocks.builder() - .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .build(); - } - - @Override - protected void doStop() { - - } - - @Override - protected void doClose() { - - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/Index.java b/server/src/main/java/org/elasticsearch/index/Index.java index ac5a2763644f..9b6f4dbd98af 100644 --- a/server/src/main/java/org/elasticsearch/index/Index.java +++ b/server/src/main/java/org/elasticsearch/index/Index.java @@ -50,8 +50,8 @@ public class Index implements Writeable, ToXContentObject { private final String uuid; public Index(String name, String uuid) { - this.name = Objects.requireNonNull(name).intern(); - this.uuid = Objects.requireNonNull(uuid).intern(); + this.name = Objects.requireNonNull(name); + this.uuid = Objects.requireNonNull(uuid); } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 6b83d2252dec..acec458b8b0c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -366,6 +366,7 @@ public static Type defaultStoreType(final boolean allowMmap) { } public IndexService newIndexService( + IndexService.IndexCreationContext indexCreationContext, NodeEnvironment environment, NamedXContentRegistry xContentRegistry, IndexService.ShardStoreDeleter shardStoreDeleter, @@ -395,7 +396,7 @@ public IndexService newIndexService( } else { queryCache = new DisabledQueryCache(indexSettings); } - return new IndexService(indexSettings, environment, xContentRegistry, + return new IndexService(indexSettings, indexCreationContext, environment, xContentRegistry, new SimilarityService(indexSettings, scriptService, similarities), shardStoreDeleter, analysisRegistry, engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService, client, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index ea40dd1db016..501dbf442b00 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -136,6 +136,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust public IndexService( IndexSettings indexSettings, + IndexCreationContext indexCreationContext, NodeEnvironment nodeEnv, NamedXContentRegistry xContentRegistry, SimilarityService similarityService, @@ -162,21 +163,36 @@ public IndexService( this.similarityService = similarityService; this.namedWriteableRegistry = namedWriteableRegistry; 
this.circuitBreakerService = circuitBreakerService; - this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), xContentRegistry, similarityService, - mapperRegistry, - // we parse all percolator queries as they would be parsed on shard 0 - () -> newQueryShardContext(0, null, System::currentTimeMillis, null)); - this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); - if (indexSettings.getIndexSortConfig().hasIndexSort()) { - // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. - // The sort order is validated right after the merge of the mapping later in the process. - this.indexSortSupplier = () -> indexSettings.getIndexSortConfig().buildIndexSort( - mapperService::fullName, - indexFieldData::getForField - ); - } else { + if (indexSettings.getIndexMetaData().getState() == IndexMetaData.State.CLOSE && + indexCreationContext == IndexCreationContext.CREATE_INDEX) { // metadata verification needs a mapper service + this.mapperService = null; + this.indexFieldData = null; this.indexSortSupplier = () -> null; + this.bitsetFilterCache = null; + this.warmer = null; + this.indexCache = null; + } else { + this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), xContentRegistry, similarityService, + mapperRegistry, + // we parse all percolator queries as they would be parsed on shard 0 + () -> newQueryShardContext(0, null, System::currentTimeMillis, null)); + this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); + if (indexSettings.getIndexSortConfig().hasIndexSort()) { + // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. + // The sort order is validated right after the merge of the mapping later in the process. 
+ this.indexSortSupplier = () -> indexSettings.getIndexSortConfig().buildIndexSort( + mapperService::fullName, + indexFieldData::getForField + ); + } else { + this.indexSortSupplier = () -> null; + } + indexFieldData.setListener(new FieldDataCacheListener(this)); + this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); + this.warmer = new IndexWarmer(threadPool, indexFieldData, bitsetFilterCache.createListener(threadPool)); + this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); } + this.shardStoreDeleter = shardStoreDeleter; this.bigArrays = bigArrays; this.threadPool = threadPool; @@ -185,10 +201,6 @@ public IndexService( this.eventListener = eventListener; this.nodeEnv = nodeEnv; this.indexStore = indexStore; - indexFieldData.setListener(new FieldDataCacheListener(this)); - this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); - this.warmer = new IndexWarmer(threadPool, indexFieldData, bitsetFilterCache.createListener(threadPool)); - this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); this.engineFactory = Objects.requireNonNull(engineFactory); // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); @@ -202,6 +214,11 @@ public IndexService( updateFsyncTaskIfNecessary(); } + public enum IndexCreationContext { + CREATE_INDEX, + META_DATA_VERIFICATION + } + public int numberOfShards() { return shards.size(); } @@ -548,7 +565,10 @@ List getSearchOperationListener() { // pkg private for @Override public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { - return mapperService().updateMapping(currentIndexMetaData, newIndexMetaData); + if (mapperService == null) { + return false; + } + return mapperService.updateMapping(currentIndexMetaData, newIndexMetaData); } private class StoreCloseListener implements Store.OnClose { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index edea3952ce4c..9bed93c37169 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -633,14 +633,14 @@ protected final GetResult getFromSearcher(Get get, BiFunction search return GetResult.NOT_EXISTS; } if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) { - throw new VersionConflictEngineException(shardId, get.type(), get.id(), + throw new VersionConflictEngineException(shardId, get.id(), get.versionType().explainConflictForReads(versionValue.version, get.version())); } if (get.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( get.getIfSeqNo() != versionValue.seqNo || get.getIfPrimaryTerm() != versionValue.term )) { - throw new VersionConflictEngineException(shardId, get.type(), get.id(), + throw new VersionConflictEngineException(shardId, get.id(), get.getIfSeqNo(), get.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); } if (get.isReadFromTranslog()) { @@ -1004,13 +1004,13 @@ protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOExc currentNotFoundOrDeleted = versionValue.isDelete(); } if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, 
index.type(), index.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm() )) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.type(), index.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); } else if (index.versionType().isVersionConflictForWrites( @@ -1335,13 +1335,13 @@ protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOE } final DeletionStrategy plan; if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.type(), delete.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); } else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm() )) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.type(), delete.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); } else if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 357c9c107836..0f6c217409c3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -28,25 +28,25 @@ public class VersionConflictEngineException extends EngineException { public VersionConflictEngineException(ShardId shardId, Engine.Operation op, long currentVersion, boolean deleted) { - this(shardId, op.type(), op.id(), op.versionType().explainConflictForWrites(currentVersion, op.version(), deleted)); + this(shardId, op.id(), op.versionType().explainConflictForWrites(currentVersion, op.version(), deleted)); } - public VersionConflictEngineException(ShardId shardId, String type, String id, + public VersionConflictEngineException(ShardId shardId, String id, long compareAndWriteSeqNo, long compareAndWriteTerm, long currentSeqNo, long currentTerm) { - this(shardId, type, id, "required seqNo [" + compareAndWriteSeqNo + "], primary term [" + compareAndWriteTerm +"]." 
+ + this(shardId, id, "required seqNo [" + compareAndWriteSeqNo + "], primary term [" + compareAndWriteTerm +"]." + (currentSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ? " but no document was found" : " current document has seqNo [" + currentSeqNo + "] and primary term ["+ currentTerm + "]" )); } - public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) { - this(shardId, null, type, id, explanation); + public VersionConflictEngineException(ShardId shardId, String id, String explanation) { + this(shardId, null, id, explanation); } - public VersionConflictEngineException(ShardId shardId, Throwable cause, String type, String id, String explanation) { - this(shardId, "[{}][{}]: version conflict, {}", cause, type, id, explanation); + public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) { + this(shardId, "[{}]: version conflict, {}", cause, id, explanation); } public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 53d49e657a38..171f8c4bb8b2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -69,6 +69,7 @@ import java.util.Set; import java.util.function.Function; import java.util.function.Supplier; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -101,7 +102,9 @@ public enum MergeReason { public static final Setting INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope); public static final Setting INDEX_MAPPING_DEPTH_LIMIT_SETTING = - Setting.longSetting("index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope); + Setting.longSetting("index.mapping.depth.limit", 20L, 1, Property.Dynamic, Property.IndexScope); + public static final Setting INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING = + Setting.longSetting("index.mapping.field_name_length.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.IndexScope); public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true; @Deprecated public static final Setting INDEX_MAPPER_DYNAMIC_SETTING = @@ -503,6 +506,7 @@ private synchronized Map internalMerge(@Nullable Documen // Also, don't take metadata mappers into account for the field limit check checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size() - metadataMappers.length + fieldAliasMappers.size() ); + checkFieldNameSoftLimit(objectMappers, fieldMappers, fieldAliasMappers); } results.put(newMapper.type(), newMapper); @@ -623,6 +627,24 @@ private void checkDepthLimit(String objectPath, long maxDepth) { } } + private void checkFieldNameSoftLimit(Collection objectMappers, + Collection fieldMappers, + Collection fieldAliasMappers) { + final long maxFieldNameLength = indexSettings.getValue(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING); + + Stream.of(objectMappers.stream(), fieldMappers.stream(), fieldAliasMappers.stream()) + .reduce(Stream::concat) + .orElseGet(Stream::empty) + .forEach(mapper -> { + String name = mapper.simpleName(); + if (name.length() > maxFieldNameLength) { + throw new IllegalArgumentException("Field name [" + name + "] in index [" + index().getName() + + "] is too 
long. The limit is set to [" + maxFieldNameLength + "] characters but was [" + + name.length() + "] characters"); + } + }); + } + private void checkPartitionedIndexConstraints(DocumentMapper newMapper) { if (indexSettings.getIndexMetaData().isRoutingPartitionedIndex()) { if (!newMapper.routingFieldMapper().required()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 805b50e628bb..5790248ead80 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -74,6 +74,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.IntPredicate; import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; @@ -687,69 +688,12 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) } private Query analyzePhrasePrefix(TokenStream stream, int slop, int maxExpansions) throws IOException { - final MultiPhrasePrefixQuery query = createPhrasePrefixQuery(stream, name(), slop, maxExpansions); - - if (slop > 0 - || prefixFieldType == null - || prefixFieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { - return query; - } - - int lastPos = query.getTerms().length - 1; - final Term[][] terms = query.getTerms(); - final int[] positions = query.getPositions(); - for (Term term : terms[lastPos]) { - String value = term.text(); - if (value.length() < prefixFieldType.minChars || value.length() > prefixFieldType.maxChars) { - return query; - } - } - - if (terms.length == 1) { - Term[] newTerms = Arrays.stream(terms[0]) - .map(term -> new Term(prefixFieldType.name(), term.bytes())) - .toArray(Term[]::new); - return new SynonymQuery(newTerms); - } - - SpanNearQuery.Builder spanQuery = new SpanNearQuery.Builder(name(), true); - spanQuery.setSlop(slop); - int previousPos = -1; - for (int i = 0; i < terms.length; i++) { - Term[] posTerms = terms[i]; - int posInc = positions[i] - previousPos; - previousPos = positions[i]; - if (posInc > 1) { - spanQuery.addGap(posInc - 1); - } - if (i == lastPos) { - if (posTerms.length == 1) { - FieldMaskingSpanQuery fieldMask = - new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixFieldType.name(), posTerms[0].bytes())), name()); - spanQuery.addClause(fieldMask); - } else { - SpanQuery[] queries = Arrays.stream(posTerms) - .map(term -> new FieldMaskingSpanQuery( - new SpanTermQuery(new Term(prefixFieldType.name(), term.bytes())), name()) - ) - .toArray(SpanQuery[]::new); - spanQuery.addClause(new SpanOrQuery(queries)); - } - } else { - if (posTerms.length == 1) { - spanQuery.addClause(new SpanTermQuery(posTerms[0])); - } else { - SpanTermQuery[] queries = Arrays.stream(posTerms) - .map(SpanTermQuery::new) - .toArray(SpanTermQuery[]::new); - spanQuery.addClause(new SpanOrQuery(queries)); - } - } - } - return spanQuery.build(); + String prefixField = prefixFieldType == null || slop > 0 ? 
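// --- Hedged sketch, not part of the change -----------------------------------------
// The new soft limit above is an index-scoped, dynamic setting; a mapping whose field,
// object or alias simple name exceeds it is rejected during the mapping merge with the
// IllegalArgumentException shown above. Building Settings directly is illustrative only.
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;

class FieldNameLengthLimitSketch {
    // e.g. reject any field name longer than 50 characters
    static Settings withFieldNameLimit() {
        return Settings.builder()
            .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 50L)
            .build();
    }
}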
null : prefixFieldType.name(); + IntPredicate usePrefix = (len) -> len >= prefixFieldType.minChars && len <= prefixFieldType.maxChars; + return createPhrasePrefixQuery(stream, name(), slop, maxExpansions, prefixField, usePrefix); } - private static boolean hasGaps(TokenStream stream) throws IOException { + public static boolean hasGaps(TokenStream stream) throws IOException { assert stream instanceof CachingTokenFilter; PositionIncrementAttribute posIncAtt = stream.getAttribute(PositionIncrementAttribute.class); stream.reset(); @@ -963,8 +907,8 @@ public static Query createPhraseQuery(TokenStream stream, String field, int slop return mpqb.build(); } - public static MultiPhrasePrefixQuery createPhrasePrefixQuery(TokenStream stream, String field, - int slop, int maxExpansions) throws IOException { + public static Query createPhrasePrefixQuery(TokenStream stream, String field, int slop, int maxExpansions, + String prefixField, IntPredicate usePrefixField) throws IOException { MultiPhrasePrefixQuery builder = new MultiPhrasePrefixQuery(field); builder.setSlop(slop); builder.setMaxExpansions(maxExpansions); @@ -987,6 +931,61 @@ public static MultiPhrasePrefixQuery createPhrasePrefixQuery(TokenStream stream, currentTerms.add(new Term(field, termAtt.getBytesRef())); } builder.add(currentTerms.toArray(new Term[0]), position); - return builder; + if (prefixField == null) { + return builder; + } + + int lastPos = builder.getTerms().length - 1; + final Term[][] terms = builder.getTerms(); + final int[] positions = builder.getPositions(); + for (Term term : terms[lastPos]) { + String value = term.text(); + if (usePrefixField.test(value.length()) == false) { + return builder; + } + } + + if (terms.length == 1) { + Term[] newTerms = Arrays.stream(terms[0]) + .map(term -> new Term(prefixField, term.bytes())) + .toArray(Term[]::new); + return new SynonymQuery(newTerms); + } + + SpanNearQuery.Builder spanQuery = new SpanNearQuery.Builder(field, true); + spanQuery.setSlop(slop); + int previousPos = -1; + for (int i = 0; i < terms.length; i++) { + Term[] posTerms = terms[i]; + int posInc = positions[i] - previousPos; + previousPos = positions[i]; + if (posInc > 1) { + spanQuery.addGap(posInc - 1); + } + if (i == lastPos) { + if (posTerms.length == 1) { + FieldMaskingSpanQuery fieldMask = + new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixField, posTerms[0].bytes())), field); + spanQuery.addClause(fieldMask); + } else { + SpanQuery[] queries = Arrays.stream(posTerms) + .map(term -> new FieldMaskingSpanQuery( + new SpanTermQuery(new Term(prefixField, term.bytes())), field) + ) + .toArray(SpanQuery[]::new); + spanQuery.addClause(new SpanOrQuery(queries)); + } + } else { + if (posTerms.length == 1) { + spanQuery.addClause(new SpanTermQuery(posTerms[0])); + } else { + SpanTermQuery[] queries = Arrays.stream(posTerms) + .map(SpanTermQuery::new) + .toArray(SpanTermQuery[]::new); + spanQuery.addClause(new SpanOrQuery(queries)); + } + } + } + return spanQuery.build(); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java new file mode 100644 index 000000000000..7f0c89f9df49 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -0,0 +1,393 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.support.QueryParsers; +import org.elasticsearch.index.search.MatchQuery; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.index.query.MatchQueryBuilder.FUZZY_REWRITE_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.FUZZY_TRANSPOSITIONS_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.MAX_EXPANSIONS_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.OPERATOR_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.PREFIX_LENGTH_FIELD; + +/** + * The boolean prefix query analyzes the input text and creates a boolean query containing a Term query for each term, except + * for the last term, which is used to create a prefix query + */ +public class MatchBoolPrefixQueryBuilder extends AbstractQueryBuilder { + + public static final String NAME = "match_bool_prefix"; + + private static final Operator DEFAULT_OPERATOR = Operator.OR; + + private final String fieldName; + + private final Object value; + + private String analyzer; + + private Operator operator = DEFAULT_OPERATOR; + + private String minimumShouldMatch; + + private Fuzziness fuzziness; + + private int prefixLength = FuzzyQuery.defaultPrefixLength; + + private int maxExpansions = FuzzyQuery.defaultMaxExpansions; + + private boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; + + private String fuzzyRewrite; + + public MatchBoolPrefixQueryBuilder(String fieldName, Object value) { + if (Strings.isEmpty(fieldName)) { + throw new IllegalArgumentException("[" + NAME + "] requires fieldName"); + } + if (value == null) { + throw new IllegalArgumentException("[" + NAME + "] requires query value"); + } + this.fieldName = fieldName; + this.value = value; + } + + public MatchBoolPrefixQueryBuilder(StreamInput in) throws IOException { + super(in); + fieldName = in.readString(); + value = in.readGenericValue(); + analyzer = in.readOptionalString(); + operator = Operator.readFromStream(in); + minimumShouldMatch = in.readOptionalString(); + fuzziness = in.readOptionalWriteable(Fuzziness::new); + prefixLength = in.readVInt(); + maxExpansions = in.readVInt(); + 
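// --- Hedged illustration, not part of the change -----------------------------------
// The query shape described in the class javadoc above: for an analyzed input like
// [quick, brown, f] and the default OR operator, the produced query is roughly
// equivalent to this hand-built Lucene query (the field name is made up).
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class BoolPrefixShapeSketch {
    static Query equivalentOfQuickBrownF() {
        BooleanQuery.Builder bq = new BooleanQuery.Builder();
        bq.add(new TermQuery(new Term("message", "quick")), BooleanClause.Occur.SHOULD);
        bq.add(new TermQuery(new Term("message", "brown")), BooleanClause.Occur.SHOULD);
        // only the last term is treated as a prefix
        bq.add(new PrefixQuery(new Term("message", "f")), BooleanClause.Occur.SHOULD);
        return bq.build();
    }
}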
fuzzyTranspositions = in.readBoolean(); + fuzzyRewrite = in.readOptionalString(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeGenericValue(value); + out.writeOptionalString(analyzer); + operator.writeTo(out); + out.writeOptionalString(minimumShouldMatch); + out.writeOptionalWriteable(fuzziness); + out.writeVInt(prefixLength); + out.writeVInt(maxExpansions); + out.writeBoolean(fuzzyTranspositions); + out.writeOptionalString(fuzzyRewrite); + } + + /** Returns the field name used in this query. */ + public String fieldName() { + return this.fieldName; + } + + /** Returns the value used in this query. */ + public Object value() { + return this.value; + } + + /** Get the analyzer to use, if previously set, otherwise {@code null} */ + public String analyzer() { + return this.analyzer; + } + + /** + * Explicitly set the analyzer to use. Defaults to use explicit mapping + * config for the field, or, if not set, the default search analyzer. + */ + public MatchBoolPrefixQueryBuilder analyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + /** Sets the operator to use when using a boolean query. Defaults to {@code OR}. */ + public MatchBoolPrefixQueryBuilder operator(Operator operator) { + if (operator == null) { + throw new IllegalArgumentException("[" + NAME + "] requires operator to be non-null"); + } + this.operator = operator; + return this; + } + + /** Returns the operator to use in a boolean query.*/ + public Operator operator() { + return this.operator; + } + + /** Sets optional minimumShouldMatch value to apply to the query */ + public MatchBoolPrefixQueryBuilder minimumShouldMatch(String minimumShouldMatch) { + this.minimumShouldMatch = minimumShouldMatch; + return this; + } + + /** Gets the minimumShouldMatch value */ + public String minimumShouldMatch() { + return this.minimumShouldMatch; + } + + /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { + this.fuzziness = Fuzziness.build(fuzziness); + return this; + } + + /** Gets the fuzziness used when evaluated to a fuzzy query type. */ + public Fuzziness fuzziness() { + return this.fuzziness; + } + + /** + * Sets the length of a length of common (non-fuzzy) prefix for fuzzy match queries + * @param prefixLength non-negative length of prefix + * @throws IllegalArgumentException in case the prefix is negative + */ + public MatchBoolPrefixQueryBuilder prefixLength(int prefixLength) { + if (prefixLength < 0 ) { + throw new IllegalArgumentException("[" + NAME + "] requires prefix length to be non-negative."); + } + this.prefixLength = prefixLength; + return this; + } + + /** + * Gets the length of a length of common (non-fuzzy) prefix for fuzzy match queries + */ + public int prefixLength() { + return this.prefixLength; + } + + /** + * When using fuzzy or prefix type query, the number of term expansions to use. + */ + public MatchBoolPrefixQueryBuilder maxExpansions(int maxExpansions) { + if (maxExpansions <= 0 ) { + throw new IllegalArgumentException("[" + NAME + "] requires maxExpansions to be positive."); + } + this.maxExpansions = maxExpansions; + return this; + } + + /** + * Get the (optional) number of term expansions when using fuzzy or prefix type query. + */ + public int maxExpansions() { + return this.maxExpansions; + } + + /** + * Sets whether transpositions are supported in fuzzy queries.

+ * The default metric used by fuzzy queries to determine a match is the Damerau-Levenshtein + * distance formula which supports transpositions. Setting transposition to false will + * switch to classic Levenshtein distance.
+ * If not set, Damerau-Levenshtein distance metric will be used. + */ + public MatchBoolPrefixQueryBuilder fuzzyTranspositions(boolean fuzzyTranspositions) { + this.fuzzyTranspositions = fuzzyTranspositions; + return this; + } + + /** Gets the fuzzy query transposition setting. */ + public boolean fuzzyTranspositions() { + return this.fuzzyTranspositions; + } + + /** Sets the fuzzy_rewrite parameter controlling how the fuzzy query will get rewritten */ + public MatchBoolPrefixQueryBuilder fuzzyRewrite(String fuzzyRewrite) { + this.fuzzyRewrite = fuzzyRewrite; + return this; + } + + /** + * Get the fuzzy_rewrite parameter + * @see #fuzzyRewrite(String) + */ + public String fuzzyRewrite() { + return this.fuzzyRewrite; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.startObject(fieldName); + builder.field(MatchQueryBuilder.QUERY_FIELD.getPreferredName(), value); + if (analyzer != null) { + builder.field(MatchQueryBuilder.ANALYZER_FIELD.getPreferredName(), analyzer); + } + builder.field(OPERATOR_FIELD.getPreferredName(), operator.toString()); + if (minimumShouldMatch != null) { + builder.field(MatchQueryBuilder.MINIMUM_SHOULD_MATCH_FIELD.getPreferredName(), minimumShouldMatch); + } + if (fuzziness != null) { + fuzziness.toXContent(builder, params); + } + builder.field(PREFIX_LENGTH_FIELD.getPreferredName(), prefixLength); + builder.field(MAX_EXPANSIONS_FIELD.getPreferredName(), maxExpansions); + builder.field(FUZZY_TRANSPOSITIONS_FIELD.getPreferredName(), fuzzyTranspositions); + if (fuzzyRewrite != null) { + builder.field(FUZZY_REWRITE_FIELD.getPreferredName(), fuzzyRewrite); + } + printBoostAndQueryName(builder); + builder.endObject(); + builder.endObject(); + } + + public static MatchBoolPrefixQueryBuilder fromXContent(XContentParser parser) throws IOException { + String fieldName = null; + Object value = null; + float boost = AbstractQueryBuilder.DEFAULT_BOOST; + String analyzer = null; + Operator operator = DEFAULT_OPERATOR; + String minimumShouldMatch = null; + Fuzziness fuzziness = null; + int prefixLength = FuzzyQuery.defaultPrefixLength; + int maxExpansion = FuzzyQuery.defaultMaxExpansions; + boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; + String fuzzyRewrite = null; + String queryName = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName); + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (MatchQueryBuilder.QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + value = parser.objectText(); + } else if (MatchQueryBuilder.ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + analyzer = parser.text(); + } else if (OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + operator = Operator.fromString(parser.text()); + } else if (MatchQueryBuilder.MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + minimumShouldMatch = parser.textOrNull(); + } else if 
(Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + fuzziness = Fuzziness.parse(parser); + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + prefixLength = parser.intValue(); + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + maxExpansion = parser.intValue(); + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + fuzzyTranspositions = parser.booleanValue(); + } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + fuzzyRewrite = parser.textOrNull(); + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + boost = parser.floatValue(); + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] query does not support [" + currentFieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); + } + } + } else { + throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName()); + fieldName = parser.currentName(); + value = parser.objectText(); + } + } + + MatchBoolPrefixQueryBuilder queryBuilder = new MatchBoolPrefixQueryBuilder(fieldName, value); + queryBuilder.analyzer(analyzer); + queryBuilder.operator(operator); + queryBuilder.minimumShouldMatch(minimumShouldMatch); + queryBuilder.boost(boost); + queryBuilder.queryName(queryName); + if (fuzziness != null) { + queryBuilder.fuzziness(fuzziness); + } + queryBuilder.prefixLength(prefixLength); + queryBuilder.maxExpansions(maxExpansion); + queryBuilder.fuzzyTranspositions(fuzzyTranspositions); + queryBuilder.fuzzyRewrite(fuzzyRewrite); + return queryBuilder; + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) { + throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found"); + } + + final MatchQuery matchQuery = new MatchQuery(context); + if (analyzer != null) { + matchQuery.setAnalyzer(analyzer); + } + matchQuery.setOccur(operator.toBooleanClauseOccur()); + matchQuery.setFuzziness(fuzziness); + matchQuery.setFuzzyPrefixLength(prefixLength); + matchQuery.setMaxExpansions(maxExpansions); + matchQuery.setTranspositions(fuzzyTranspositions); + matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null, LoggingDeprecationHandler.INSTANCE)); + + final Query query = matchQuery.parse(MatchQuery.Type.BOOLEAN_PREFIX, fieldName, value); + return Queries.maybeApplyMinimumShouldMatch(query, minimumShouldMatch); + } + + @Override + protected boolean doEquals(MatchBoolPrefixQueryBuilder other) { + return Objects.equals(fieldName, other.fieldName) && + Objects.equals(value, other.value) && + Objects.equals(analyzer, other.analyzer) && + Objects.equals(operator, other.operator) && + Objects.equals(minimumShouldMatch, other.minimumShouldMatch) && + Objects.equals(fuzziness, other.fuzziness) && + Objects.equals(prefixLength, other.prefixLength) && + Objects.equals(maxExpansions, other.maxExpansions) && + Objects.equals(fuzzyTranspositions, other.fuzzyTranspositions) && + Objects.equals(fuzzyRewrite, other.fuzzyRewrite); + } + + @Override + 
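// --- Hedged usage sketch, not part of the change -----------------------------------
// Building the new query programmatically with the fluent setters defined above.
// The field name and query text are invented; wiring the builder into a search
// request is out of scope here.
import org.elasticsearch.index.query.MatchBoolPrefixQueryBuilder;

class MatchBoolPrefixUsageSketch {
    static MatchBoolPrefixQueryBuilder example() {
        return new MatchBoolPrefixQueryBuilder("message", "quick brown f")
            .analyzer("standard")       // optional; otherwise the mapping's search analyzer is used
            .minimumShouldMatch("2")    // with the default OR operator, at least two clauses must match
            .maxExpansions(25);         // per the setter javadoc: term expansions for fuzzy or prefix matching
    }
}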
protected int doHashCode() { + return Objects.hash(fieldName, value, analyzer, operator, minimumShouldMatch, fuzziness, prefixLength, maxExpansions, + fuzzyTranspositions, fuzzyRewrite); + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 0e9148e54010..267c86ea8448 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -128,7 +128,12 @@ public enum Type implements Writeable { * Uses the best matching phrase-prefix field as main score and uses * a tie-breaker to adjust the score based on remaining field matches */ - PHRASE_PREFIX(MatchQuery.Type.PHRASE_PREFIX, 0.0f, new ParseField("phrase_prefix")); + PHRASE_PREFIX(MatchQuery.Type.PHRASE_PREFIX, 0.0f, new ParseField("phrase_prefix")), + + /** + * Uses the sum of the matching boolean fields to score the query + */ + BOOL_PREFIX(MatchQuery.Type.BOOLEAN_PREFIX, 1.0f, new ParseField("bool_prefix")); private MatchQuery.Type matchQueryType; private final float tieBreaker; @@ -687,6 +692,16 @@ public static MultiMatchQueryBuilder fromXContent(XContentParser parser) throws "Fuzziness not allowed for type [" + type.parseField.getPreferredName() + "]"); } + if (slop != DEFAULT_PHRASE_SLOP && type == Type.BOOL_PREFIX) { + throw new ParsingException(parser.getTokenLocation(), + "[" + SLOP_FIELD.getPreferredName() + "] not allowed for type [" + type.parseField.getPreferredName() + "]"); + } + + if (cutoffFrequency != null && type == Type.BOOL_PREFIX) { + throw new ParsingException(parser.getTokenLocation(), + "[" + CUTOFF_FREQUENCY_FIELD.getPreferredName() + "] not allowed for type [" + type.parseField.getPreferredName() + "]"); + } + MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder(value) .fields(fieldsBoosts) .type(type) diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index a860bd19d7c5..accfd2f65699 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -94,7 +94,7 @@ protected ScoreFunction doToFunction(QueryShardContext context) { try { ScoreScript.Factory factory = context.getScriptService().compile(script, ScoreScript.CONTEXT); ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); - return new ScriptScoreFunction(script, searchScript); + return new ScriptScoreFunction(script, searchScript, context.index().getName(), context.getShardId()); } catch (Exception e) { throw new QueryShardException(context, "script_score: the script could not be loaded", e); } diff --git a/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java b/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java index 11b65d166fc9..c77e90337387 100644 --- a/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.refresh; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -36,6 +37,10 @@ public class RefreshStats implements Streamable, Writeable, ToXContentFragment { private long totalTimeInMillis; + private long externalTotal; + + private long externalTotalTimeInMillis; + /** * Number of waiting refresh listeners. */ @@ -47,12 +52,29 @@ public RefreshStats() { public RefreshStats(StreamInput in) throws IOException { total = in.readVLong(); totalTimeInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_7_1_0)) { + externalTotal = in.readVLong(); + externalTotalTimeInMillis = in.readVLong(); + } listeners = in.readVInt(); } - public RefreshStats(long total, long totalTimeInMillis, int listeners) { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(total); + out.writeVLong(totalTimeInMillis); + if (out.getVersion().onOrAfter(Version.V_7_1_0)) { + out.writeVLong(externalTotal); + out.writeVLong(externalTotalTimeInMillis); + } + out.writeVInt(listeners); + } + + public RefreshStats(long total, long totalTimeInMillis, long externalTotal, long externalTotalTimeInMillis, int listeners) { this.total = total; this.totalTimeInMillis = totalTimeInMillis; + this.externalTotal = externalTotal; + this.externalTotalTimeInMillis = externalTotalTimeInMillis; this.listeners = listeners; } @@ -66,6 +88,8 @@ public void addTotals(RefreshStats refreshStats) { } this.total += refreshStats.total; this.totalTimeInMillis += refreshStats.totalTimeInMillis; + this.externalTotal += refreshStats.externalTotal; + this.externalTotalTimeInMillis += refreshStats.externalTotalTimeInMillis; this.listeners += refreshStats.listeners; } @@ -76,20 +100,38 @@ public long getTotal() { return this.total; } + /** + * The total number of external refreshes executed. + */ + public long getExternalTotal() { return this.externalTotal; } + /** - * The total time merges have been executed (in milliseconds). + * The total time spent executing refreshes (in milliseconds). */ public long getTotalTimeInMillis() { return this.totalTimeInMillis; } /** - * The total time merges have been executed. + * The total time spent executing external refreshes (in milliseconds). + */ + public long getExternalTotalTimeInMillis() { + return this.externalTotalTimeInMillis; + } + + /** + * The total time refreshes have been executed. */ public TimeValue getTotalTime() { return new TimeValue(totalTimeInMillis); } + /** + * The total time external refreshes have been executed. + */ + public TimeValue getExternalTotalTime() { + return new TimeValue(externalTotalTimeInMillis); + } /** * The number of waiting refresh listeners.
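For quick reference, a minimal sketch of how the extended stats object above can be constructed and read. It relies only on the five-argument constructor and the external getters introduced in this hunk; the concrete numbers are invented for illustration.

```java
import org.elasticsearch.index.refresh.RefreshStats;

public class RefreshStatsExample {
    public static void main(String[] args) {
        // 10 refreshes taking 120ms overall, of which 4 were externally visible
        // refreshes taking 80ms, with 2 refresh listeners still pending (made-up values).
        RefreshStats stats = new RefreshStats(10, 120, 4, 80, 2);

        System.out.println("total refreshes:       " + stats.getTotal());
        System.out.println("external refreshes:    " + stats.getExternalTotal());
        System.out.println("external refresh time: " + stats.getExternalTotalTime()); // TimeValue, e.g. "80ms"
    }
}
```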
*/ @@ -102,6 +144,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject("refresh"); builder.field("total", total); builder.humanReadableField("total_time_in_millis", "total_time", getTotalTime()); + builder.field("external_total", externalTotal); + builder.humanReadableField("external_total_time_in_millis", "external_total_time", getExternalTotalTime()); builder.field("listeners", listeners); builder.endObject(); return builder; @@ -112,13 +156,6 @@ public void readFrom(StreamInput in) throws IOException { throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(total); - out.writeVLong(totalTimeInMillis); - out.writeVInt(listeners); - } - @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != RefreshStats.class) { @@ -127,11 +164,13 @@ public boolean equals(Object obj) { RefreshStats rhs = (RefreshStats) obj; return total == rhs.total && totalTimeInMillis == rhs.totalTimeInMillis + && externalTotal == rhs.externalTotal + && externalTotalTimeInMillis == rhs.externalTotalTimeInMillis && listeners == rhs.listeners; } @Override public int hashCode() { - return Objects.hash(total, totalTimeInMillis, listeners); + return Objects.hash(total, totalTimeInMillis, externalTotal, externalTotalTimeInMillis, listeners); } } diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index ad4b267eef64..da7273aa6630 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.CachingTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; @@ -51,7 +52,9 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; @@ -78,7 +81,11 @@ public enum Type implements Writeable { /** * The text is analyzed and used in a phrase query, with the last term acting as a prefix. */ - PHRASE_PREFIX(2); + PHRASE_PREFIX(2), + /** + * The text is analyzed, terms are added to a boolean query with the last term acting as a prefix. + */ + BOOLEAN_PREFIX(3); private final int ordinal; @@ -244,11 +251,18 @@ public Query parse(Type type, String fieldName, Object value) throws IOException /* * If a keyword analyzer is used, we know that further analysis isn't - * needed and can immediately return a term query. + * needed and can immediately return a term query. 
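The BOOLEAN_PREFIX type introduced above analyzes the text and adds every term to a boolean query, with the last term acting as a prefix. As a rough sketch of the intended Lucene query shape only (not the actual MatchQuery code path, and assuming the default SHOULD operator), a bool_prefix match for "quick brown f" can be pictured as:

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class BoolPrefixShapeSketch {
    // Every fully analyzed term becomes a term clause; the trailing,
    // possibly incomplete term becomes a prefix clause.
    static Query boolPrefixShape(String field) {
        return new BooleanQuery.Builder()
            .add(new TermQuery(new Term(field, "quick")), BooleanClause.Occur.SHOULD)
            .add(new TermQuery(new Term(field, "brown")), BooleanClause.Occur.SHOULD)
            .add(new PrefixQuery(new Term(field, "f")), BooleanClause.Occur.SHOULD)
            .build();
    }
}
```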
If the query is a bool + * prefix query and the field type supports prefix queries, we return + * a prefix query instead */ - if (analyzer == Lucene.KEYWORD_ANALYZER - && type != Type.PHRASE_PREFIX) { - return builder.newTermQuery(new Term(fieldName, value.toString())); + if (analyzer == Lucene.KEYWORD_ANALYZER && type != Type.PHRASE_PREFIX) { + final Term term = new Term(fieldName, value.toString()); + if ((fieldType instanceof TextFieldMapper.TextFieldType || fieldType instanceof KeywordFieldMapper.KeywordFieldType) + && type == Type.BOOLEAN_PREFIX) { + return builder.newPrefixQuery(fieldName, term); + } else { + return builder.newTermQuery(term); + } } return parseInternal(type, fieldName, builder, value); @@ -265,6 +279,10 @@ protected final Query parseInternal(Type type, String fieldName, MatchQueryBuild } break; + case BOOLEAN_PREFIX: + query = builder.createBooleanPrefixQuery(fieldName, value.toString(), occur); + break; + case PHRASE: query = builder.createPhraseQuery(fieldName, value.toString(), phraseSlop); break; @@ -354,10 +372,28 @@ protected Query createFieldQuery(Analyzer analyzer, BooleanClause.Occur operator return createQuery(field, queryText, type, operator, slop); } - public Query createPhrasePrefixQuery(String field, String queryText, int slop) { + /** + * Creates a phrase prefix query from the query text. + * + * @param field field name + * @param queryText text to be passed to the analyzer + * @return {@code PrefixQuery}, {@code MultiPhrasePrefixQuery}, based on the analysis of {@code queryText} + */ + protected Query createPhrasePrefixQuery(String field, String queryText, int slop) { return createQuery(field, queryText, Type.PHRASE_PREFIX, occur, slop); } + /** + * Creates a boolean prefix query from the query text. + * + * @param field field name + * @param queryText text to be passed to the analyzer + * @return {@code PrefixQuery}, {@code BooleanQuery}, based on the analysis of {@code queryText} + */ + protected Query createBooleanPrefixQuery(String field, String queryText, BooleanClause.Occur occur) { + return createQuery(field, queryText, Type.BOOLEAN_PREFIX, occur, 0); + } + private Query createFieldQuery(TokenStream source, Type type, BooleanClause.Occur operator, String field, int phraseSlop) { assert operator == BooleanClause.Occur.SHOULD || operator == BooleanClause.Occur.MUST; @@ -405,14 +441,14 @@ private Query createFieldQuery(TokenStream source, Type type, BooleanClause.Occu if (type == Type.PHRASE_PREFIX) { return analyzePhrasePrefix(field, stream, phraseSlop, positionCount); } else { - return analyzeTerm(field, stream); + return analyzeTerm(field, stream, type == Type.BOOLEAN_PREFIX); } } else if (isGraph) { // graph if (type == Type.PHRASE || type == Type.PHRASE_PREFIX) { return analyzeGraphPhrase(stream, field, type, phraseSlop); } else { - return analyzeGraphBoolean(field, stream, operator); + return analyzeGraphBoolean(field, stream, operator, type == Type.BOOLEAN_PREFIX); } } else if (type == Type.PHRASE && positionCount > 1) { // phrase @@ -433,7 +469,7 @@ private Query createFieldQuery(TokenStream source, Type type, BooleanClause.Occu return analyzeBoolean(field, stream); } else { // complex case: multiple positions - return analyzeMultiBoolean(field, stream, operator); + return analyzeMultiBoolean(field, stream, operator, type == Type.BOOLEAN_PREFIX); } } } catch (IOException e) { @@ -462,13 +498,13 @@ private Query createQuery(String field, String queryText, Type type, BooleanClau } } - private SpanQuery newSpanQuery(Term[] terms, boolean 
prefix) { + private SpanQuery newSpanQuery(Term[] terms, boolean isPrefix) { if (terms.length == 1) { - return prefix ? fieldType.spanPrefixQuery(terms[0].text(), spanRewriteMethod, context) : new SpanTermQuery(terms[0]); + return isPrefix ? fieldType.spanPrefixQuery(terms[0].text(), spanRewriteMethod, context) : new SpanTermQuery(terms[0]); } SpanQuery[] spanQueries = new SpanQuery[terms.length]; for (int i = 0; i < terms.length; i++) { - spanQueries[i] = prefix ? new SpanTermQuery(terms[i]) : + spanQueries[i] = isPrefix ? new SpanTermQuery(terms[i]) : fieldType.spanPrefixQuery(terms[i].text(), spanRewriteMethod, context); } return new SpanOrQuery(spanQueries); @@ -479,7 +515,7 @@ protected SpanQuery createSpanQuery(TokenStream in, String field) throws IOExcep return createSpanQuery(in, field, false); } - private SpanQuery createSpanQuery(TokenStream in, String field, boolean prefix) throws IOException { + private SpanQuery createSpanQuery(TokenStream in, String field, boolean isPrefix) throws IOException { TermToBytesRefAttribute termAtt = in.getAttribute(TermToBytesRefAttribute.class); PositionIncrementAttribute posIncAtt = in.getAttribute(PositionIncrementAttribute.class); if (termAtt == null) { @@ -498,7 +534,7 @@ private SpanQuery createSpanQuery(TokenStream in, String field, boolean prefix) lastTerm = new Term(field, termAtt.getBytesRef()); } if (lastTerm != null) { - SpanQuery spanQuery = prefix ? + SpanQuery spanQuery = isPrefix ? fieldType.spanPrefixQuery(lastTerm.text(), spanRewriteMethod, context) : new SpanTermQuery(lastTerm); builder.addClause(spanQuery); } @@ -537,6 +573,74 @@ protected Query newTermQuery(Term term) { } } + /** + * Builds a new prefix query instance. + */ + protected Query newPrefixQuery(String field, Term term) { + try { + return fieldType.prefixQuery(term.text(), null, context); + } catch (RuntimeException e) { + if (lenient) { + return newLenientFieldQuery(field, e); + } + throw e; + } + } + + private Query analyzeTerm(String field, TokenStream stream, boolean isPrefix) throws IOException { + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class); + + stream.reset(); + if (!stream.incrementToken()) { + throw new AssertionError(); + } + final Term term = new Term(field, termAtt.getBytesRef()); + int lastOffset = offsetAtt.endOffset(); + stream.end(); + return isPrefix && lastOffset == offsetAtt.endOffset() ? 
newPrefixQuery(field, term) : newTermQuery(term); + } + + private void add(BooleanQuery.Builder q, String field, List current, BooleanClause.Occur operator, boolean isPrefix) { + if (current.isEmpty()) { + return; + } + if (current.size() == 1) { + if (isPrefix) { + q.add(newPrefixQuery(field, current.get(0)), operator); + } else { + q.add(newTermQuery(current.get(0)), operator); + } + } else { + // We don't apply prefix on synonyms + q.add(newSynonymQuery(current.toArray(new Term[current.size()])), operator); + } + } + + private Query analyzeMultiBoolean(String field, TokenStream stream, + BooleanClause.Occur operator, boolean isPrefix) throws IOException { + BooleanQuery.Builder q = newBooleanQuery(); + List currentQuery = new ArrayList<>(); + + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); + OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class); + + stream.reset(); + int lastOffset = 0; + while (stream.incrementToken()) { + if (posIncrAtt.getPositionIncrement() != 0) { + add(q, field, currentQuery, operator, false); + currentQuery.clear(); + } + currentQuery.add(new Term(field, termAtt.getBytesRef())); + lastOffset = offsetAtt.endOffset(); + } + stream.end(); + add(q, field, currentQuery, operator, isPrefix && lastOffset == offsetAtt.endOffset()); + return q.build(); + } + @Override protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { try { @@ -577,6 +681,62 @@ private Query analyzePhrasePrefix(String field, TokenStream stream, int slop, in } } + private Query analyzeGraphBoolean(String field, TokenStream source, + BooleanClause.Occur operator, boolean isPrefix) throws IOException { + source.reset(); + GraphTokenStreamFiniteStrings graph = new GraphTokenStreamFiniteStrings(source); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + int[] articulationPoints = graph.articulationPoints(); + int lastState = 0; + for (int i = 0; i <= articulationPoints.length; i++) { + int start = lastState; + int end = -1; + if (i < articulationPoints.length) { + end = articulationPoints[i]; + } + lastState = end; + final Query queryPos; + boolean usePrefix = isPrefix && end == -1; + if (graph.hasSidePath(start)) { + final Iterator it = graph.getFiniteStrings(start, end); + Iterator queries = new Iterator() { + @Override + public boolean hasNext() { + return it.hasNext(); + } + + @Override + public Query next() { + TokenStream ts = it.next(); + final Type type; + if (getAutoGenerateMultiTermSynonymsPhraseQuery()) { + type = usePrefix + ? Type.PHRASE_PREFIX + : Type.PHRASE; + } else { + type = Type.BOOLEAN; + } + return createFieldQuery(ts, type, BooleanClause.Occur.MUST, field, 0); + } + }; + queryPos = newGraphSynonymQuery(queries); + } else { + Term[] terms = graph.getTerms(field, start); + assert terms.length > 0; + if (terms.length == 1) { + queryPos = usePrefix ? 
newPrefixQuery(field, terms[0]) : newTermQuery(terms[0]); + } else { + // We don't apply prefix on synonyms + queryPos = newSynonymQuery(terms); + } + } + if (queryPos != null) { + builder.add(queryPos, operator); + } + } + return builder.build(); + } + private Query analyzeGraphPhrase(TokenStream source, String field, Type type, int slop) throws IOException { assert type == Type.PHRASE_PREFIX || type == Type.PHRASE; @@ -615,13 +775,13 @@ private Query analyzeGraphPhrase(TokenStream source, String field, Type type, in } lastState = end; final SpanQuery queryPos; - boolean endPrefix = end == -1 && type == Type.PHRASE_PREFIX; + boolean usePrefix = end == -1 && type == Type.PHRASE_PREFIX; if (graph.hasSidePath(start)) { List queries = new ArrayList<>(); Iterator it = graph.getFiniteStrings(start, end); while (it.hasNext()) { TokenStream ts = it.next(); - SpanQuery q = createSpanQuery(ts, field, endPrefix); + SpanQuery q = createSpanQuery(ts, field, usePrefix); if (q != null) { if (queries.size() >= maxClauseCount) { throw new BooleanQuery.TooManyClauses(); @@ -640,7 +800,7 @@ private Query analyzeGraphPhrase(TokenStream source, String field, Type type, in if (terms.length >= maxClauseCount) { throw new BooleanQuery.TooManyClauses(); } - queryPos = newSpanQuery(terms, endPrefix); + queryPos = newSpanQuery(terms, usePrefix); } if (queryPos != null) { diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 88fd5293392b..667d3a3823db 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -66,6 +66,7 @@ public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNam case PHRASE_PREFIX: case BEST_FIELDS: case MOST_FIELDS: + case BOOL_PREFIX: queries = buildFieldQueries(type, fieldNames, value, minimumShouldMatch); break; @@ -179,10 +180,23 @@ protected Query newSynonymQuery(Term[] terms) { } @Override - public Query newTermQuery(Term term) { + protected Query newTermQuery(Term term) { return blendTerm(context, term.bytes(), commonTermsCutoff, tieBreaker, lenient, blendedFields); } + @Override + protected Query newPrefixQuery(String field, Term term) { + List disjunctions = new ArrayList<>(); + for (FieldAndBoost fieldType : blendedFields) { + Query query = fieldType.fieldType.prefixQuery(term.text(), null, context); + if (fieldType.boost != 1f) { + query = new BoostQuery(query, fieldType.boost); + } + disjunctions.add(query); + } + return new DisjunctionMaxQuery(disjunctions, tieBreaker); + } + @Override protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { List disjunctions = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java index 3493271e8d79..2f9043580a6a 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseActions.java @@ -28,6 +28,8 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.PlainShardIterator; import 
org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; @@ -42,6 +44,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Collections; import java.util.Objects; import java.util.function.Supplier; @@ -84,10 +87,14 @@ abstract static class TransportRetentionLeaseAction> extend @Override protected ShardsIterator shards(final ClusterState state, final InternalRequest request) { - return state + final IndexShardRoutingTable shardRoutingTable = state .routingTable() - .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()) - .primaryShardIt(); + .shardRoutingTable(request.concreteIndex(), request.request().getShardId().id()); + if (shardRoutingTable.primaryShard().active()) { + return shardRoutingTable.primaryShardIt(); + } else { + return new PlainShardIterator(request.request().getShardId(), Collections.emptyList()); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 1733f54e4c10..97d1939c1b29 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -222,6 +222,7 @@ Runnable getGlobalCheckpointSyncer() { private final RecoveryStats recoveryStats = new RecoveryStats(); private final MeanMetric refreshMetric = new MeanMetric(); + private final MeanMetric externalRefreshMetric = new MeanMetric(); private final MeanMetric flushMetric = new MeanMetric(); private final CounterMetric periodicFlushMetric = new CounterMetric(); @@ -932,7 +933,12 @@ public long getWritingBytes() { public RefreshStats refreshStats() { int listeners = refreshListeners.pendingCount(); - return new RefreshStats(refreshMetric.count(), TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()), listeners); + return new RefreshStats( + refreshMetric.count(), + TimeUnit.NANOSECONDS.toMillis(refreshMetric.sum()), + externalRefreshMetric.count(), + TimeUnit.NANOSECONDS.toMillis(externalRefreshMetric.sum()), + listeners); } public FlushStats flushStats() { @@ -2487,8 +2493,9 @@ private EngineConfig newEngineConfig() { Sort indexSort = indexSortSupplier.get(); return new EngineConfig(shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store, indexSettings.getMergePolicy(), - mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, - indexCache.query(), cachingPolicy, translogConfig, + mapperService != null ? mapperService.indexAnalyzer() : null, + similarityService.similarity(mapperService), codecService, shardEventListener, + indexCache != null ? 
indexCache.query() : null, cachingPolicy, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), @@ -2900,7 +2907,8 @@ private RefreshListeners buildRefreshListeners() { indexSettings::getMaxRefreshListeners, () -> refresh("too_many_listeners"), threadPool.executor(ThreadPool.Names.LISTENER)::execute, - logger, threadPool.getThreadContext()); + logger, threadPool.getThreadContext(), + externalRefreshMetric); } /** @@ -3070,7 +3078,9 @@ public void afterRefresh(boolean didRefresh) throws IOException { private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() { final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop"); - final DocumentMapper noopDocumentMapper = new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService); + final DocumentMapper noopDocumentMapper = mapperService != null ? + new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService) : + null; return new EngineConfig.TombstoneDocSupplier() { @Override public ParsedDocument newDeleteTombstoneDoc(String type, String id) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java index cc9ac40c2744..72b99f4d4868 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/shard/LocalShardSnapshot.java @@ -33,6 +33,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.Collection; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; final class LocalShardSnapshot implements Closeable { @@ -116,6 +117,12 @@ public Lock obtainLock(String name) throws IOException { public void close() throws IOException { throw new UnsupportedOperationException("nobody should close this directory wrapper"); } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } }; } diff --git a/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java b/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java index 713563eb111b..d343f8630a6f 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java +++ b/server/src/main/java/org/elasticsearch/index/shard/RefreshListeners.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.ReferenceManager; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.translog.Translog; @@ -50,6 +51,12 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener, private final Executor listenerExecutor; private final Logger logger; private final ThreadContext threadContext; + private final MeanMetric refreshMetric; + + /** + * Time in nanosecond when beforeRefresh() is called. Used for calculating refresh metrics. + */ + private long currentRefreshStartTime; /** * Is this closed? If true then we won't add more listeners and have flushed all pending listeners. 
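Several directory wrappers touched by this change (LocalShardSnapshot above, and StoreRecovery, ByteSizeCachingDirectory and Store below) carry the same "temporary override until LUCENE-8735 is integrated". A minimal, standalone sketch of that workaround with an invented class name, since FilterDirectory does not yet forward getPendingDeletions() to the wrapped directory:

```java
import java.io.IOException;
import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;

// Forward getPendingDeletions() to the wrapped directory until
// FilterDirectory does this itself (LUCENE-8735).
class DelegatingPendingDeletionsDirectory extends FilterDirectory {

    DelegatingPendingDeletionsDirectory(Directory in) {
        super(in);
    }

    @Override
    public Set<String> getPendingDeletions() throws IOException {
        return in.getPendingDeletions();
    }
}
```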
@@ -76,12 +83,13 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener, private volatile Translog.Location lastRefreshedLocation; public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, Logger logger, - ThreadContext threadContext) { + ThreadContext threadContext, MeanMetric refreshMetric) { this.getMaxRefreshListeners = getMaxRefreshListeners; this.forceRefresh = forceRefresh; this.listenerExecutor = listenerExecutor; this.logger = logger; this.threadContext = threadContext; + this.refreshMetric = refreshMetric; } /** @@ -204,10 +212,14 @@ public void setCurrentRefreshLocationSupplier(Supplier curren @Override public void beforeRefresh() throws IOException { currentRefreshLocation = currentRefreshLocationSupplier.get(); + currentRefreshStartTime = System.nanoTime(); } @Override public void afterRefresh(boolean didRefresh) throws IOException { + // Increment refresh metric before communicating to listeners. + refreshMetric.inc(System.nanoTime() - currentRefreshStartTime); + /* We intentionally ignore didRefresh here because our timing is a little off. It'd be a useful flag if we knew everything that made * it into the refresh, but the way we snapshot the translog position before the refresh, things can sneak into the refresh that we * don't know about. */ diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index c97c19eb0f3e..06b6fa557983 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -259,6 +259,12 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { assert index.getFileDetails(dest).recovered() == l : index.getFileDetails(dest).toString(); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java b/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java index 3b0a912c2df7..9a202a9b4cd3 100644 --- a/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java +++ b/server/src/main/java/org/elasticsearch/index/store/ByteSizeCachingDirectory.java @@ -32,6 +32,7 @@ import java.io.UncheckedIOException; import java.nio.file.AccessDeniedException; import java.nio.file.NoSuchFileException; +import java.util.Set; final class ByteSizeCachingDirectory extends FilterDirectory { @@ -180,4 +181,9 @@ public void deleteFile(String name) throws IOException { } } + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 65d2f8d7812f..f860e7fd940a 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -760,6 +760,13 @@ public IndexInput openInput(String name, IOContext context) throws IOException { public String toString() { return "store(" + in.toString() + ")"; } + + @Override + public Set getPendingDeletions() throws IOException { + // FilterDirectory.getPendingDeletions does not delegate, working 
around it here. + // to be removed once fixed in FilterDirectory. + return unwrap(this).getPendingDeletions(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 4a12bdae6b9e..913fb47157ed 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -156,6 +156,8 @@ import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; +import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; +import static org.elasticsearch.index.IndexService.IndexCreationContext.META_DATA_VERIFICATION; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; public class IndicesService extends AbstractLifecycleComponent @@ -491,7 +493,7 @@ public void onStoreClosed(ShardId shardId) { finalListeners.add(oldShardsStats); final IndexService indexService = createIndexService( - "create index", + CREATE_INDEX, indexMetaData, indicesQueryCache, indicesFieldDataCache, @@ -513,7 +515,7 @@ public void onStoreClosed(ShardId shardId) { /** * This creates a new IndexService without registering it */ - private synchronized IndexService createIndexService(final String reason, + private synchronized IndexService createIndexService(IndexService.IndexCreationContext indexCreationContext, IndexMetaData indexMetaData, IndicesQueryCache indicesQueryCache, IndicesFieldDataCache indicesFieldDataCache, @@ -526,7 +528,7 @@ private synchronized IndexService createIndexService(final String reason, indexMetaData.getIndex(), idxSettings.getNumberOfShards(), idxSettings.getNumberOfReplicas(), - reason); + indexCreationContext); final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), indexStoreFactories); for (IndexingOperationListener operationListener : indexingOperationListeners) { @@ -537,6 +539,7 @@ private synchronized IndexService createIndexService(final String reason, indexModule.addIndexEventListener(listener); } return indexModule.newIndexService( + indexCreationContext, nodeEnv, xContentRegistry, this, @@ -615,7 +618,7 @@ public synchronized void verifyIndexMetadata(IndexMetaData metaData, IndexMetaDa closeables.add(indicesQueryCache); // this will also fail if some plugin fails etc. 
which is nice since we can verify that early final IndexService service = - createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList()); + createIndexService(META_DATA_VERIFICATION, metaData, indicesQueryCache, indicesFieldDataCache, emptyList()); closeables.add(() -> service.close("metadata verification", false)); service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY); if (metaData.equals(metaDataUpdate) == false) { diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index fc5311be5cbd..218713383227 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -89,6 +89,9 @@ public static Pipeline create(String id, Map config, /** * Modifies the data of a document to be indexed based on the processor this pipeline holds + * + * If null is returned then this document will be dropped and not indexed, otherwise + * this document will be kept and indexed. */ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { long startTimeInNanos = relativeTimeProvider.getAsLong(); diff --git a/server/src/main/java/org/elasticsearch/ingest/Processor.java b/server/src/main/java/org/elasticsearch/ingest/Processor.java index 92b08bba77bf..c064ddb35a12 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/server/src/main/java/org/elasticsearch/ingest/Processor.java @@ -39,6 +39,9 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. + * + * @return If null is returned then the current document will be dropped and not be indexed, + * otherwise this document will be kept and indexed */ IngestDocument execute(IngestDocument ingestDocument) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 033080c2c38e..eb130f31e4b9 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -23,11 +23,12 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; @@ -43,12 +44,10 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. 
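The javadoc added to Pipeline and Processor above documents that returning null drops the document instead of indexing it. An illustrative processor using that contract; the processor type and the "draft" field are invented and are not part of the change itself:

```java
import java.util.Map;

import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.Processor;

// Returning null from execute() signals that the document should be dropped.
class DropDraftsProcessor implements Processor {

    @Override
    public IngestDocument execute(IngestDocument ingestDocument) {
        Map<String, Object> source = ingestDocument.getSourceAndMetadata();
        if (Boolean.TRUE.equals(source.get("draft"))) {
            return null; // drop this document, it will not be indexed
        }
        return ingestDocument; // keep and index
    }

    @Override
    public String getType() {
        return "drop_drafts";
    }

    @Override
    public String getTag() {
        return null;
    }
}
```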
@@ -93,12 +92,12 @@ public RepositoriesService(Settings settings, ClusterService clusterService, Tra * @param request register repository request * @param listener register repository listener */ - public void registerRepository(final RegisterRepositoryRequest request, final ActionListener listener) { - final RepositoryMetaData newRepositoryMetaData = new RepositoryMetaData(request.name, request.type, request.settings); + public void registerRepository(final PutRepositoryRequest request, final ActionListener listener) { + final RepositoryMetaData newRepositoryMetaData = new RepositoryMetaData(request.name(), request.type(), request.settings()); final ActionListener registrationListener; - if (request.verify) { - registrationListener = new VerifyingRegisterRepositoryListener(request.name, listener); + if (request.verify()) { + registrationListener = new VerifyingRegisterRepositoryListener(request.name(), listener); } else { registrationListener = listener; } @@ -111,7 +110,7 @@ public void registerRepository(final RegisterRepositoryRequest request, final Ac return; } - clusterService.submitStateUpdateTask(request.cause, + clusterService.submitStateUpdateTask("put_repository [" + request.name() + "]", new AckedClusterStateUpdateTask(request, registrationListener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @@ -120,14 +119,14 @@ protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) { - ensureRepositoryNotInUse(currentState, request.name); + ensureRepositoryNotInUse(currentState, request.name()); MetaData metaData = currentState.metaData(); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); if (repositories == null) { - logger.info("put repository [{}]", request.name); + logger.info("put repository [{}]", request.name()); repositories = new RepositoriesMetaData( - Collections.singletonList(new RepositoryMetaData(request.name, request.type, request.settings))); + Collections.singletonList(new RepositoryMetaData(request.name(), request.type(), request.settings()))); } else { boolean found = false; List repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1); @@ -145,10 +144,10 @@ public ClusterState execute(ClusterState currentState) { } } if (!found) { - logger.info("put repository [{}]", request.name); - repositoriesMetaData.add(new RepositoryMetaData(request.name, request.type, request.settings)); + logger.info("put repository [{}]", request.name()); + repositoriesMetaData.add(new RepositoryMetaData(request.name(), request.type(), request.settings())); } else { - logger.info("update repository [{}]", request.name); + logger.info("update repository [{}]", request.name()); } repositories = new RepositoriesMetaData(repositoriesMetaData); } @@ -158,7 +157,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", request.name()), e); super.onFailure(source, e); } @@ -177,51 +176,52 @@ public boolean mustAck(DiscoveryNode discoveryNode) { * @param request unregister repository request * @param listener unregister repository listener */ - public void unregisterRepository(final UnregisterRepositoryRequest request, 
final ActionListener listener) { - clusterService.submitStateUpdateTask(request.cause, new AckedClusterStateUpdateTask(request, listener) { - @Override - protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { - return new ClusterStateUpdateResponse(acknowledged); - } + public void unregisterRepository(final DeleteRepositoryRequest request, final ActionListener listener) { + clusterService.submitStateUpdateTask("delete_repository [" + request.name() + "]", + new AckedClusterStateUpdateTask(request, listener) { + @Override + protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { + return new ClusterStateUpdateResponse(acknowledged); + } - @Override - public ClusterState execute(ClusterState currentState) { - ensureRepositoryNotInUse(currentState, request.name); - MetaData metaData = currentState.metaData(); - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); - if (repositories != null && repositories.repositories().size() > 0) { - List repositoriesMetaData = new ArrayList<>(repositories.repositories().size()); - boolean changed = false; - for (RepositoryMetaData repositoryMetaData : repositories.repositories()) { - if (Regex.simpleMatch(request.name, repositoryMetaData.name())) { - logger.info("delete repository [{}]", repositoryMetaData.name()); - changed = true; - } else { - repositoriesMetaData.add(repositoryMetaData); + @Override + public ClusterState execute(ClusterState currentState) { + ensureRepositoryNotInUse(currentState, request.name()); + MetaData metaData = currentState.metaData(); + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); + if (repositories != null && repositories.repositories().size() > 0) { + List repositoriesMetaData = new ArrayList<>(repositories.repositories().size()); + boolean changed = false; + for (RepositoryMetaData repositoryMetaData : repositories.repositories()) { + if (Regex.simpleMatch(request.name(), repositoryMetaData.name())) { + logger.info("delete repository [{}]", repositoryMetaData.name()); + changed = true; + } else { + repositoriesMetaData.add(repositoryMetaData); + } + } + if (changed) { + repositories = new RepositoriesMetaData(repositoriesMetaData); + mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); + return ClusterState.builder(currentState).metaData(mdBuilder).build(); } } - if (changed) { - repositories = new RepositoriesMetaData(repositoriesMetaData); - mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); - return ClusterState.builder(currentState).metaData(mdBuilder).build(); + if (Regex.isMatchAllPattern(request.name())) { // we use a wildcard so we don't barf if it's not present. + return currentState; } + throw new RepositoryMissingException(request.name()); } - if (Regex.isMatchAllPattern(request.name)) { // we use a wildcard so we don't barf if it's not present. 
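With the internal RegisterRepositoryRequest/UnregisterRepositoryRequest classes removed, registerRepository and unregisterRepository now take the transport-level PutRepositoryRequest and DeleteRepositoryRequest directly. A sketch of building those request objects, assuming their usual fluent setters; the repository name, type and settings are invented:

```java
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.common.settings.Settings;

public class RepositoryRequestSketch {

    static PutRepositoryRequest putRequest() {
        return new PutRepositoryRequest("my_backup")           // repository name (invented)
            .type("fs")                                        // repository type
            .settings(Settings.builder()
                .put("location", "/mnt/backups/my_backup")     // invented path
                .build())
            .verify(true);                                     // verify after registration
    }

    static DeleteRepositoryRequest deleteRequest() {
        return new DeleteRepositoryRequest("my_backup");
    }
}
```

Each request is then passed to registerRepository or unregisterRepository together with an ActionListener, as in the hunks above.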
- return currentState; - } - throw new RepositoryMissingException(request.name); - } - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - // repository was created on both master and data nodes - return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); - } - }); + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + // repository was created on both master and data nodes + return discoveryNode.isMasterNode() || discoveryNode.isDataNode(); + } + }); } - public void verifyRepository(final String repositoryName, final ActionListener listener) { + public void verifyRepository(final String repositoryName, final ActionListener> listener) { final Repository repository = repository(repositoryName); try { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { @@ -229,9 +229,9 @@ public void verifyRepository(final String repositoryName, final ActionListener() { + verifyAction.verify(repositoryName, verificationToken, new ActionListener>() { @Override - public void onResponse(VerifyResponse verifyResponse) { + public void onResponse(List verifyResponse) { threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { try { repository.endVerification(verificationToken); @@ -263,7 +263,7 @@ public void onFailure(Exception e) { }); } } else { - listener.onResponse(new VerifyResponse(new DiscoveryNode[0], new VerificationFailure[0])); + listener.onResponse(Collections.emptyList()); } } catch (Exception e) { listener.onFailure(e); @@ -440,14 +440,10 @@ private class VerifyingRegisterRepositoryListener implements ActionListener() { + verifyRepository(name, new ActionListener>() { @Override - public void onResponse(VerifyResponse verifyResponse) { - if (verifyResponse.failed()) { - listener.onFailure(new RepositoryVerificationException(name, verifyResponse.failureDescription())); - } else { - listener.onResponse(clusterStateUpdateResponse); - } + public void onResponse(List verifyResponse) { + listener.onResponse(clusterStateUpdateResponse); } @Override @@ -465,104 +461,4 @@ public void onFailure(Exception e) { listener.onFailure(e); } } - - /** - * Register repository request - */ - public static class RegisterRepositoryRequest extends ClusterStateUpdateRequest { - - final String cause; - - final String name; - - final String type; - - final boolean verify; - - Settings settings = Settings.EMPTY; - - /** - * Constructs new register repository request - * - * @param cause repository registration cause - * @param name repository name - * @param type repository type - * @param verify verify repository after creation - */ - public RegisterRepositoryRequest(String cause, String name, String type, boolean verify) { - this.cause = cause; - this.name = name; - this.type = type; - this.verify = verify; - } - - /** - * Sets repository settings - * - * @param settings repository settings - * @return this request - */ - public RegisterRepositoryRequest settings(Settings settings) { - this.settings = settings; - return this; - } - } - - /** - * Unregister repository request - */ - public static class UnregisterRepositoryRequest extends ClusterStateUpdateRequest { - - final String cause; - - final String name; - - /** - * Creates a new unregister repository request - * - * @param cause repository unregistration cause - * @param name repository name - */ - public UnregisterRepositoryRequest(String cause, String name) { - this.cause = cause; - this.name = name; - } - - } - - /** - * Verify repository request - */ - public static class VerifyResponse { - - private 
VerificationFailure[] failures; - - private DiscoveryNode[] nodes; - - public VerifyResponse(DiscoveryNode[] nodes, VerificationFailure[] failures) { - this.nodes = nodes; - this.failures = failures; - } - - public VerificationFailure[] failures() { - return failures; - } - - public DiscoveryNode[] nodes() { - return nodes; - } - - public boolean failed() { - return failures.length > 0; - } - - public String failureDescription() { - return Arrays - .stream(failures) - .map(failure -> failure.toString()) - .collect(Collectors.joining(", ", "[", "]")); - } - - } - } diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 142751a0f8cf..24a5d3b561dd 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.repositories.RepositoriesService.VerifyResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; @@ -68,7 +67,7 @@ public VerifyNodeRepositoryAction(TransportService transportService, ClusterServ new VerifyNodeRepositoryRequestHandler()); } - public void verify(String repository, String verificationToken, final ActionListener listener) { + public void verify(String repository, String verificationToken, final ActionListener> listener) { final DiscoveryNodes discoNodes = clusterService.state().nodes(); final DiscoveryNode localNode = discoNodes.getLocalNode(); @@ -89,7 +88,7 @@ public void verify(String repository, String verificationToken, final ActionList errors.add(new VerificationFailure(node.getId(), e)); } if (counter.decrementAndGet() == 0) { - finishVerification(listener, nodes, errors); + finishVerification(repository, listener, nodes, errors); } } else { transportService.sendRequest(node, ACTION_NAME, new VerifyNodeRepositoryRequest(repository, verificationToken), @@ -97,7 +96,7 @@ public void verify(String repository, String verificationToken, final ActionList @Override public void handleResponse(TransportResponse.Empty response) { if (counter.decrementAndGet() == 0) { - finishVerification(listener, nodes, errors); + finishVerification(repository, listener, nodes, errors); } } @@ -105,7 +104,7 @@ public void handleResponse(TransportResponse.Empty response) { public void handleException(TransportException exp) { errors.add(new VerificationFailure(node.getId(), exp)); if (counter.decrementAndGet() == 0) { - finishVerification(listener, nodes, errors); + finishVerification(repository, listener, nodes, errors); } } }); @@ -113,10 +112,13 @@ public void handleException(TransportException exp) { } } - public void finishVerification(ActionListener listener, List nodes, + private static void finishVerification(String repositoryName, ActionListener> listener, List nodes, CopyOnWriteArrayList errors) { - listener.onResponse(new RepositoriesService.VerifyResponse(nodes.toArray(new DiscoveryNode[nodes.size()]), - errors.toArray(new VerificationFailure[errors.size()]))); + if (errors.isEmpty() == false) { + listener.onFailure(new RepositoryVerificationException(repositoryName, errors.toString())); + } else { + 
listener.onResponse(nodes); + } } private void doVerify(String repositoryName, String verificationToken, DiscoveryNode localNode) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatRecoveryAction.java similarity index 89% rename from server/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java rename to server/src/main/java/org/elasticsearch/rest/action/cat/RestCatRecoveryAction.java index 62a6d5f3f577..5db6e1afe246 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatRecoveryAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -48,8 +49,8 @@ * in a string format, designed to be used at the command line. An Index can * be specified to limit output to a particular index or indices. */ -public class RestRecoveryAction extends AbstractCatAction { - public RestRecoveryAction(Settings settings, RestController restController) { +public class RestCatRecoveryAction extends AbstractCatAction { + public RestCatRecoveryAction(Settings settings, RestController restController) { super(settings); restController.registerHandler(GET, "/_cat/recovery", this); restController.registerHandler(GET, "/_cat/recovery/{index}", this); @@ -87,6 +88,10 @@ protected Table getTableWithHeader(RestRequest request) { t.startHeaders() .addCell("index", "alias:i,idx;desc:index name") .addCell("shard", "alias:s,sh;desc:shard name") + .addCell("start_time", "default:false;alias:start;desc:recovery start time") + .addCell("start_time_millis", "default:false;alias:start_millis;desc:recovery start time in epoch milliseconds") + .addCell("stop_time", "default:false;alias:stop;desc:recovery stop time") + .addCell("stop_time_millis", "default:false;alias:stop_millis;desc:recovery stop time in epoch milliseconds") .addCell("time", "alias:t,ti;desc:recovery time") .addCell("type", "alias:ty;desc:recovery type") .addCell("stage", "alias:st;desc:recovery stage") @@ -150,6 +155,10 @@ public int compare(RecoveryState o1, RecoveryState o2) { t.startRow(); t.addCell(index); t.addCell(state.getShardId().id()); + t.addCell(XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime())); + t.addCell(state.getTimer().startTime()); + t.addCell(XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime())); + t.addCell(state.getTimer().stopTime()); t.addCell(new TimeValue(state.getTimer().time())); t.addCell(state.getRecoverySource().getType().toString().toLowerCase(Locale.ROOT)); t.addCell(state.getStage().toString().toLowerCase(Locale.ROOT)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index fb560cf7f77a..788de8e71548 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -289,6 +289,14 @@ protected Table getTableWithHeader(final RestRequest request) { 
table.addCell("refresh.time", "sibling:pri;alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); table.addCell("pri.refresh.time", "default:false;text-align:right;desc:time spent in refreshes"); + table.addCell("refresh.external_total", + "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes"); + table.addCell("pri.refresh.external_total", "default:false;text-align:right;desc:total external refreshes"); + + table.addCell("refresh.external_time", + "sibling:pri;alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes"); + table.addCell("pri.refresh.external_time", "default:false;text-align:right;desc:time spent in external refreshes"); + table.addCell("refresh.listeners", "sibling:pri;alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners"); table.addCell("pri.refresh.listeners", "default:false;text-align:right;desc:number of pending refresh listeners"); @@ -562,6 +570,12 @@ Table buildTable(final RestRequest request, table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getTotalTime()); table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getTotalTime()); + table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getExternalTotal()); + table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getExternalTotal()); + + table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getExternalTotalTime()); + table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getExternalTotalTime()); + table.addCell(totalStats.getRefresh() == null ? null : totalStats.getRefresh().getListeners()); table.addCell(primaryStats.getRefresh() == null ? null : primaryStats.getRefresh().getListeners()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index b3c480a2e7ba..74a103dcb540 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -201,6 +201,9 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); + table.addCell("refresh.external_total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes"); + table.addCell("refresh.external_time", + "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes"); table.addCell("refresh.listeners", "alias:rli,refreshListeners;default:false;text-align:right;" + "desc:number of pending refresh listeners"); @@ -378,6 +381,8 @@ Table buildTable(boolean fullId, RestRequest req, ClusterStateResponse state, No RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh(); table.addCell(refreshStats == null ? null : refreshStats.getTotal()); table.addCell(refreshStats == null ? null : refreshStats.getTotalTime()); + table.addCell(refreshStats == null ? null : refreshStats.getExternalTotal()); + table.addCell(refreshStats == null ? null : refreshStats.getExternalTotalTime()); table.addCell(refreshStats == null ? 
null : refreshStats.getListeners()); ScriptStats scriptStats = stats == null ? null : stats.getScriptStats(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index eb82f7da58c5..ae751475ce59 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -164,6 +164,9 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); + table.addCell("refresh.external_total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total external refreshes"); + table.addCell("refresh.external_time", + "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in external refreshes"); table.addCell("refresh.listeners", "alias:rli,refreshListeners;default:false;text-align:right;desc:number of pending refresh listeners"); @@ -319,6 +322,8 @@ private Table buildTable(RestRequest request, ClusterStateResponse state, Indice table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotal)); table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotalTime)); + table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getExternalTotal)); + table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getExternalTotalTime)); table.addCell(getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getListeners)); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getFetchCurrent())); diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScript.java b/server/src/main/java/org/elasticsearch/script/ScoreScript.java index 6ac5935826bf..f31af4c008c7 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScript.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScript.java @@ -62,6 +62,11 @@ public abstract class ScoreScript { private DoubleSupplier scoreSupplier = () -> 0.0; + private final int docBase; + private int docId; + private int shardId = -1; + private String indexName = null; + public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { // null check needed b/c of expression engine subclass if (lookup == null) { @@ -69,11 +74,13 @@ public ScoreScript(Map params, SearchLookup lookup, LeafReaderCo assert leafContext == null; this.params = null; this.leafLookup = null; + this.docBase = 0; } else { this.leafLookup = lookup.getLeafSearchLookup(leafContext); params = new HashMap<>(params); params.putAll(leafLookup.asMap()); this.params = new DeprecationMap(params, DEPRECATIONS, "score-script"); + this.docBase = leafContext.docBase; } } @@ -91,6 +98,7 @@ public final Map> getDoc() { /** Set the current document to run the script on next. 
*/ public void setDocument(int docid) { + this.docId = docid; leafLookup.setDocument(docid); } @@ -104,10 +112,74 @@ public void setScorer(Scorable scorer) { }; } + /** + * Accessed as _score in the painless script + * @return the score of the inner query + */ public double get_score() { return scoreSupplier.getAsDouble(); } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return the internal document ID + */ + public int _getDocId() { + return docId; + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return the internal document ID with the base + */ + public int _getDocBaseId() { + return docBase + docId; + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return shard id or throws an exception if shard is not set up for this script instance + */ + public int _getShardId() { + if (shardId > -1) { + return shardId; + } else { + throw new IllegalArgumentException("shard id can not be looked up!"); + } + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return index name or throws an exception if the index name is not set up for this script instance + */ + public String _getIndex() { + if (indexName != null) { + return indexName; + } else { + throw new IllegalArgumentException("index name can not be looked up!"); + } + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + */ + public void _setShard(int shardId) { + this.shardId = shardId; + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + */ + public void _setIndexName(String indexName) { + this.indexName = indexName; + } + + /** A factory to construct {@link ScoreScript} instances. */ public interface LeafFactory { diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index 273b8fcf8559..c7d6e889397f 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -21,22 +21,20 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; -import org.elasticsearch.common.Randomness; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.DateFieldMapper; import java.time.ZoneId; -import java.util.Random; -/** - * ScoringScriptImpl can be used as {@link ScoreScript} - * to run a previously compiled Painless script. 
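ScoreScript now remembers the segment's docBase alongside the per-segment doc id, so _getDocBaseId() can hand the predefined Painless functions a document id that is unique across the whole index reader. A small self-contained Lucene sketch of that arithmetic, independent of any Elasticsearch internals:

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.ByteBuffersDirectory;

public class DocBaseExample {
    public static void main(String[] args) throws Exception {
        ByteBuffersDirectory dir = new ByteBuffersDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (int i = 0; i < 6; i++) {
                writer.addDocument(new Document());
                writer.commit(); // force several tiny segments so docBase is non-trivial
            }
        }
        try (IndexReader reader = DirectoryReader.open(dir)) {
            for (LeafReaderContext leaf : reader.leaves()) {
                for (int localDocId = 0; localDocId < leaf.reader().maxDoc(); localDocId++) {
                    // this is the value _getDocBaseId() exposes: segment base + per-segment doc id
                    int globalDocId = leaf.docBase + localDocId;
                    System.out.println("segment " + leaf.ord + ": local=" + localDocId + " global=" + globalDocId);
                }
            }
        }
    }
}
```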
- */ +import static com.carrotsearch.hppc.BitMixer.mix32; + public final class ScoreScriptUtils { /****** STATIC FUNCTIONS that can be used by users for score calculations **/ @@ -53,26 +51,50 @@ public static double sigmoid(double value, double k, double a){ return Math.pow(value,a) / (Math.pow(k,a) + Math.pow(value,a)); } + // random score based on the documents' values of the given field + public static final class RandomScoreField { + private final ScoreScript scoreScript; + private final ScriptDocValues docValues; + private final int saltedSeed; - // reproducible random - public static double randomReproducible(String seedValue, int seed) { - int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), seed); - return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 - } - // not reproducible random - public static final class RandomNotReproducible { - private final Random rnd; + public RandomScoreField(ScoreScript scoreScript, int seed, String fieldName) { + this.scoreScript = scoreScript; + this.docValues = scoreScript.getDoc().get(fieldName); + int salt = (scoreScript._getIndex().hashCode() << 10) | scoreScript._getShardId(); + this.saltedSeed = mix32(salt ^ seed); - public RandomNotReproducible() { - this.rnd = Randomness.get(); } - public double randomNotReproducible() { - return rnd.nextDouble(); + public double randomScore() { + try { + docValues.setNextDocId(scoreScript._getDocId()); + String seedValue = String.valueOf(docValues.get(0)); + int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed); + return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 + } catch (Exception e) { + throw ExceptionsHelper.convertToElastic(e); + } } } + // random score based on the internal Lucene document Ids + public static final class RandomScoreDoc { + private final ScoreScript scoreScript; + private final int saltedSeed; + + public RandomScoreDoc(ScoreScript scoreScript, int seed) { + this.scoreScript = scoreScript; + int salt = (scoreScript._getIndex().hashCode() << 10) | scoreScript._getShardId(); + this.saltedSeed = mix32(salt ^ seed); + } + + public double randomScore() { + String seedValue = Integer.toString(scoreScript._getDocBaseId()); + int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed); + return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 + } + } // **** Decay functions on geo field public static final class DecayGeoLinear { diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index de4f548f6cf0..4c6ba07c631a 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchBoolPrefixQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; import org.elasticsearch.index.query.CommonTermsQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; @@ -786,6 +787,8 @@ private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(IntervalQueryBuilder.NAME, IntervalQueryBuilder::new, 
IntervalQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(DistanceFeatureQueryBuilder.NAME, DistanceFeatureQueryBuilder::new, DistanceFeatureQueryBuilder::fromXContent)); + registerQuery( + new QuerySpec<>(MatchBoolPrefixQueryBuilder.NAME, MatchBoolPrefixQueryBuilder::new, MatchBoolPrefixQueryBuilder::fromXContent)); if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerQuery(new QuerySpec<>(GeoShapeQueryBuilder.NAME, GeoShapeQueryBuilder::new, GeoShapeQueryBuilder::fromXContent)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 4c614626d1d1..8cf5ee7a41de 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -132,7 +132,6 @@ public void execute(SearchContext context) { throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); } } - context.queryResult().aggregations(new InternalAggregations(aggregations)); List pipelineAggregators = context.aggregations().factories().createPipelineAggregators(); List siblingPipelineAggregators = new ArrayList<>(pipelineAggregators.size()); for (PipelineAggregator pipelineAggregator : pipelineAggregators) { @@ -144,7 +143,7 @@ public void execute(SearchContext context) { + "allowed at the top level"); } } - context.queryResult().pipelineAggregators(siblingPipelineAggregators); + context.queryResult().aggregations(new InternalAggregations(aggregations, siblingPipelineAggregators)); // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 187f5e3864ed..8910ca25c337 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -77,7 +77,7 @@ public InternalAggregations(List aggregations, List getTopLevelPipelineAggregators() { + public List getTopLevelPipelineAggregators() { return topLevelPipelineAggregators; } @@ -91,20 +91,7 @@ public static InternalAggregations reduce(List aggregation if (aggregationsList.isEmpty()) { return null; } - InternalAggregations first = aggregationsList.get(0); - return reduce(aggregationsList, first.topLevelPipelineAggregators, context); - } - - /** - * Reduces the given list of aggregations as well as the provided top-level pipeline aggregators. - * Note that top-level pipeline aggregators are reduced only as part of the final reduction phase, otherwise they are left untouched. 
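For reference, the RandomScoreField and RandomScoreDoc helpers added above both derive their value the same way: the seed is salted with the index name hash and the shard id, the per-document input is hashed with MurmurHash3, and only the lower 24 bits are kept to build a double in [0.0, 1.0). A stripped-down, runnable sketch of that mapping; the index name, shard id, seed and per-document value below are arbitrary example inputs:

```java
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;

import static com.carrotsearch.hppc.BitMixer.mix32;

public class RandomScoreSketch {
    public static void main(String[] args) {
        String indexName = "my-index";   // example value
        int shardId = 2;                 // example value
        int seed = 42;                   // example value

        // same salting as RandomScoreField/RandomScoreDoc above
        int salt = (indexName.hashCode() << 10) | shardId;
        int saltedSeed = mix32(salt ^ seed);

        // the per-document input: a field value, or the segment-global doc id
        String seedValue = "doc-value-or-doc-id";
        int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed);

        // only the lower 24 bits are used, giving a value in [0.0, 1.0)
        double score = (hash & 0x00FFFFFF) / (float) (1 << 24);
        System.out.println(score);
    }
}
```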
- */ - public static InternalAggregations reduce(List aggregationsList, - List topLevelPipelineAggregators, - ReduceContext context) { - if (aggregationsList.isEmpty()) { - return null; - } + List topLevelPipelineAggregators = aggregationsList.get(0).getTopLevelPipelineAggregators(); // first we collect all aggregations of the same type and list them together Map> aggByName = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 34d3508f6bab..9f9a2c2680a1 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; @@ -28,6 +29,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; @@ -37,7 +39,6 @@ import java.io.IOException; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.stream.Collectors; import static org.elasticsearch.common.lucene.Lucene.readTopDocs; @@ -54,7 +55,6 @@ public final class QuerySearchResult extends SearchPhaseResult { private DocValueFormat[] sortValueFormats; private InternalAggregations aggregations; private boolean hasAggs; - private List pipelineAggregators = Collections.emptyList(); private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; @@ -198,14 +198,6 @@ public void profileResults(ProfileShardResult shardResults) { hasProfileResults = shardResults != null; } - public List pipelineAggregators() { - return pipelineAggregators; - } - - public void pipelineAggregators(List pipelineAggregators) { - this.pipelineAggregators = Objects.requireNonNull(pipelineAggregators); - } - public Suggest suggest() { return suggest; } @@ -294,8 +286,18 @@ public void readFromWithId(long id, StreamInput in) throws IOException { if (hasAggs = in.readBoolean()) { aggregations = InternalAggregations.readAggregations(in); } - pipelineAggregators = in.readNamedWriteableList(PipelineAggregator.class).stream().map(a -> (SiblingPipelineAggregator) a) - .collect(Collectors.toList()); + if (in.getVersion().before(Version.V_7_1_0)) { + List pipelineAggregators = in.readNamedWriteableList(PipelineAggregator.class).stream() + .map(a -> (SiblingPipelineAggregator) a).collect(Collectors.toList()); + if (hasAggs && pipelineAggregators.isEmpty() == false) { + List internalAggs = aggregations.asList().stream() + .map(agg -> (InternalAggregation) agg).collect(Collectors.toList()); + //Earlier versions serialize sibling pipeline aggs separately as they used to be set to QuerySearchResult directly, while + //later versions include them in InternalAggregations. 
Note that despite serializing sibling pipeline aggs as part of + //InternalAggregations is supported since 6.7.0, the shards set sibling pipeline aggs to InternalAggregations only from 7.1. + this.aggregations = new InternalAggregations(internalAggs, pipelineAggregators); + } + } if (in.readBoolean()) { suggest = new Suggest(in); } @@ -332,7 +334,16 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeBoolean(true); aggregations.writeTo(out); } - out.writeNamedWriteableList(pipelineAggregators); + if (out.getVersion().before(Version.V_7_1_0)) { + //Earlier versions expect sibling pipeline aggs separately as they used to be set to QuerySearchResult directly, + //while later versions expect them in InternalAggregations. Note that despite serializing sibling pipeline aggs as part of + //InternalAggregations is supported since 6.7.0, the shards set sibling pipeline aggs to InternalAggregations only from 7.1 on. + if (aggregations == null) { + out.writeNamedWriteableList(Collections.emptyList()); + } else { + out.writeNamedWriteableList(aggregations.getTopLevelPipelineAggregators()); + } + } if (suggest == null) { out.writeBoolean(false); } else { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 7ba53cb5d1e1..088839286490 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1132,6 +1132,7 @@ public void deleteSnapshot(final String repositoryName, final String snapshotNam */ private void deleteSnapshot(final Snapshot snapshot, final ActionListener listener, final long repositoryStateId, final boolean immediatePriority) { + logger.info("deleting snapshot [{}]", snapshot); Priority priority = immediatePriority ? Priority.IMMEDIATE : Priority.NORMAL; clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask(priority) { diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java new file mode 100644 index 000000000000..c50825d00a01 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java @@ -0,0 +1,286 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
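Since sibling pipeline aggregators now ride inside InternalAggregations, QuerySearchResult only writes the old stand-alone list when the stream targets a pre-7.1.0 node. A minimal sketch of that version-gated write, using nothing beyond the stream classes already used above; V_7_0_0 is just an example of an older peer version:

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

import java.util.Collections;

public class VersionGatedWriteSketch {
    public static void main(String[] args) throws Exception {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(Version.V_7_0_0); // pretend the other side is a pre-7.1.0 node
            if (out.getVersion().before(Version.V_7_1_0)) {
                // older peers still expect the sibling pipeline aggregators as a separate,
                // possibly empty, list after the aggregations themselves
                out.writeNamedWriteableList(Collections.emptyList());
            }
            System.out.println("wrote " + out.size() + " bytes");
        }
    }
}
```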
+ */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.metrics.MeanMetric; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +public class InboundHandler { + + private static final Logger logger = LogManager.getLogger(InboundHandler.class); + + private final MeanMetric readBytesMetric = new MeanMetric(); + private final ThreadPool threadPool; + private final OutboundHandler outboundHandler; + private final CircuitBreakerService circuitBreakerService; + private final InboundMessage.Reader reader; + private final TransportLogger transportLogger; + private final TransportHandshaker handshaker; + private final TransportKeepAlive keepAlive; + + private final Transport.ResponseHandlers responseHandlers = new Transport.ResponseHandlers(); + private volatile Map> requestHandlers = Collections.emptyMap(); + private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; + + InboundHandler(ThreadPool threadPool, OutboundHandler outboundHandler, InboundMessage.Reader reader, + CircuitBreakerService circuitBreakerService, TransportLogger transportLogger, TransportHandshaker handshaker, + TransportKeepAlive keepAlive) { + this.threadPool = threadPool; + this.outboundHandler = outboundHandler; + this.circuitBreakerService = circuitBreakerService; + this.reader = reader; + this.transportLogger = transportLogger; + this.handshaker = handshaker; + this.keepAlive = keepAlive; + } + + synchronized void registerRequestHandler(RequestHandlerRegistry reg) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); + } + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + + final RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); + } + + final Transport.ResponseHandlers getResponseHandlers() { + return responseHandlers; + } + + MeanMetric getReadBytes() { + return readBytesMetric; + } + + void setMessageListener(TransportMessageListener listener) { + if (messageListener == TransportMessageListener.NOOP_LISTENER) { + messageListener = listener; + } else { + throw new IllegalStateException("Cannot set message listener twice"); + } + } + + void inboundMessage(TcpChannel channel, BytesReference message) throws Exception { + channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + transportLogger.logInboundMessage(channel, message); + readBytesMetric.inc(message.length() + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); + // Message length of 0 is a ping + if (message.length() != 0) { + messageReceived(message, channel); + 
} else { + keepAlive.receiveKeepAlive(channel); + } + } + + private void messageReceived(BytesReference reference, TcpChannel channel) throws IOException { + InetSocketAddress remoteAddress = channel.getRemoteAddress(); + + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext(); + InboundMessage message = reader.deserialize(reference)) { + // Place the context with the headers from the message + message.getStoredContext().restore(); + threadContext.putTransient("_remote_address", remoteAddress); + if (message.isRequest()) { + handleRequest(channel, (InboundMessage.Request) message, reference.length()); + } else { + final TransportResponseHandler handler; + long requestId = message.getRequestId(); + if (message.isHandshake()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + TransportResponseHandler theHandler = + responseHandlers.onResponseReceived(requestId, messageListener); + if (theHandler == null && message.isError()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + handler = theHandler; + } + } + // ignore if its null, the service logs it + if (handler != null) { + if (message.isError()) { + handlerResponseError(message.getStreamInput(), handler); + } else { + handleResponse(remoteAddress, message.getStreamInput(), handler); + } + // Check the entire message has been read + final int nextByte = message.getStreamInput().read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" + + handler + "], error [" + message.isError() + "]; resetting"); + } + } + } + } + } + + private void handleRequest(TcpChannel channel, InboundMessage.Request message, int messageLengthBytes) { + final Set features = message.getFeatures(); + final String action = message.getActionName(); + final long requestId = message.getRequestId(); + final StreamInput stream = message.getStreamInput(); + final Version version = message.getVersion(); + messageListener.onRequestReceived(requestId, action); + TransportChannel transportChannel = null; + try { + if (message.isHandshake()) { + handshaker.handleHandshake(version, features, channel, requestId, stream); + } else { + final RequestHandlerRegistry reg = getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException(action); + } + CircuitBreaker breaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + if (reg.canTripCircuitBreaker()) { + breaker.addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); + } else { + breaker.addWithoutBreaking(messageLengthBytes); + } + transportChannel = new TcpTransportChannel(outboundHandler, channel, action, requestId, version, features, + circuitBreakerService, messageLengthBytes, message.isCompress()); + final TransportRequest request = reg.newRequest(stream); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + // in case we throw an exception, i.e. 
when the limit is hit, we don't want to verify + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action + + "], available [" + stream.available() + "]; resetting"); + } + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); + } + } catch (Exception e) { + // the circuit breaker tripped + if (transportChannel == null) { + transportChannel = new TcpTransportChannel(outboundHandler, channel, action, requestId, version, features, + circuitBreakerService, 0, message.isCompress()); + } + try { + transportChannel.sendResponse(e); + } catch (IOException inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); + } + } + } + + private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, + final TransportResponseHandler handler) { + final T response; + try { + response = handler.read(stream); + response.remoteAddress(new TransportAddress(remoteAddress)); + } catch (Exception e) { + handleException(handler, new TransportSerializationException( + "Failed to deserialize response from handler [" + handler.getClass().getName() + "]", e)); + return; + } + threadPool.executor(handler.executor()).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException(e)); + } + + @Override + protected void doRun() { + handler.handleResponse(response); + } + }); + } + + private void handlerResponseError(StreamInput stream, final TransportResponseHandler handler) { + Exception error; + try { + error = stream.readException(); + } catch (Exception e) { + error = new TransportSerializationException("Failed to deserialize exception response from stream", e); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + } + }); + } + + private static class RequestHandler extends AbstractRunnable { + private final RequestHandlerRegistry reg; + private final TransportRequest request; + private final TransportChannel transportChannel; + + RequestHandler(RequestHandlerRegistry reg, TransportRequest request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @SuppressWarnings({"unchecked"}) + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + try { + transportChannel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage( + "Failed to send error message back to client for action [{}]", 
reg.getAction()), inner); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 1bf47d1a42f9..859a96e784c3 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -145,8 +145,6 @@ public String getKey(final String key) { /** * A proxy address for the remote cluster. - * NOTE: this settings is undocumented until we have at last one transport that supports passing - * on the hostname via a mechanism like SNI. */ public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( "cluster.remote.", diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 8125d5bcb12f..eb61af8d2a38 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -32,9 +32,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; @@ -52,10 +50,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.Node; @@ -107,15 +103,11 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private static final BytesReference EMPTY_BYTES_REFERENCE = new BytesArray(new byte[0]); protected final Settings settings; - private final CircuitBreakerService circuitBreakerService; protected final ThreadPool threadPool; - protected final BigArrays bigArrays; protected final PageCacheRecycler pageCacheRecycler; protected final NetworkService networkService; protected final Set profileSettings; - private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; - private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); private final Map> serverChannels = newConcurrentMap(); private final Set acceptedChannels = ConcurrentCollections.newConcurrentSet(); @@ -125,14 +117,10 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); private volatile BoundTransportAddress boundAddress; - private final MeanMetric readBytesMetric = new MeanMetric(); - private volatile Map> requestHandlers = Collections.emptyMap(); - private final ResponseHandlers responseHandlers = new ResponseHandlers(); - private final TransportLogger transportLogger; private final TransportHandshaker handshaker; private 
final TransportKeepAlive keepAlive; - private final InboundMessage.Reader reader; private final OutboundHandler outboundHandler; + private final InboundHandler inboundHandler; public TcpTransport(Settings settings, Version version, ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, @@ -140,11 +128,9 @@ public TcpTransport(Settings settings, Version version, ThreadPool threadPool, P this.settings = settings; this.profileSettings = getProfileSettings(settings); this.threadPool = threadPool; - this.bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS); this.pageCacheRecycler = pageCacheRecycler; - this.circuitBreakerService = circuitBreakerService; this.networkService = networkService; - this.transportLogger = new TransportLogger(); + TransportLogger transportLogger = new TransportLogger(); String nodeName = Node.NODE_NAME_SETTING.get(settings); final Settings defaultFeatures = TransportSettings.DEFAULT_FEATURES_SETTING.get(settings); String[] features; @@ -159,16 +145,19 @@ public TcpTransport(Settings settings, Version version, ThreadPool threadPool, P // use a sorted set to present the features in a consistent order features = new TreeSet<>(defaultFeatures.names()).toArray(new String[defaultFeatures.names().size()]); } - this.outboundHandler = new OutboundHandler(nodeName, version, features, threadPool, bigArrays, transportLogger); + BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS); + this.outboundHandler = new OutboundHandler(nodeName, version, features, threadPool, bigArrays, transportLogger); this.handshaker = new TransportHandshaker(version, threadPool, (node, channel, requestId, v) -> outboundHandler.sendRequest(node, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, new TransportHandshaker.HandshakeRequest(version), TransportRequestOptions.EMPTY, v, false, true), (v, features1, channel, response, requestId) -> outboundHandler.sendResponse(v, features1, channel, requestId, TransportHandshaker.HANDSHAKE_ACTION_NAME, response, false, true)); + InboundMessage.Reader reader = new InboundMessage.Reader(version, namedWriteableRegistry, threadPool.getThreadContext()); this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); - this.reader = new InboundMessage.Reader(version, namedWriteableRegistry, threadPool.getThreadContext()); + this.inboundHandler = new InboundHandler(threadPool, outboundHandler, reader, circuitBreakerService, transportLogger, handshaker, + keepAlive); } @Override @@ -177,26 +166,13 @@ protected void doStart() { @Override public synchronized void setMessageListener(TransportMessageListener listener) { - if (messageListener == TransportMessageListener.NOOP_LISTENER) { - messageListener = listener; - outboundHandler.setMessageListener(listener); - } else { - throw new IllegalStateException("Cannot set message listener twice"); - } - } - - @Override - public CircuitBreaker getInFlightRequestBreaker() { - // We always obtain a fresh breaker to reflect changes to the breaker configuration. 
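With the inbound path pulled out of TcpTransport, request-handler registration lives in InboundHandler but keeps the same copy-on-write shape: a synchronized writer builds a fresh immutable map, while lookups stay lock-free through a volatile field. A self-contained sketch of that pattern with a placeholder handler type instead of the real RequestHandlerRegistry; the real code uses MapBuilder rather than a plain HashMap:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class CopyOnWriteRegistry<H> {
    // volatile so lock-free readers always see a fully built map
    private volatile Map<String, H> handlers = Collections.emptyMap();

    // registrations are rare (startup time), so a synchronized copy-and-swap is enough
    public synchronized void register(String action, H handler) {
        if (handlers.containsKey(action)) {
            throw new IllegalArgumentException("handler for action " + action + " is already registered");
        }
        Map<String, H> copy = new HashMap<>(handlers);
        copy.put(action, handler);
        handlers = Collections.unmodifiableMap(copy);
    }

    // hot path: a plain volatile read, no locking
    public H get(String action) {
        return handlers.get(action);
    }
}
```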
- return circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + outboundHandler.setMessageListener(listener); + inboundHandler.setMessageListener(listener); } @Override public synchronized void registerRequestHandler(RequestHandlerRegistry reg) { - if (requestHandlers.containsKey(reg.getAction())) { - throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); - } - requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + inboundHandler.registerRequestHandler(reg); } public final class NodeChannels extends CloseableConnection { @@ -665,14 +641,7 @@ protected void serverAcceptedChannel(TcpChannel channel) { */ public void inboundMessage(TcpChannel channel, BytesReference message) { try { - channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); - transportLogger.logInboundMessage(channel, message); - // Message length of 0 is a ping - if (message.length() != 0) { - messageReceived(message, channel); - } else { - keepAlive.receiveKeepAlive(channel); - } + inboundHandler.inboundMessage(channel, message); } catch (Exception e) { onException(channel, e); } @@ -820,200 +789,9 @@ public HttpOnTransportException(StreamInput in) throws IOException { } } - /** - * This method handles the message receive part for both request and responses - */ - public final void messageReceived(BytesReference reference, TcpChannel channel) throws IOException { - readBytesMetric.inc(reference.length() + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); - InetSocketAddress remoteAddress = channel.getRemoteAddress(); - - ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext existing = threadContext.stashContext(); - InboundMessage message = reader.deserialize(reference)) { - // Place the context with the headers from the message - message.getStoredContext().restore(); - threadContext.putTransient("_remote_address", remoteAddress); - if (message.isRequest()) { - handleRequest(channel, (InboundMessage.Request) message, reference.length()); - } else { - final TransportResponseHandler handler; - long requestId = message.getRequestId(); - if (message.isHandshake()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - TransportResponseHandler theHandler = - responseHandlers.onResponseReceived(requestId, messageListener); - if (theHandler == null && message.isError()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - handler = theHandler; - } - } - // ignore if its null, the service logs it - if (handler != null) { - if (message.isError()) { - handlerResponseError(message.getStreamInput(), handler); - } else { - handleResponse(remoteAddress, message.getStreamInput(), handler); - } - // Check the entire message has been read - final int nextByte = message.getStreamInput().read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" - + handler + "], error [" + message.isError() + "]; resetting"); - } - } - } - } - } - - private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, - final TransportResponseHandler handler) { - final T response; - try { - response = handler.read(stream); - response.remoteAddress(new TransportAddress(remoteAddress)); - } catch (Exception e) { - 
handleException(handler, new TransportSerializationException( - "Failed to deserialize response from handler [" + handler.getClass().getName() + "]", e)); - return; - } - threadPool.executor(handler.executor()).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - - @Override - protected void doRun() throws Exception { - handler.handleResponse(response); - } - }); - - } - - /** - * Executed for a received response error - */ - private void handlerResponseError(StreamInput stream, final TransportResponseHandler handler) { - Exception error; - try { - error = stream.readException(); - } catch (Exception e) { - error = new TransportSerializationException("Failed to deserialize exception response from stream", e); - } - handleException(handler, error); - } - - private void handleException(final TransportResponseHandler handler, Throwable error) { - if (!(error instanceof RemoteTransportException)) { - error = new RemoteTransportException(error.getMessage(), error); - } - final RemoteTransportException rtx = (RemoteTransportException) error; - threadPool.executor(handler.executor()).execute(() -> { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); - } - }); - } - - protected void handleRequest(TcpChannel channel, InboundMessage.Request message, int messageLengthBytes) throws IOException { - final Set features = message.getFeatures(); - final String profileName = channel.getProfile(); - final String action = message.getActionName(); - final long requestId = message.getRequestId(); - final StreamInput stream = message.getStreamInput(); - final Version version = message.getVersion(); - messageListener.onRequestReceived(requestId, action); - TransportChannel transportChannel = null; - try { - if (message.isHandshake()) { - handshaker.handleHandshake(version, features, channel, requestId, stream); - } else { - final RequestHandlerRegistry reg = getRequestHandler(action); - if (reg == null) { - throw new ActionNotFoundTransportException(action); - } - if (reg.canTripCircuitBreaker()) { - getInFlightRequestBreaker().addEstimateBytesAndMaybeBreak(messageLengthBytes, ""); - } else { - getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes); - } - transportChannel = new TcpTransportChannel(outboundHandler, channel, action, requestId, version, features, - circuitBreakerService, messageLengthBytes, message.isCompress()); - final TransportRequest request = reg.newRequest(stream); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - // in case we throw an exception, i.e. when the limit is hit, we don't want to verify - validateRequest(stream, requestId, action); - threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); - } - } catch (Exception e) { - // the circuit breaker tripped - if (transportChannel == null) { - transportChannel = new TcpTransportChannel(outboundHandler, channel, action, requestId, version, features, - circuitBreakerService, 0, message.isCompress()); - } - try { - transportChannel.sendResponse(e); - } catch (IOException inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); - } - } - } - - // This template method is needed to inject custom error checking logic in tests. 
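The protected validateRequest hook that TcpTransport used to expose for tests is gone; InboundHandler now performs the same "message fully read" guard inline for both requests and responses by issuing one extra read() and expecting -1. A tiny standalone sketch of that guard on an ordinary InputStream; the exception message is illustrative only:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class FullyReadCheck {
    static void ensureFullyRead(InputStream stream, String what) throws IOException {
        // a single extra read() distinguishes "exactly consumed" (-1) from "bytes left over"
        int nextByte = stream.read();
        if (nextByte != -1) {
            throw new IllegalStateException("Message not fully read (" + what + "); resetting");
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream fullyConsumed = new ByteArrayInputStream(new byte[0]);
        ensureFullyRead(fullyConsumed, "request");        // passes

        InputStream leftover = new ByteArrayInputStream(new byte[] {1, 2, 3});
        try {
            ensureFullyRead(leftover, "request");
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage());           // leftover bytes were detected
        }
    }
}
```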
- protected void validateRequest(StreamInput stream, long requestId, String action) throws IOException { - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action - + "], available [" + stream.available() + "]; resetting"); - } - } - - class RequestHandler extends AbstractRunnable { - private final RequestHandlerRegistry reg; - private final TransportRequest request; - private final TransportChannel transportChannel; - - RequestHandler(RequestHandlerRegistry reg, TransportRequest request, TransportChannel transportChannel) { - this.reg = reg; - this.request = request; - this.transportChannel = transportChannel; - } - - @SuppressWarnings({"unchecked"}) - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - if (lifecycleState() == Lifecycle.State.STARTED) { - // we can only send a response transport is started.... - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", reg.getAction()), inner); - } - } - } - } - public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, ActionListener listener) { - handshaker.sendHandshake(responseHandlers.newRequestId(), node, channel, profile.getHandshakeTimeout(), listener); + long requestId = inboundHandler.getResponseHandlers().newRequestId(); + handshaker.sendHandshake(requestId, node, channel, profile.getHandshakeTimeout(), listener); } final TransportKeepAlive getKeepAlive() { @@ -1037,7 +815,7 @@ final Set getAcceptedChannels() { * * @throws IllegalStateException if the transport is not started / open */ - protected final void ensureOpen() { + private void ensureOpen() { if (lifecycle.started() == false) { throw new IllegalStateException("transport has been stopped"); } @@ -1046,7 +824,8 @@ protected final void ensureOpen() { @Override public final TransportStats getStats() { MeanMetric transmittedBytes = outboundHandler.getTransmittedBytes(); - return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytes.count(), + MeanMetric readBytes = inboundHandler.getReadBytes(); + return new TransportStats(acceptedChannels.size(), readBytes.count(), readBytes.sum(), transmittedBytes.count(), transmittedBytes.sum()); } @@ -1107,12 +886,12 @@ public ProfileSettings(Settings settings, String profileName) { @Override public final ResponseHandlers getResponseHandlers() { - return responseHandlers; + return inboundHandler.getResponseHandlers(); } @Override public final RequestHandlerRegistry getRequestHandler(String action) { - return requestHandlers.get(action); + return inboundHandler.getRequestHandler(action); } private final class ChannelsConnectedListener implements ActionListener { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index a13e8af919b2..b3ecc5907675 100644 --- 
a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -50,7 +50,6 @@ import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import org.junit.Before; @@ -156,15 +155,8 @@ void createIndex(String index, TimeValue timeout, ActionListener { TestSingleItemBulkWriteAction(TestTransportBulkAction bulkAction) { - super(SETTINGS, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, - TransportBulkActionIngestTests.this.clusterService, - null, null, null, new ActionFilters(Collections.emptySet()), null, - IndexRequest::new, IndexRequest::new, ThreadPool.Names.WRITE, bulkAction, null); - } - - @Override - protected IndexResponse newResponseInstance() { - return new IndexResponse(); + super(IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, + new ActionFilters(Collections.emptySet()), IndexRequest::new, bulkAction); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 8fa8060b3853..610a72de6ecf 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -550,7 +550,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); - Exception err = new VersionConflictEngineException(shardId, "_doc", "id", + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0); IndexShard shard = mock(IndexShard.class); @@ -784,7 +784,7 @@ public void testRetries() throws Exception { IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); - Exception err = new VersionConflictEngineException(shardId, "_doc", "id", + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0, 0); Engine.IndexResult mappingUpdate = diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index ec9681a7b62f..ab31c44ff2d5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -396,7 +396,6 @@ public void testMergeAggs() throws InterruptedException { assertEquals(totalCount, bucket.getDocCount()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40214") public void testMergeSearchHits() throws InterruptedException { final long currentRelativeTime = randomLong(); final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); @@ -442,6 +441,7 @@ public void testMergeSearchHits() throws InterruptedException { float expectedMaxScore = 
Float.NEGATIVE_INFINITY; int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize); Iterator> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + boolean hasHits = false; for (int i = 0; i < numResponses; i++) { Map.Entry entry = indicesIterator.next(); String clusterAlias = entry.getKey(); @@ -465,6 +465,7 @@ public void testMergeSearchHits() throws InterruptedException { float maxScore = scoreSort ? numDocs * scoreFactor : Float.NaN; SearchHit[] hits = randomSearchHitArray(numDocs, numResponses, clusterAlias, indices, maxScore, scoreFactor, sortFields, priorityQueue); + hasHits |= hits.length > 0; expectedMaxScore = Math.max(expectedMaxScore, maxScore); Object[] collapseValues = null; @@ -513,8 +514,14 @@ public void testMergeSearchHits() throws InterruptedException { assertNull(searchResponse.getScrollId()); SearchHits searchHits = searchResponse.getHits(); - assertArrayEquals(sortFields, searchHits.getSortFields()); - assertEquals(collapseField, searchHits.getCollapseField()); + // the sort fields and the collapse field are not returned when hits are empty + if (hasHits) { + assertArrayEquals(sortFields, searchHits.getSortFields()); + assertEquals(collapseField, searchHits.getCollapseField()); + } else { + assertNull(searchHits.getSortFields()); + assertNull(searchHits.getCollapseField()); + } if (expectedTotalHits == null) { assertNull(searchHits.getTotalHits()); } else { @@ -532,7 +539,9 @@ public void testMergeSearchHits() throws InterruptedException { priorityQueue.poll(); } SearchHit[] hits = searchHits.getHits(); - if (collapseField != null) { + if (collapseField != null + // the collapse field is not returned when hits are empty + && hasHits) { assertEquals(hits.length, searchHits.getCollapseValues().length); } else { assertNull(searchHits.getCollapseValues()); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index a2dee54b3c6f..4dd3e3e33c39 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -23,6 +23,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransport; import org.elasticsearch.transport.TransportRequest; @@ -461,4 +463,52 @@ public void testDoesNotIncludeExtraNodes() { deterministicTaskQueue.runAllTasks(); assertTrue(bootstrapped.get()); } + + public void testBootstrapsAutomaticallyWithSingleNodeDiscovery() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(1)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + 
assertTrue(vc.hasQuorum(singletonList(localNode.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once + } + + public void testFailBootstrapWithBothSingleNodeDiscoveryAndInitialMasterNodes() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()) + .put(INITIAL_MASTER_NODES_SETTING.getKey(), "test"); + + assertThat(expectThrows(IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> fail())).getMessage(), + containsString("setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] is not allowed when [discovery.type] is set " + + "to [single-node]")); + } + + public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()) + .put(Node.NODE_MASTER_SETTING.getKey(), false); + + assertThat(expectThrows(IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> fail())).getMessage(), + containsString("node with [discovery.type] set to [single-node] must be master-eligible")); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 07ae6679e448..6f078217e4f4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.MetaStateService; @@ -204,7 +205,7 @@ public void testCanUpdateClusterStateAfterStabilisation() { } public void testDoesNotElectNonMasterNode() { - final Cluster cluster = new Cluster(randomIntBetween(1, 5), false); + final Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY); cluster.runRandomly(); cluster.stabilise(); @@ -904,7 +905,7 @@ public void testIncompatibleDiffResendsFullState() { * and join the leader again. 
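The new ClusterBootstrapService tests above pin down how discovery.type set to single-node interacts with the other bootstrap settings: the node bootstraps itself exactly once, cluster.initial_master_nodes is rejected, and the node must be master-eligible. A small sketch of those settings combinations; building the Settings objects never throws by itself, the rejections happen when ClusterBootstrapService is constructed, as the tests show:

```java
import org.elasticsearch.common.settings.Settings;

public class SingleNodeDiscoverySettings {
    public static void main(String[] args) {
        // bootstraps automatically, no initial master nodes needed
        Settings ok = Settings.builder()
            .put("discovery.type", "single-node")
            .build();

        // rejected by ClusterBootstrapService: initial master nodes are not allowed with single-node discovery
        Settings rejectedInitialMasters = Settings.builder()
            .put("discovery.type", "single-node")
            .put("cluster.initial_master_nodes", "test")
            .build();

        // rejected by ClusterBootstrapService: a single-node-discovery node must be master-eligible
        Settings rejectedNonMaster = Settings.builder()
            .put("discovery.type", "single-node")
            .put("node.master", false)
            .build();

        System.out.println(ok + " " + rejectedInitialMasters + " " + rejectedNonMaster);
    }
}
```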
*/ public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { - final Cluster cluster = new Cluster(2, false); + final Cluster cluster = new Cluster(2, false, Settings.EMPTY); cluster.runRandomly(); cluster.stabilise(); @@ -1029,7 +1030,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode shiftedNode = randomFrom(cluster2.clusterNodes).restartedNode(); final ClusterNode newNode = cluster1.new ClusterNode(nextNodeIndex.getAndIncrement(), - shiftedNode.getLocalNode(), n -> shiftedNode.persistedState); + shiftedNode.getLocalNode(), n -> shiftedNode.persistedState, shiftedNode.nodeSettings); cluster1.clusterNodes.add(newNode); MockLogAppender mockAppender = new MockLogAppender(); @@ -1053,7 +1054,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode detachedNode = newNode.restartedNode( metaData -> DetachClusterCommand.updateMetaData(metaData), - term -> DetachClusterCommand.updateCurrentTerm()); + term -> DetachClusterCommand.updateCurrentTerm(), newNode.nodeSettings); cluster1.clusterNodes.replaceAll(cn -> cn == newNode ? detachedNode : cn); cluster1.stabilise(); } @@ -1111,6 +1112,43 @@ public void testFollowerRemovedIfUnableToSendRequestsToMaster() { + DEFAULT_CLUSTER_STATE_UPDATE_DELAY); } + public void testSingleNodeDiscoveryWithoutQuorum() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode clusterNode = cluster.getAnyNode(); + logger.debug("rebooting [{}]", clusterNode.getId()); + clusterNode.close(); + cluster.clusterNodes.forEach( + cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode( + new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + IllegalStateException ise = expectThrows(IllegalStateException.class, + () -> cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? 
+ cn.restartedNode(Function.identity(), Function.identity(), Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE).build()) : + cn)); + assertThat(ise.getMessage(), containsString("cannot start with [discovery.type] set to [single-node] when local node")); + assertThat(ise.getMessage(), containsString("does not have quorum in voting configuration")); + } + + public void testSingleNodeDiscoveryWithQuorum() { + final Cluster cluster = new Cluster(1, randomBoolean(), Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE).build()); + cluster.runRandomly(); + cluster.stabilise(); + } + private static long defaultMillis(Setting setting) { return setting.get(Settings.EMPTY).millis() + Cluster.DEFAULT_DELAY_VARIABILITY; } @@ -1184,10 +1222,10 @@ class Cluster { private List seedHostsList; Cluster(int initialNodeCount) { - this(initialNodeCount, true); + this(initialNodeCount, true, Settings.EMPTY); } - Cluster(int initialNodeCount, boolean allNodesMasterEligible) { + Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings) { deterministicTaskQueue.setExecutionDelayVariabilityMillis(DEFAULT_DELAY_VARIABILITY); assertThat(initialNodeCount, greaterThan(0)); @@ -1196,7 +1234,7 @@ class Cluster { clusterNodes = new ArrayList<>(initialNodeCount); for (int i = 0; i < initialNodeCount; i++) { final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), - allNodesMasterEligible || i == 0 || randomBoolean()); + allNodesMasterEligible || i == 0 || randomBoolean(), nodeSettings); clusterNodes.add(clusterNode); if (clusterNode.getLocalNode().isMasterNode()) { masterEligibleNodeIds.add(clusterNode.getId()); @@ -1229,7 +1267,7 @@ List addNodes(int newNodesCount) { final List addedNodes = new ArrayList<>(); for (int i = 0; i < newNodesCount; i++) { - final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true); + final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true, Settings.EMPTY); addedNodes.add(clusterNode); } clusterNodes.addAll(addedNodes); @@ -1701,6 +1739,7 @@ class ClusterNode { private Coordinator coordinator; private final DiscoveryNode localNode; private final MockPersistedState persistedState; + private final Settings nodeSettings; private AckedFakeThreadPoolMasterService masterService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; @@ -1708,13 +1747,15 @@ class ClusterNode { private DisruptableMockTransport mockTransport; private List> extraJoinValidators = new ArrayList<>(); - ClusterNode(int nodeIndex, boolean masterEligible) { - this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier); + ClusterNode(int nodeIndex, boolean masterEligible, Settings nodeSettings) { + this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier, nodeSettings); } - ClusterNode(int nodeIndex, DiscoveryNode localNode, Function persistedStateSupplier) { + ClusterNode(int nodeIndex, DiscoveryNode localNode, Function persistedStateSupplier, + Settings nodeSettings) { this.nodeIndex = nodeIndex; this.localNode = localNode; + this.nodeSettings = nodeSettings; persistedState = persistedStateSupplier.apply(localNode); onNodeLog(localNode, this::setUp).run(); } @@ -1738,7 +1779,8 @@ protected Optional getDisruptableMockTransport(Transpo } }; - final Settings settings = 
Settings.builder() + final Settings settings = nodeSettings.hasValue(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()) ? + nodeSettings : Settings.builder().put(nodeSettings) .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap transportService = mockTransport.createTransportService( @@ -1781,17 +1823,18 @@ void close() { } ClusterNode restartedNode() { - return restartedNode(Function.identity(), Function.identity()); + return restartedNode(Function.identity(), Function.identity(), nodeSettings); } - ClusterNode restartedNode(Function adaptGlobalMetaData, Function adaptCurrentTerm) { + ClusterNode restartedNode(Function adaptGlobalMetaData, Function adaptCurrentTerm, + Settings nodeSettings) { final TransportAddress address = randomBoolean() ? buildNewFakeTransportAddress() : localNode.getAddress(); final DiscoveryNode newLocalNode = new DiscoveryNode(localNode.getName(), localNode.getId(), UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), localNode.isMasterNode() ? EnumSet.allOf(Role.class) : emptySet(), Version.CURRENT); return new ClusterNode(nodeIndex, newLocalNode, - node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm)); + node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm), nodeSettings); } private PersistedState getPersistedState() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 571843126f98..228d05c51c46 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -580,7 +580,26 @@ public void testConcreteIndicesIgnoreIndicesEmptyRequest() { assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX"))); } + public void testConcreteIndicesNoIndicesErrorMessage() { + MetaData.Builder mdBuilder = MetaData.builder(); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(false, false, true, true)); + IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, + () -> indexNameExpressionResolver.concreteIndices(context, new String[]{})); + assertThat(infe.getMessage(), is("no such index [null] and no indices exist")); + } + public void testConcreteIndicesNoIndicesErrorMessageNoExpand() { + MetaData.Builder mdBuilder = MetaData.builder(); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(false, false, false, false)); + IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, + () -> indexNameExpressionResolver.concreteIndices(context, new String[]{})); + assertThat(infe.getMessage(), is("no such index [_all] and no indices exist")); + } + public void testConcreteIndicesWildcardExpansion() { 
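
The two resolver tests just above differ only in their IndicesOptions; a hedged reading of the fromOptions flags (the parameter names in the comment are my gloss, not taken from this diff) shows why one assertion expects [null] and the other [_all]:

    // Sketch: with the wildcard-expansion flags enabled the failure message is
    // "no such index [null] and no indices exist"; with them disabled it is
    // "no such index [_all] and no indices exist", exactly as the two tests assert.
    IndicesOptions expanding    = IndicesOptions.fromOptions(false, false, true, true);
    IndicesOptions nonExpanding = IndicesOptions.fromOptions(false, false, false, false);
    // flags, in order (my reading): ignoreUnavailable, allowNoIndices,
    // expandWildcardsOpen, expandWildcardsClosed
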
MetaData.Builder mdBuilder = MetaData.builder() .put(indexBuilder("testXXX").state(State.OPEN)) diff --git a/server/src/test/java/org/elasticsearch/common/NumbersTests.java b/server/src/test/java/org/elasticsearch/common/NumbersTests.java index 46378ccc9e9f..4cab3206b7fd 100644 --- a/server/src/test/java/org/elasticsearch/common/NumbersTests.java +++ b/server/src/test/java/org/elasticsearch/common/NumbersTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common; +import com.carrotsearch.randomizedtesting.annotations.Timeout; import org.elasticsearch.test.ESTestCase; import java.math.BigDecimal; @@ -27,19 +28,26 @@ public class NumbersTests extends ESTestCase { + @Timeout(millis = 10000) public void testToLong() { assertEquals(3L, Numbers.toLong("3", false)); assertEquals(3L, Numbers.toLong("3.1", true)); assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.00", false)); assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.00", false)); + assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.00", true)); + assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.00", true)); + assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.99", true)); + assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.99", true)); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> Numbers.toLong("9223372036854775808", false)); - assertEquals("Value [9223372036854775808] is out of range for a long", e.getMessage()); + assertEquals("Value [9223372036854775808] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("9223372036854775808", false)).getMessage()); + assertEquals("Value [-9223372036854775809] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("-9223372036854775809", false)).getMessage()); - e = expectThrows(IllegalArgumentException.class, - () -> Numbers.toLong("-9223372036854775809", false)); - assertEquals("Value [-9223372036854775809] is out of range for a long", e.getMessage()); + assertEquals("Value [1e99999999] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("1e99999999", false)).getMessage()); + assertEquals("Value [-1e99999999] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("-1e99999999", false)).getMessage()); } public void testToLongExact() { diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index 40822d5a38b8..5798b5f79920 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -343,6 +343,17 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2012-W1-1", "weekyear_week_day"); } + public void testCompositeParsing(){ + //in all these examples the second pattern will be used + assertSameDate("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SSS"); + assertSameDate("2014-06-06T12:01:02.123", "strictDateTimeNoMillis||yyyy-MM-dd'T'HH:mm:ss.SSS"); + assertSameDate("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss+HH:MM||yyyy-MM-dd'T'HH:mm:ss.SSS"); + } + + public void testExceptionWhenCompositeParsingFails(){ + assertParseException("2014-06-06T12:01:02.123", 
"yyyy-MM-dd'T'HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SS"); + } + public void testDuelingStrictParsing() { assertSameDate("2018W313", "strict_basic_week_date"); assertParseException("18W313", "strict_basic_week_date"); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 60e2fa0b5687..aea0c8c5c25f 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -19,26 +19,19 @@ package org.elasticsearch.discovery; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; -import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -46,21 +39,13 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; /** * Tests relating to the loss of the master. @@ -69,107 +54,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes - * that already are following another elected master node. These nodes should reject this cluster state and prevent - * them from following the stale master. 
- */ - @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE") - public void testStaleMasterNotHijackingMajority() throws Exception { - final List nodes = startCluster(3); - - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - for (String node : nodes) { - ensureStableCluster(3, node); - } - assertMaster(oldMasterNode, nodes); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (!Objects.equals(previousMaster, currentMaster)) { - logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), - event.previousState()); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for the majority side to get stable - assertDifferentMaster(majoritySide.get(0), oldMasterNode); - assertDifferentMaster(majoritySide.get(1), oldMasterNode); - - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, - // but will be queued and once the old master node un-freezes it gets executed. - // The old master node will send this update + the cluster state where he is flagged as master to the other - // nodes that follow the new master. These nodes should ignore this update. 
- internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new - ClusterStateUpdateTask(Priority.IMMEDIATE) { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("Unfreeze node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - // Make sure that the end state is consistent on all nodes: - assertMaster(newMasterNode, nodes); - - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { - String nodeName = entry.getKey(); - List> recordedMasterTransition = entry.getValue(); - assertThat("[" + nodeName + "] Each node should only record two master node transitions", - recordedMasterTransition, hasSize(2)); - assertThat("[" + nodeName + "] First transition's previous master should be [" + oldMasterNode + "]", - recordedMasterTransition.get(0).v1(), equalTo(oldMasterNode)); - assertThat("[" + nodeName + "] First transition's current master should be [null]", - recordedMasterTransition.get(0).v2(), nullValue()); - assertThat("[" + nodeName + "] Second transition's previous master should be [null]", - recordedMasterTransition.get(1).v1(), nullValue()); - assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]", - recordedMasterTransition.get(1).v2(), equalTo(newMasterNode)); - } - } - /** * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one */ diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 51fef980e377..c4655bcf7ce9 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -18,28 +18,44 @@ */ package org.elasticsearch.discovery; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import 
org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; +import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.transport.MockTransportService.TestPlugin; +import org.elasticsearch.test.transport.MockTransportService; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static java.util.Collections.singleton; @@ -55,7 +71,7 @@ public class StableMasterDisruptionIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(TestPlugin.class); + return Collections.singletonList(MockTransportService.TestPlugin.class); } /** @@ -152,4 +168,101 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType networkDisruption.stopDisrupting(); ensureStableCluster(3); } + + + /** + * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are + * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. + */ + public void testStaleMasterNotHijackingMajority() throws Exception { + final List nodes = internalCluster().startNodes(3, Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .build()); + ensureStableCluster(3); + + // Save the current master node as old master node, because that node will get frozen + final String oldMasterNode = internalCluster().getMasterName(); + + // Simulating a painful gc by suspending all threads for a long time on the current elected master node. + SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); + + // Save the majority side + final List majoritySide = new ArrayList<>(nodes); + majoritySide.remove(oldMasterNode); + + // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: + final Map>> masters = Collections.synchronizedMap(new HashMap<>()); + for (final String node : majoritySide) { + masters.put(node, new ArrayList<>()); + internalCluster().getInstance(ClusterService.class, node).addListener(event -> { + DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); + DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); + if (!Objects.equals(previousMaster, currentMaster)) { + logger.info("--> node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), + event.previousState()); + String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; + String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; + masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); + } + }); + } + + final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); + internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { + if (event.state().nodes().getMasterNodeId() == null) { + oldMasterNodeSteppedDown.countDown(); + } + }); + + internalCluster().setDisruptionScheme(masterNodeDisruption); + logger.info("--> freezing node [{}]", oldMasterNode); + masterNodeDisruption.startDisrupting(); + + // Wait for majority side to elect a new master + assertBusy(() -> { + for (final Map.Entry>> entry : masters.entrySet()) { + final List> transitions = entry.getValue(); + assertTrue(entry.getKey() + ": " + transitions, + transitions.stream().anyMatch(transition -> transition.v2() != null)); + } + }); + + // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and + // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is + // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. + internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new + ClusterStateUpdateTask(Priority.IMMEDIATE) { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); + } + }); + + // Save the new elected master node + final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); + logger.info("--> new detected master node [{}]", newMasterNode); + + // Stop disruption + logger.info("--> unfreezing node [{}]", oldMasterNode); + masterNodeDisruption.stopDisrupting(); + + oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); + logger.info("--> [{}] stepped down as master", oldMasterNode); + ensureStableCluster(3); + + assertThat(masters.size(), equalTo(2)); + for (Map.Entry>> entry : masters.entrySet()) { + String nodeName = entry.getKey(); + List> transitions = entry.getValue(); + assertTrue("[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, + transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2()))); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index e16389a38471..e0fc4a4d5392 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -19,13 +19,22 @@ package org.elasticsearch.discovery.single; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.JoinHelper; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import 
org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.NodeConfigurationSource; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -105,6 +114,78 @@ public Path nodeConfigPath(int nodeOrdinal) { } } + public void testCannotJoinNodeWithSingleNodeDiscovery() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "failed to join") { + + @Override + public boolean innerMatch(final LogEvent event) { + return event.getThrown() != null + && event.getThrown().getClass() == RemoteTransportException.class + && event.getThrown().getCause() != null + && event.getThrown().getCause().getClass() == IllegalStateException.class + && event.getThrown().getCause().getMessage().contains( + "cannot join node with [discovery.type] set to [single-node]"); + } + }); + final TransportService service = internalCluster().getInstance(TransportService.class); + final int port = service.boundAddress().publishAddress().getPort(); + final NodeConfigurationSource configurationSource = new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings + .builder() + .put("discovery.type", "zen") + .put("transport.type", getTestTransportType()) + .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + /* + * We align the port ranges of the two as then with zen discovery these two + * nodes would find each other. + */ + .put("transport.port", port + "-" + (port + 5 - 1)) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }; + try (InternalTestCluster other = + new InternalTestCluster( + randomLong(), + createTempDir(), + false, + false, + 1, + 1, + internalCluster().getClusterName(), + configurationSource, + 0, + "other", + Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), + Function.identity())) { + + Logger clusterLogger = LogManager.getLogger(JoinHelper.class); + Loggers.addAppender(clusterLogger, mockAppender); + try { + other.beforeTest(random(), 0); + final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); + assertThat(first.nodes().getSize(), equalTo(1)); + assertBusy(() -> mockAppender.assertAllExpectationsMatched()); + } finally { + Loggers.removeAppender(clusterLogger, mockAppender); + mockAppender.stop(); + } + } + } + public void testStatePersistence() throws Exception { createIndex("test"); internalCluster().fullRestart(); diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java deleted file mode 100644 index c3dfad2d4379..000000000000 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
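
The join-rejection test above leans on a reusable logging-assertion pattern; a condensed, illustrative version (simplified from the full expectation with innerMatch) looks like this:

    // Sketch: attach a MockLogAppender to JoinHelper's logger, expect an INFO
    // "failed to join" event while the second node attempts to join, then detach
    // and stop the appender.
    MockLogAppender appender = new MockLogAppender();
    appender.start();
    appender.addExpectation(new MockLogAppender.SeenEventExpectation(
            "join failure", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join"));
    Logger joinLogger = LogManager.getLogger(JoinHelper.class);
    Loggers.addAppender(joinLogger, appender);
    try {
        // ... trigger the join attempt from the second cluster ...
        assertBusy(() -> appender.assertAllExpectationsMatched());
    } finally {
        Loggers.removeAppender(joinLogger, appender);
        appender.stop();
    }
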
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.single; - -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.util.Stack; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Supplier; - -import static org.elasticsearch.test.ClusterServiceUtils.createMasterService; -import static org.hamcrest.Matchers.equalTo; - -public class SingleNodeDiscoveryTests extends ESTestCase { - - public void testInitialJoin() throws Exception { - final Settings settings = Settings.EMPTY; - final Version version = Version.CURRENT; - final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - final Stack stack = new Stack<>(); - try { - final MockTransportService transportService = - MockTransportService.createNewService(settings, version, threadPool, null); - stack.push(transportService); - transportService.start(); - final DiscoveryNode node = transportService.getLocalNode(); - final MasterService masterService = createMasterService(threadPool, node); - AtomicReference clusterState = new AtomicReference<>(); - final SingleNodeDiscovery discovery = - new SingleNodeDiscovery(Settings.EMPTY, transportService, - masterService, new ClusterApplier() { - @Override - public void setInitialState(ClusterState initialState) { - clusterState.set(initialState); - } - - @Override - public void onNewClusterState(String source, Supplier clusterStateSupplier, - ClusterApplyListener listener) { - clusterState.set(clusterStateSupplier.get()); - listener.onSuccess(source); - } - }, null); - discovery.start(); - discovery.startInitialJoin(); - final DiscoveryNodes nodes = clusterState.get().nodes(); - assertThat(nodes.getSize(), equalTo(1)); - assertThat(nodes.getMasterNode().getId(), equalTo(node.getId())); - } finally { - while (!stack.isEmpty()) { - IOUtils.closeWhileHandlingException(stack.pop()); - } - terminate(threadPool); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 77303995f749..8be9a991d17e 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -441,7 +441,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - 
assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); //Version from Lucene index @@ -464,7 +464,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -489,7 +489,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); @@ -515,7 +515,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 5f6659afd739..351cccdff4aa 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -89,6 +89,7 @@ import java.util.function.Function; import static java.util.Collections.emptyMap; +import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; @@ -148,8 +149,8 @@ public void tearDown() throws Exception { } private IndexService newIndexService(IndexModule module) throws IOException { - return module.newIndexService(nodeEnvironment, xContentRegistry(), deleter, circuitBreakerService, bigArrays, threadPool, - scriptService, null, indicesQueryCache, mapperRegistry, + return module.newIndexService(CREATE_INDEX, nodeEnvironment, xContentRegistry(), deleter, circuitBreakerService, bigArrays, + 
threadPool, scriptService, null, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener), writableRegistry()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index f5597ecb1f44..2142fca565c9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -523,5 +523,15 @@ public void testInvalidGeopointValuesIgnored() throws Exception { BytesReference.bytes(XContentFactory.jsonBuilder() .startObject().field("location", "NaN,12").endObject() ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); + + assertThat(defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().startObject("location").nullField("lat").field("lon", 1).endObject().endObject() + ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); + + assertThat(defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().startObject("location").nullField("lat").nullField("lon").endObject().endObject() + ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 2c1a75b40d4c..d8c120e492d3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -323,4 +323,115 @@ public void testDefaultMappingIsRejectedOn7() throws IOException { + " that indices can have at most one type.", e.getMessage()); } + public void testFieldNameLengthLimit() throws Throwable { + int maxFieldNameLength = randomIntBetween(15, 20); + String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); + Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) + .build(); + MapperService mapperService = createIndex("test1", settings).mapperService(); + + CompressedXContent mapping = new CompressedXContent(BytesReference.bytes( + XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject())); + + mapperService.merge("type", mapping, MergeReason.MAPPING_UPDATE); + + CompressedXContent mappingUpdate = new CompressedXContent(BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject(testString) + .field("type", "text") + .endObject() + .endObject() + .endObject())); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + mapperService.merge("type", mappingUpdate, MergeReason.MAPPING_UPDATE); + }); + + assertEquals("Field name [" + testString + "] in index [test1] is too long. 
" + + "The limit is set to [" + maxFieldNameLength + "] characters but was [" + + testString.length() + "] characters", e.getMessage()); + } + + public void testObjectNameLengthLimit() throws Throwable { + int maxFieldNameLength = randomIntBetween(15, 20); + String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); + Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) + .build(); + MapperService mapperService = createIndex("test1", settings).mapperService(); + + CompressedXContent mapping = new CompressedXContent(BytesReference.bytes( + XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject(testString) + .field("type", "object") + .endObject() + .endObject() + .endObject().endObject())); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + mapperService.merge("type", mapping, MergeReason.MAPPING_UPDATE); + }); + + assertEquals("Field name [" + testString + "] in index [test1] is too long. " + + "The limit is set to [" + maxFieldNameLength + "] characters but was [" + + testString.length() + "] characters", e.getMessage()); + } + + public void testAliasFieldNameLengthLimit() throws Throwable { + int maxFieldNameLength = randomIntBetween(15, 20); + String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); + Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) + .build(); + MapperService mapperService = createIndex("test1", settings).mapperService(); + + CompressedXContent mapping = new CompressedXContent(BytesReference.bytes( + XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject(testString) + .field("type", "alias") + .field("path", "field") + .endObject() + .startObject("field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject())); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + mapperService.merge("type", mapping, MergeReason.MAPPING_UPDATE); + }); + + assertEquals("Field name [" + testString + "] in index [test1] is too long. 
" + + "The limit is set to [" + maxFieldNameLength + "] characters but was [" + + testString.length() + "] characters", e.getMessage()); + } + + public void testMappingRecoverySkipFieldNameLengthLimit() throws Throwable { + int maxFieldNameLength = randomIntBetween(15, 20); + String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); + Settings settings = Settings.builder().put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), maxFieldNameLength) + .build(); + MapperService mapperService = createIndex("test1", settings).mapperService(); + + CompressedXContent mapping = new CompressedXContent(BytesReference.bytes( + XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject(testString) + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject())); + + DocumentMapper documentMapper = mapperService.merge("type", mapping, MergeReason.MAPPING_RECOVERY); + + assertEquals(testString, documentMapper.mappers().getMapper(testString).simpleName()); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index ba7f5d846840..b4b9242daa45 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import com.carrotsearch.randomizedtesting.annotations.Timeout; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Strings; @@ -367,17 +368,20 @@ public void testEmptyName() throws IOException { } } + @Timeout(millis = 30000) public void testOutOfRangeValues() throws IOException { final List> inputs = Arrays.asList( OutOfRangeSpec.of(NumberType.BYTE, "128", "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, "32768", "is out of range for a short"), OutOfRangeSpec.of(NumberType.INTEGER, "2147483648", "is out of range for an integer"), OutOfRangeSpec.of(NumberType.LONG, "9223372036854775808", "out of range for a long"), + OutOfRangeSpec.of(NumberType.LONG, "1e999999999", "out of range for a long"), OutOfRangeSpec.of(NumberType.BYTE, "-129", "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, "-32769", "is out of range for a short"), OutOfRangeSpec.of(NumberType.INTEGER, "-2147483649", "is out of range for an integer"), OutOfRangeSpec.of(NumberType.LONG, "-9223372036854775809", "out of range for a long"), + OutOfRangeSpec.of(NumberType.LONG, "-1e999999999", "out of range for a long"), OutOfRangeSpec.of(NumberType.BYTE, 128, "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, 32768, "out of range of Java short"), @@ -419,6 +423,10 @@ public void testOutOfRangeValues() throws IOException { e.getCause().getMessage(), containsString(item.message)); } } + + // the following two strings are in-range for a long after coercion + parseRequest(NumberType.LONG, createIndexRequest("9223372036854775807.9")); + parseRequest(NumberType.LONG, createIndexRequest("-9223372036854775808.9")); } private void parseRequest(NumberType type, BytesReference content) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java index 84df2c51fb55..d01cc174c302 100644 --- 
a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java @@ -294,7 +294,7 @@ public void testCombineDisjunctionInterval() throws IOException { } public void testNonIndexedFields() throws IOException { - IntervalsSourceProvider provider = createRandomSource(); + IntervalsSourceProvider provider = new IntervalsSourceProvider.Match("test", 0, true, null, null, null); IntervalQueryBuilder b = new IntervalQueryBuilder("no_such_field", provider); assertThat(b.toQuery(createShardContext()), equalTo(new MatchNoDocsQuery())); diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilderTests.java new file mode 100644 index 000000000000..b3a3a2512a5f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilderTests.java @@ -0,0 +1,284 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
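
Before the new test class below, a rough intuition for the query shape it asserts (field name and terms here are placeholders): match_bool_prefix analyzes the input, turns every term into a SHOULD clause, and treats only the last term as a prefix, while a single-term input collapses to a plain PrefixQuery.

    // Sketch for "foo bar baz" against a text field:
    Query expected = new BooleanQuery.Builder()
        .add(new TermQuery(new Term("field", "foo")), BooleanClause.Occur.SHOULD)
        .add(new TermQuery(new Term("field", "bar")), BooleanClause.Occur.SHOULD)
        .add(new PrefixQuery(new Term("field", "baz")), BooleanClause.Occur.SHOULD)
        .build();
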
+ */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.analysis.MockSynonymAnalyzer; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.everyItem; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.equalToIgnoringCase; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.hasSize; + +public class MatchBoolPrefixQueryBuilderTests extends AbstractQueryTestCase { + + @Override + protected MatchBoolPrefixQueryBuilder doCreateTestQueryBuilder() { + final String fieldName = randomFrom(STRING_FIELD_NAME, STRING_ALIAS_FIELD_NAME); + final Object value = IntStream.rangeClosed(0, randomIntBetween(0, 3)) + .mapToObj(i -> randomAlphaOfLengthBetween(1, 10) + " ") + .collect(Collectors.joining()) + .trim(); + + final MatchBoolPrefixQueryBuilder queryBuilder = new MatchBoolPrefixQueryBuilder(fieldName, value); + + if (randomBoolean() && isTextField(fieldName)) { + queryBuilder.analyzer(randomFrom("simple", "keyword", "whitespace")); + } + + if (randomBoolean()) { + queryBuilder.operator(randomFrom(Operator.values())); + } + + if (randomBoolean()) { + queryBuilder.minimumShouldMatch(randomMinimumShouldMatch()); + } + + if (randomBoolean()) { + queryBuilder.fuzziness(randomFuzziness(fieldName)); + } + + if (randomBoolean()) { + queryBuilder.prefixLength(randomIntBetween(0, 10)); + } + + if (randomBoolean()) { + queryBuilder.maxExpansions(randomIntBetween(1, 1000)); + } + + if (randomBoolean()) { + queryBuilder.fuzzyTranspositions(randomBoolean()); + } + + if (randomBoolean()) { + queryBuilder.fuzzyRewrite(getRandomRewriteMethod()); + } + + return queryBuilder; + } + + @Override + protected void doAssertLuceneQuery(MatchBoolPrefixQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + assertThat(query, notNullValue()); + assertThat(query, anyOf(instanceOf(BooleanQuery.class), instanceOf(PrefixQuery.class))); + + if (query instanceof PrefixQuery) { + final PrefixQuery prefixQuery = (PrefixQuery) query; + assertThat(prefixQuery.getPrefix().text(), equalToIgnoringCase((String) queryBuilder.value())); + } else { + assertThat(query, instanceOf(BooleanQuery.class)); + final BooleanQuery booleanQuery = (BooleanQuery) query; + // all queries except the last should be TermQuery or SynonymQuery + final Set allQueriesExceptLast = IntStream.range(0, booleanQuery.clauses().size() - 1) + .mapToObj(booleanQuery.clauses()::get) + .map(BooleanClause::getQuery) + .collect(Collectors.toSet()); + 
assertThat(allQueriesExceptLast, anyOf( + everyItem(instanceOf(TermQuery.class)), + everyItem(instanceOf(SynonymQuery.class)), + everyItem(instanceOf(FuzzyQuery.class)) + )); + + if (allQueriesExceptLast.stream().anyMatch(subQuery -> subQuery instanceof FuzzyQuery)) { + assertThat(queryBuilder.fuzziness(), notNullValue()); + } + allQueriesExceptLast.stream().filter(subQuery -> subQuery instanceof FuzzyQuery).forEach(subQuery -> { + final FuzzyQuery fuzzyQuery = (FuzzyQuery) subQuery; + assertThat(fuzzyQuery.getPrefixLength(), equalTo(queryBuilder.prefixLength())); + assertThat(fuzzyQuery.getTranspositions(), equalTo(queryBuilder.fuzzyTranspositions())); + }); + + // the last query should be PrefixQuery + final Query shouldBePrefixQuery = booleanQuery.clauses().get(booleanQuery.clauses().size() - 1).getQuery(); + assertThat(shouldBePrefixQuery, instanceOf(PrefixQuery.class)); + + if (queryBuilder.minimumShouldMatch() != null) { + final int optionalClauses = + (int) booleanQuery.clauses().stream().filter(clause -> clause.getOccur() == BooleanClause.Occur.SHOULD).count(); + final int expected = Queries.calculateMinShouldMatch(optionalClauses, queryBuilder.minimumShouldMatch()); + assertThat(booleanQuery.getMinimumNumberShouldMatch(), equalTo(expected)); + } + } + } + + public void testIllegalValues() { + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchBoolPrefixQueryBuilder(null, "value")); + assertEquals("[match_bool_prefix] requires fieldName", e.getMessage()); + } + + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchBoolPrefixQueryBuilder("name", null)); + assertEquals("[match_bool_prefix] requires query value", e.getMessage()); + } + + { + final MatchBoolPrefixQueryBuilder builder = new MatchBoolPrefixQueryBuilder("name", "value"); + builder.analyzer("bogusAnalyzer"); + QueryShardException e = expectThrows(QueryShardException.class, () -> builder.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); + } + } + + public void testFromSimpleJson() throws IOException { + final String simple = + "{" + + "\"match_bool_prefix\": {" + + "\"fieldName\": \"fieldValue\"" + + "}" + + "}"; + final String expected = + "{" + + "\"match_bool_prefix\": {" + + "\"fieldName\": {" + + "\"query\": \"fieldValue\"," + + "\"operator\": \"OR\"," + + "\"prefix_length\": 0," + + "\"max_expansions\": 50," + + "\"fuzzy_transpositions\": true," + + "\"boost\": 1.0" + + "}" + + "}" + + "}"; + + final MatchBoolPrefixQueryBuilder builder = (MatchBoolPrefixQueryBuilder) parseQuery(simple); + checkGeneratedJson(expected, builder); + } + + public void testFromJson() throws IOException { + final String expected = + "{" + + "\"match_bool_prefix\": {" + + "\"fieldName\": {" + + "\"query\": \"fieldValue\"," + + "\"analyzer\": \"simple\"," + + "\"operator\": \"AND\"," + + "\"minimum_should_match\": \"2\"," + + "\"fuzziness\": \"1\"," + + "\"prefix_length\": 1," + + "\"max_expansions\": 10," + + "\"fuzzy_transpositions\": false," + + "\"fuzzy_rewrite\": \"constant_score\"," + + "\"boost\": 2.0" + + "}" + + "}" + + "}"; + + final MatchBoolPrefixQueryBuilder builder = (MatchBoolPrefixQueryBuilder) parseQuery(expected); + checkGeneratedJson(expected, builder); + } + + public void testParseFailsWithMultipleFields() { + { + final String json = + "{" + + "\"match_bool_prefix\" : {" + + "\"field_name_1\" : {" + + "\"query\" : \"foo\"" + + "}," + + "\"field_name_2\" : {" + + "\"query\" : 
\"foo\"\n" + + "}" + + "}" + + "}"; + final ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); + assertEquals( + "[match_bool_prefix] query doesn't support multiple fields, found [field_name_1] and [field_name_2]", e.getMessage()); + } + + { + final String simpleJson = + "{" + + "\"match_bool_prefix\" : {" + + "\"field_name_1\" : \"foo\"," + + "\"field_name_2\" : \"foo\"" + + "}" + + "}"; + final ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(simpleJson)); + assertEquals( + "[match_bool_prefix] query doesn't support multiple fields, found [field_name_1] and [field_name_2]", e.getMessage()); + } + } + + public void testAnalysis() throws Exception { + final MatchBoolPrefixQueryBuilder builder = new MatchBoolPrefixQueryBuilder(STRING_FIELD_NAME, "foo bar baz"); + final Query query = builder.toQuery(createShardContext()); + + assertBooleanQuery(query, asList( + new TermQuery(new Term(STRING_FIELD_NAME, "foo")), + new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new PrefixQuery(new Term(STRING_FIELD_NAME, "baz")) + )); + } + + public void testAnalysisSynonym() throws Exception { + final MatchQuery matchQuery = new MatchQuery(createShardContext()); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + final Query query = matchQuery.parse(MatchQuery.Type.BOOLEAN_PREFIX, STRING_FIELD_NAME, "fox dogs red"); + + assertBooleanQuery(query, asList( + new TermQuery(new Term(STRING_FIELD_NAME, "fox")), + new SynonymQuery(new Term(STRING_FIELD_NAME, "dogs"), new Term(STRING_FIELD_NAME, "dog")), + new PrefixQuery(new Term(STRING_FIELD_NAME, "red")) + )); + } + + public void testAnalysisSingleTerm() throws Exception { + final MatchBoolPrefixQueryBuilder builder = new MatchBoolPrefixQueryBuilder(STRING_FIELD_NAME, "foo"); + final Query query = builder.toQuery(createShardContext()); + assertThat(query, equalTo(new PrefixQuery(new Term(STRING_FIELD_NAME, "foo")))); + } + + private static void assertBooleanQuery(Query actual, List expectedClauseQueries) { + assertThat(actual, instanceOf(BooleanQuery.class)); + final BooleanQuery actualBooleanQuery = (BooleanQuery) actual; + assertThat(actualBooleanQuery.clauses(), hasSize(expectedClauseQueries.size())); + assertThat(actualBooleanQuery.clauses(), everyItem(hasProperty("occur", equalTo(BooleanClause.Occur.SHOULD)))); + + for (int i = 0; i < actualBooleanQuery.clauses().size(); i++) { + final Query clauseQuery = actualBooleanQuery.clauses().get(i).getQuery(); + assertThat(clauseQuery, equalTo(expectedClauseQueries.get(i))); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index c258cce6c7c5..e9f2b447da13 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedBinaryTokenStream; +import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; @@ -28,6 +29,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; import 
org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -394,6 +396,76 @@ public void testLenientPhraseQuery() throws Exception { containsString("field:[string_no_pos] was indexed without position data; cannot run PhraseQuery")); } + public void testAutoGenerateSynonymsPhraseQuery() throws Exception { + final MatchQuery matchQuery = new MatchQuery(createShardContext()); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(false); + final Query query = matchQuery.parse(Type.BOOLEAN, STRING_FIELD_NAME, "guinea pig"); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), BooleanClause.Occur.MUST) + .build(), + BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(true); + final Query query = matchQuery.parse(Type.BOOLEAN, STRING_FIELD_NAME, "guinea pig"); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new PhraseQuery.Builder() + .add(new Term(STRING_FIELD_NAME, "guinea")) + .add(new Term(STRING_FIELD_NAME, "pig")) + .build(), + BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(false); + final Query query = matchQuery.parse(Type.BOOLEAN_PREFIX, STRING_FIELD_NAME, "guinea pig"); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), BooleanClause.Occur.MUST) + .build(), + BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(true); + final Query query = matchQuery.parse(Type.BOOLEAN_PREFIX, STRING_FIELD_NAME, "guinea pig"); + final MultiPhrasePrefixQuery guineaPig = new MultiPhrasePrefixQuery(STRING_FIELD_NAME); + guineaPig.add(new Term(STRING_FIELD_NAME, "guinea")); + guineaPig.add(new Term(STRING_FIELD_NAME, "pig")); + final MultiPhrasePrefixQuery cavy = new MultiPhrasePrefixQuery(STRING_FIELD_NAME); + cavy.add(new Term(STRING_FIELD_NAME, "cavy")); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + } + public void testMaxBooleanClause() { MatchQuery query = new MatchQuery(createShardContext()); query.setAnalyzer(new MockGraphAnalyzer(createGiantGraph(40))); diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 36ba370939b1..7ca722fc3113 100644 --- 
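
The synonym tests above hinge on one switch; stated compactly (the "guinea pig" to "cavy" mapping comes from the test's MockSynonymAnalyzer, the rest is a paraphrase): with setAutoGenerateSynonymsPhraseQuery(false) a multi-token synonym expands to a conjunction of its terms, while with true it becomes a phrase (or, for BOOLEAN_PREFIX, a MultiPhrasePrefixQuery), OR-ed against the single-token synonym.

    // Sketch of the true/BOOLEAN case asserted above:
    Query expected = new BooleanQuery.Builder()
        .add(new BooleanQuery.Builder()
                .add(new PhraseQuery.Builder()
                        .add(new Term("field", "guinea"))
                        .add(new Term("field", "pig"))
                        .build(), BooleanClause.Occur.SHOULD)
                .add(new TermQuery(new Term("field", "cavy")), BooleanClause.Occur.SHOULD)
                .build(), BooleanClause.Occur.SHOULD)
        .build();
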
a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -52,10 +53,11 @@ import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery; +import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase { @@ -91,10 +93,11 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { // sets other parameters of the multi match query if (randomBoolean()) { - if (fieldName.equals(STRING_FIELD_NAME)) { + if (fieldName.equals(STRING_FIELD_NAME) || fieldName.equals(STRING_ALIAS_FIELD_NAME) || fieldName.equals(STRING_FIELD_NAME_2)) { query.type(randomFrom(MultiMatchQueryBuilder.Type.values())); } else { - query.type(randomValueOtherThan(MultiMatchQueryBuilder.Type.PHRASE_PREFIX, + query.type(randomValueOtherThanMany( + (type) -> type == Type.PHRASE_PREFIX || type == Type.BOOL_PREFIX, () -> randomFrom(MultiMatchQueryBuilder.Type.values()))); } } @@ -104,7 +107,7 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean() && fieldName.equals(STRING_FIELD_NAME)) { query.analyzer(randomAnalyzer()); } - if (randomBoolean()) { + if (randomBoolean() && query.type() != Type.BOOL_PREFIX) { query.slop(randomIntBetween(0, 5)); } if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() && @@ -126,7 +129,7 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.tieBreaker(randomFloat()); } - if (randomBoolean()) { + if (randomBoolean() && query.type() != Type.BOOL_PREFIX) { query.cutoffFrequency((float) 10 / randomIntBetween(1, 100)); } if (randomBoolean()) { @@ -158,12 +161,21 @@ protected Map getAlternateVersions() { @Override protected void doAssertLuceneQuery(MultiMatchQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { // we rely on integration tests for deeper checks here - assertThat(query, either(instanceOf(BoostQuery.class)).or(instanceOf(TermQuery.class)) - .or(instanceOf(BooleanQuery.class)).or(instanceOf(DisjunctionMaxQuery.class)) - .or(instanceOf(FuzzyQuery.class)).or(instanceOf(MultiPhrasePrefixQuery.class)) - .or(instanceOf(MatchAllDocsQuery.class)).or(instanceOf(ExtendedCommonTermsQuery.class)) - .or(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(PhraseQuery.class)) - .or(instanceOf(PointRangeQuery.class)).or(instanceOf(IndexOrDocValuesQuery.class))); + assertThat(query, anyOf(Arrays.asList( + instanceOf(BoostQuery.class), + instanceOf(TermQuery.class), + instanceOf(BooleanQuery.class), + instanceOf(DisjunctionMaxQuery.class), + instanceOf(FuzzyQuery.class), + instanceOf(MultiPhrasePrefixQuery.class), + 
instanceOf(MatchAllDocsQuery.class), + instanceOf(ExtendedCommonTermsQuery.class), + instanceOf(MatchNoDocsQuery.class), + instanceOf(PhraseQuery.class), + instanceOf(PointRangeQuery.class), + instanceOf(IndexOrDocValuesQuery.class), + instanceOf(PrefixQuery.class) + ))); } public void testIllegaArguments() { @@ -240,6 +252,51 @@ public void testToQueryFieldMissing() throws Exception { instanceOf(MatchNoDocsQuery.class)); } + public void testToQueryBooleanPrefixSingleField() throws IOException { + final MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar", STRING_FIELD_NAME); + builder.type(Type.BOOL_PREFIX); + final Query query = builder.toQuery(createShardContext()); + assertThat(query, instanceOf(BooleanQuery.class)); + final BooleanQuery booleanQuery = (BooleanQuery) query; + assertThat(booleanQuery.clauses(), hasSize(2)); + assertThat(assertBooleanSubQuery(booleanQuery, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "foo"))); + assertThat(assertBooleanSubQuery(booleanQuery, PrefixQuery.class, 1).getPrefix(), equalTo(new Term(STRING_FIELD_NAME, "bar"))); + } + + public void testToQueryBooleanPrefixMultipleFields() throws IOException { + { + final MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar", STRING_FIELD_NAME, STRING_ALIAS_FIELD_NAME); + builder.type(Type.BOOL_PREFIX); + final Query query = builder.toQuery(createShardContext()); + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + final DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query; + assertThat(disMaxQuery.getDisjuncts(), hasSize(2)); + for (Query disjunctQuery : disMaxQuery.getDisjuncts()) { + assertThat(disjunctQuery, instanceOf(BooleanQuery.class)); + final BooleanQuery booleanQuery = (BooleanQuery) disjunctQuery; + assertThat(booleanQuery.clauses(), hasSize(2)); + assertThat(assertBooleanSubQuery(booleanQuery, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "foo"))); + assertThat(assertBooleanSubQuery(booleanQuery, PrefixQuery.class, 1).getPrefix(), + equalTo(new Term(STRING_FIELD_NAME, "bar"))); + } + } + + { + // STRING_FIELD_NAME_2 is a keyword field + final MultiMatchQueryBuilder queryBuilder = new MultiMatchQueryBuilder("foo bar", STRING_FIELD_NAME, STRING_FIELD_NAME_2); + queryBuilder.type(Type.BOOL_PREFIX); + final Query query = queryBuilder.toQuery(createShardContext()); + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + final DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query; + assertThat(disMaxQuery.getDisjuncts(), hasSize(2)); + final BooleanQuery firstDisjunct = assertDisjunctionSubQuery(disMaxQuery, BooleanQuery.class, 0); + assertThat(firstDisjunct.clauses(), hasSize(2)); + assertThat(assertBooleanSubQuery(firstDisjunct, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "foo"))); + final PrefixQuery secondDisjunct = assertDisjunctionSubQuery(disMaxQuery, PrefixQuery.class, 1); + assertThat(secondDisjunct.getPrefix(), equalTo(new Term(STRING_FIELD_NAME_2, "foo bar"))); + } + } + public void testFromJson() throws IOException { String json = "{\n" + diff --git a/server/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java b/server/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java index f6705b9a2764..5e01c8d25b96 100644 --- a/server/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java +++ b/server/src/test/java/org/elasticsearch/index/refresh/RefreshStatsTests.java @@ -28,14 +28,17 @@ public class RefreshStatsTests extends 
ESTestCase { public void testSerialize() throws IOException { - RefreshStats stats = new RefreshStats(randomNonNegativeLong(), randomNonNegativeLong(), between(0, Integer.MAX_VALUE)); + RefreshStats stats = new RefreshStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), between(0, Integer.MAX_VALUE)); BytesStreamOutput out = new BytesStreamOutput(); stats.writeTo(out); StreamInput input = out.bytes().streamInput(); RefreshStats read = new RefreshStats(input); assertEquals(-1, input.read()); assertEquals(stats.getTotal(), read.getTotal()); + assertEquals(stats.getExternalTotal(), read.getExternalTotal()); assertEquals(stats.getListeners(), read.getListeners()); assertEquals(stats.getTotalTimeInMillis(), read.getTotalTimeInMillis()); + assertEquals(stats.getExternalTotalTimeInMillis(), read.getExternalTotalTimeInMillis()); } } diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index ee916dd4c47d..1a85e29f0209 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -397,6 +397,8 @@ public void testParseGeoPoint() throws IOException { parser.nextToken(); GeoPoint point = GeoUtils.parseGeoPoint(parser); assertThat(point, equalTo(new GeoPoint(lat, lon))); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } json = jsonBuilder().startObject().field("lat", String.valueOf(lat)).field("lon", String.valueOf(lon)).endObject(); try (XContentParser parser = createParser(json)) { @@ -438,6 +440,21 @@ public void testParseGeoPointStringZValueError() throws IOException { } } + public void testParseGeoPointArrayZValueError() throws IOException { + double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180; + double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360; + double alt = randomDouble() * 1000; + XContentBuilder json = jsonBuilder().startArray().value(lat).value(lon).value(alt).endArray(); + try (XContentParser parser = createParser(json)) { + parser.nextToken(); + Exception e = expectThrows(ElasticsearchParseException.class, + () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false)); + assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + assertThat(parser.currentToken(), is(Token.END_ARRAY)); + assertNull(parser.nextToken()); + } + } + public void testParseGeoPointGeohash() throws IOException { for (int i = 0; i < 100; i++) { int geoHashLength = randomIntBetween(1, GeoHashUtils.PRECISION); @@ -451,6 +468,8 @@ public void testParseGeoPointGeohash() throws IOException { GeoPoint point = GeoUtils.parseGeoPoint(parser); assertThat(point.lat(), allOf(lessThanOrEqualTo(90.0), greaterThanOrEqualTo(-90.0))); assertThat(point.lon(), allOf(lessThanOrEqualTo(180.0), greaterThanOrEqualTo(-180.0))); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } json = jsonBuilder().startObject().field("geohash", geohashBuilder.toString()).endObject(); try (XContentParser parser = createParser(json)) { @@ -470,6 +489,8 @@ public void testParseGeoPointGeohashWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), containsString("geohash must be a string")); + 
assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -480,6 +501,8 @@ public void testParseGeoPointLatNoLon() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field [lon] missing")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -490,6 +513,8 @@ public void testParseGeoPointLonNoLat() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field [lat] missing")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -500,6 +525,8 @@ public void testParseGeoPointLonWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("longitude must be a number")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -510,6 +537,8 @@ public void testParseGeoPointLatWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("latitude must be a number")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -578,6 +607,9 @@ public void testParseGeoPointArrayWrongType() throws IOException { } Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("numeric value expected")); + assertThat(parser.currentToken(), is(Token.END_ARRAY)); + assertThat(parser.nextToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 78d442891153..8e2403aedc26 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -82,6 +82,8 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPoolStats; import java.io.IOException; import java.io.UncheckedIOException; @@ -738,8 +740,7 @@ public void onFailure(Exception e) { t.join(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39565") - public void testPendingRefreshWithIntervalChange() throws InterruptedException { + public void testPendingRefreshWithIntervalChange() throws Exception { Settings.Builder builder = Settings.builder(); builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO); IndexService indexService = createIndex("test", builder.build()); @@ -767,7 +768,14 @@ public void testPendingRefreshWithIntervalChange() throws InterruptedException { // wait for both to ensure we don't have in-flight operations updateSettingsLatch.await(); refreshLatch.await(); - + // ensure no scheduled refresh to compete with the scheduleRefresh we are going to verify. 
+ assertBusy(() -> { + for (ThreadPoolStats.Stats stat : indexService.getThreadPool().stats()) { + if (stat.getName().equals(ThreadPool.Names.REFRESH) && (stat.getQueue() > 0 || stat.getActive() > 0)) { + throw new AssertionError(); // cause assert busy to retry + } + } + }); client().prepareIndex("test", "test", "2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); assertTrue(shard.scheduledRefresh()); assertTrue(shard.isSearchIdle()); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index a78b6517ba7b..67106b04f8de 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -546,7 +546,7 @@ public void testPrimaryFillsSeqNoGapsOnPromotion() throws Exception { // most of the time this is large enough that most of the time there will be at least one gap final int operations = 1024 - scaledRandomIntBetween(0, 1024); - final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), false); + final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); final int maxSeqNo = result.maxSeqNo; @@ -1093,7 +1093,7 @@ public void testGlobalCheckpointSync() throws IOException { public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); - indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), true); + indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); final long maxSeqNo = indexShard.seqNoStats().getMaxSeqNo(); final long globalCheckpointOnReplica = randomLongBetween(UNASSIGNED_SEQ_NO, indexShard.getLocalCheckpoint()); @@ -1159,9 +1159,7 @@ public void testRollbackReplicaEngineOnPromotion() throws IOException, Interrupt // most of the time this is large enough that most of the time there will be at least one gap final int operations = 1024 - scaledRandomIntBetween(0, 1024); - // todo: all tests should run with allowUpdates=true, but this specific test sometimes fails during lucene commit when updates are - // added (seed = F37E9647ABE5928) - indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED), false); + indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(SequenceNumbers.NO_OPS_PERFORMED)); final long globalCheckpointOnReplica = randomLongBetween(UNASSIGNED_SEQ_NO, indexShard.getLocalCheckpoint()); indexShard.updateGlobalCheckpointOnReplica(globalCheckpointOnReplica, "test"); @@ -1204,7 +1202,7 @@ public void onFailure(final Exception e) { } assertThat(indexShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(newMaxSeqNoOfUpdates)); // ensure that after the local checkpoint throw back and indexing again, the local checkpoint advances - final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(indexShard.getLocalCheckpoint()), false); + final Result result = indexOnReplicaWithGaps(indexShard, operations, Math.toIntExact(indexShard.getLocalCheckpoint())); assertThat(indexShard.getLocalCheckpoint(), equalTo((long) result.localCheckpoint)); closeShard(indexShard, false); } @@ -1462,6 +1460,12 @@ public String[] listAll() throws IOException { 
return super.listAll(); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } }; try (Store store = createStore(shardId, new IndexSettings(metaData, Settings.EMPTY), directory)) { @@ -1513,6 +1517,33 @@ public void testRefreshMetric() throws IOException { closeShards(shard); } + public void testExternalRefreshMetric() throws IOException { + IndexShard shard = newStartedShard(); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(2L)); // refresh on: finalize and end of recovery + long initialTotalTime = shard.refreshStats().getExternalTotalTimeInMillis(); + // check time advances + for (int i = 1; shard.refreshStats().getExternalTotalTimeInMillis() == initialTotalTime; i++) { + indexDoc(shard, "_doc", "test"); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(2L + i - 1)); + shard.refresh("test"); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(2L + i)); + assertThat(shard.refreshStats().getExternalTotalTimeInMillis(), greaterThanOrEqualTo(initialTotalTime)); + } + long externalRefreshCount = shard.refreshStats().getExternalTotal(); + + indexDoc(shard, "_doc", "test"); + try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "_doc", "test", + new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { + assertThat(shard.refreshStats().getExternalTotal(), equalTo(externalRefreshCount)); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 1)); + } + indexDoc(shard, "_doc", "test"); + shard.writeIndexingBuffer(); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(externalRefreshCount)); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 2)); + closeShards(shard); + } + public void testIndexingOperationsListeners() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); @@ -3139,15 +3170,13 @@ class Result { * @param indexShard the shard * @param operations the number of operations * @param offset the starting sequence number - * @param allowUpdates whether updates should be added. * @return a pair of the maximum sequence number and whether or not a gap was introduced * @throws IOException if an I/O exception occurs while indexing on the shard */ private Result indexOnReplicaWithGaps( final IndexShard indexShard, final int operations, - final int offset, - boolean allowUpdates) throws IOException { + final int offset) throws IOException { int localCheckpoint = offset; int max = offset; boolean gap = false; @@ -3155,7 +3184,7 @@ private Result indexOnReplicaWithGaps( for (int i = offset + 1; i < operations; i++) { if (!rarely() || i == operations - 1) { // last operation can't be a gap as it's not a gap anymore final String id = ids.isEmpty() || randomBoolean() ? 
Integer.toString(i) : randomFrom(ids); - if (allowUpdates && ids.add(id) == false) { // this is an update + if (ids.add(id) == false) { // this is an update indexShard.advanceMaxSeqNoOfUpdatesOrDeletes(i); } SourceToParse sourceToParse = new SourceToParse(indexShard.shardId().getIndexName(), "_doc", id, @@ -3612,7 +3641,7 @@ public void testSupplyTombstoneDoc() throws Exception { public void testResetEngine() throws Exception { IndexShard shard = newStartedShard(false); - indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint()), false); + indexOnReplicaWithGaps(shard, between(0, 1000), Math.toIntExact(shard.getLocalCheckpoint())); final long globalCheckpoint = randomLongBetween(shard.getGlobalCheckpoint(), shard.getLocalCheckpoint()); shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); Set docBelowGlobalCheckpoint = getShardDocUIDs(shard).stream() @@ -3652,7 +3681,7 @@ public void testResetEngine() throws Exception { public void testConcurrentAcquireAllReplicaOperationsPermitsWithPrimaryTermUpdate() throws Exception { final IndexShard replica = newStartedShard(false); - indexOnReplicaWithGaps(replica, between(0, 1000), Math.toIntExact(replica.getLocalCheckpoint()), false); + indexOnReplicaWithGaps(replica, between(0, 1000), Math.toIntExact(replica.getLocalCheckpoint())); final int nbTermUpdates = randomIntBetween(1, 5); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 4ad95c43b707..0216756e65a8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -89,6 +90,7 @@ public class RefreshListenersTests extends ESTestCase { private volatile int maxListeners; private ThreadPool threadPool; private Store store; + private MeanMetric refreshMetric; @Before public void setupListeners() throws Exception { @@ -96,13 +98,15 @@ public void setupListeners() throws Exception { maxListeners = randomIntBetween(1, 1000); // Now setup the InternalEngine which is much more complicated because we aren't mocking anything threadPool = new TestThreadPool(getTestName()); + refreshMetric = new MeanMetric(); listeners = new RefreshListeners( () -> maxListeners, () -> engine.refresh("too-many-listeners"), // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test. 
Runnable::run, logger, - threadPool.getThreadContext()); + threadPool.getThreadContext(), + refreshMetric); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); ShardId shardId = new ShardId(new Index("index", "_na_"), 1); diff --git a/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java b/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java index 49de00dd8bef..509f5e2a4c41 100644 --- a/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/ByteSizeCachingDirectoryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Set; @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class ByteSizeCachingDirectoryTests extends ESTestCase { @@ -45,6 +46,12 @@ public long fileLength(String name) throws IOException { numFileLengthCalls++; return super.fileLength(name); } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } public void testBasics() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index de67687c7f72..fc5870bc8e12 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -43,10 +43,12 @@ import org.apache.lucene.store.ByteBufferIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.TestUtil; @@ -1123,4 +1125,16 @@ public void testDeoptimizeMMap() throws IOException { } } } + + public void testGetPendingFiles() throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 1); + final String testfile = "testfile"; + try (Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId))) { + store.directory().createOutput(testfile, IOContext.DEFAULT).close(); + try (IndexInput input = store.directory().openInput(testfile, IOContext.DEFAULT)) { + store.directory().deleteFile(testfile); + assertEquals(FilterDirectory.unwrap(store.directory()).getPendingDeletions(), store.directory().getPendingDeletions()); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index 25f04532ac8c..7bfa50ff2b72 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; 
import org.elasticsearch.indices.recovery.RecoveryState; @@ -37,7 +38,9 @@ import org.elasticsearch.usage.UsageService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -53,7 +56,7 @@ public void testRestRecoveryAction() { final Settings settings = Settings.EMPTY; UsageService usageService = new UsageService(); final RestController restController = new RestController(Collections.emptySet(), null, null, null, usageService); - final RestRecoveryAction action = new RestRecoveryAction(settings, restController); + final RestCatRecoveryAction action = new RestCatRecoveryAction(settings, restController); final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; @@ -64,7 +67,11 @@ public void testRestRecoveryAction() { final RecoveryState state = mock(RecoveryState.class); when(state.getShardId()).thenReturn(new ShardId(new Index("index", "_na_"), i)); final RecoveryState.Timer timer = mock(RecoveryState.Timer.class); - when(timer.time()).thenReturn((long)randomIntBetween(1000000, 10 * 1000000)); + final long startTime = randomLongBetween(0, new Date().getTime()); + when(timer.startTime()).thenReturn(startTime); + final long time = randomLongBetween(1000000, 10 * 1000000); + when(timer.time()).thenReturn(time); + when(timer.stopTime()).thenReturn(startTime + time); when(state.getTimer()).thenReturn(timer); when(state.getRecoverySource()).thenReturn(TestShardRouting.randomRecoverySource()); when(state.getStage()).thenReturn(randomFrom(RecoveryState.Stage.values())); @@ -122,63 +129,78 @@ public void testRestRecoveryAction() { List headers = table.getHeaders(); - assertThat(headers.get(0).value, equalTo("index")); - assertThat(headers.get(1).value, equalTo("shard")); - assertThat(headers.get(2).value, equalTo("time")); - assertThat(headers.get(3).value, equalTo("type")); - assertThat(headers.get(4).value, equalTo("stage")); - assertThat(headers.get(5).value, equalTo("source_host")); - assertThat(headers.get(6).value, equalTo("source_node")); - assertThat(headers.get(7).value, equalTo("target_host")); - assertThat(headers.get(8).value, equalTo("target_node")); - assertThat(headers.get(9).value, equalTo("repository")); - assertThat(headers.get(10).value, equalTo("snapshot")); - assertThat(headers.get(11).value, equalTo("files")); - assertThat(headers.get(12).value, equalTo("files_recovered")); - assertThat(headers.get(13).value, equalTo("files_percent")); - assertThat(headers.get(14).value, equalTo("files_total")); - assertThat(headers.get(15).value, equalTo("bytes")); - assertThat(headers.get(16).value, equalTo("bytes_recovered")); - assertThat(headers.get(17).value, equalTo("bytes_percent")); - assertThat(headers.get(18).value, equalTo("bytes_total")); - assertThat(headers.get(19).value, equalTo("translog_ops")); - assertThat(headers.get(20).value, equalTo("translog_ops_recovered")); - assertThat(headers.get(21).value, equalTo("translog_ops_percent")); + final List expectedHeaders = Arrays.asList( + "index", + "shard", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "time", + "type", + "stage", + "source_host", + "source_node", + "target_host", + "target_node", + "repository", + "snapshot", + "files", + "files_recovered", + "files_percent", + "files_total", + "bytes", + "bytes_recovered", + "bytes_percent", + 
"bytes_total", + "translog_ops", + "translog_ops_recovered", + "translog_ops_percent"); + + for (int i = 0; i < expectedHeaders.size(); i++) { + assertThat(headers.get(i).value, equalTo(expectedHeaders.get(i))); + } assertThat(table.getRows().size(), equalTo(successfulShards)); + for (int i = 0; i < successfulShards; i++) { final RecoveryState state = recoveryStates.get(i); - List cells = table.getRows().get(i); - assertThat(cells.get(0).value, equalTo("index")); - assertThat(cells.get(1).value, equalTo(i)); - assertThat(cells.get(2).value, equalTo(new TimeValue(state.getTimer().time()))); - assertThat(cells.get(3).value, equalTo(state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT))); - assertThat(cells.get(4).value, equalTo(state.getStage().name().toLowerCase(Locale.ROOT))); - assertThat(cells.get(5).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName())); - assertThat(cells.get(6).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName())); - assertThat(cells.get(7).value, equalTo(state.getTargetNode().getHostName())); - assertThat(cells.get(8).value, equalTo(state.getTargetNode().getName())); - assertThat( - cells.get(9).value, - equalTo(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? - "n/a" : - ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository())); - assertThat( - cells.get(10).value, - equalTo(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? - "n/a" : - ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName())); - assertThat(cells.get(11).value, equalTo(state.getIndex().totalRecoverFiles())); - assertThat(cells.get(12).value, equalTo(state.getIndex().recoveredFileCount())); - assertThat(cells.get(13).value, equalTo(percent(state.getIndex().recoveredFilesPercent()))); - assertThat(cells.get(14).value, equalTo(state.getIndex().totalFileCount())); - assertThat(cells.get(15).value, equalTo(state.getIndex().totalRecoverBytes())); - assertThat(cells.get(16).value, equalTo(state.getIndex().recoveredBytes())); - assertThat(cells.get(17).value, equalTo(percent(state.getIndex().recoveredBytesPercent()))); - assertThat(cells.get(18).value, equalTo(state.getIndex().totalBytes())); - assertThat(cells.get(19).value, equalTo(state.getTranslog().totalOperations())); - assertThat(cells.get(20).value, equalTo(state.getTranslog().recoveredOperations())); - assertThat(cells.get(21).value, equalTo(percent(state.getTranslog().recoveredPercent()))); + final List expectedValues = Arrays.asList( + "index", + i, + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime()), + state.getTimer().startTime(), + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime()), + state.getTimer().stopTime(), + new TimeValue(state.getTimer().time()), + state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT), + state.getStage().name().toLowerCase(Locale.ROOT), + state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName(), + state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName(), + state.getTargetNode().getHostName(), + state.getTargetNode().getName(), + state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? 
+ "n/a" : + ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository(), + state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? + "n/a" : + ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName(), + state.getIndex().totalRecoverFiles(), + state.getIndex().recoveredFileCount(), + percent(state.getIndex().recoveredFilesPercent()), + state.getIndex().totalFileCount(), + state.getIndex().totalRecoverBytes(), + state.getIndex().recoveredBytes(), + percent(state.getIndex().recoveredBytesPercent()), + state.getIndex().totalBytes(), + state.getTranslog().totalOperations(), + state.getTranslog().recoveredOperations(), + percent(state.getTranslog().recoveredPercent())); + + final List cells = table.getRows().get(i); + for (int j = 0; j < expectedValues.size(); j++) { + assertThat(cells.get(j).value, equalTo(expectedValues.get(j))); + } } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index c8ab7cc19dcf..b7755dd32141 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -321,6 +321,7 @@ public List> getRescorers() { "intervals", "match", "match_all", + "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java index 81626459db4f..aa244ff7a320 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java @@ -50,18 +50,19 @@ public class InternalAggregationsTests extends ESTestCase { public void testReduceEmptyAggs() { List aggs = Collections.emptyList(); InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, randomBoolean()); - assertNull(InternalAggregations.reduce(aggs, Collections.emptyList(), reduceContext)); + assertNull(InternalAggregations.reduce(aggs, reduceContext)); } public void testNonFinalReduceTopLevelPipelineAggs() { InternalAggregation terms = new StringTerms("name", BucketOrder.key(true), 10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0); - List aggs = Collections.singletonList(new InternalAggregations(Collections.singletonList(terms))); List topLevelPipelineAggs = new ArrayList<>(); MaxBucketPipelineAggregationBuilder maxBucketPipelineAggregationBuilder = new MaxBucketPipelineAggregationBuilder("test", "test"); topLevelPipelineAggs.add((SiblingPipelineAggregator)maxBucketPipelineAggregationBuilder.create()); + List aggs = Collections.singletonList(new InternalAggregations(Collections.singletonList(terms), + topLevelPipelineAggs)); InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, false); - InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, topLevelPipelineAggs, reduceContext); + InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, reduceContext); assertEquals(1, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(1, reducedAggs.aggregations.size()); } @@ -79,15 +80,15 @@ public void 
testFinalReduceTopLevelPipelineAggs() { Collections.singletonList(siblingPipelineAggregator)); reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), reduceContext); } else { - InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms)); - List topLevelPipelineAggs = Collections.singletonList(siblingPipelineAggregator); - reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), topLevelPipelineAggs, reduceContext); + InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms), + Collections.singletonList(siblingPipelineAggregator)); + reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), reduceContext); } assertEquals(0, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(2, reducedAggs.aggregations.size()); } - public void testSerialization() throws Exception { + public static InternalAggregations createTestInstance() throws Exception { List aggsList = new ArrayList<>(); if (randomBoolean()) { StringTermsTests stringTermsTests = new StringTermsTests(); @@ -116,7 +117,11 @@ public void testSerialization() throws Exception { topLevelPipelineAggs.add((SiblingPipelineAggregator)new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); } } - InternalAggregations aggregations = new InternalAggregations(aggsList, topLevelPipelineAggs); + return new InternalAggregations(aggsList, topLevelPipelineAggs); + } + + public void testSerialization() throws Exception { + InternalAggregations aggregations = createTestInstance(); writeToAndReadFrom(aggregations, 0); } diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java new file mode 100644 index 000000000000..64712b3e417a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.Version; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalAggregationsTests; +import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.suggest.SuggestTests; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.util.List; + +import static java.util.Collections.emptyList; + +public class QuerySearchResultTests extends ESTestCase { + + private final NamedWriteableRegistry namedWriteableRegistry; + + public QuerySearchResultTests() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); + this.namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + private static QuerySearchResult createTestInstance() throws Exception { + ShardId shardId = new ShardId("index", "uuid", randomInt()); + QuerySearchResult result = new QuerySearchResult(randomLong(), new SearchShardTarget("node", shardId, null, OriginalIndices.NONE)); + if (randomBoolean()) { + result.terminatedEarly(randomBoolean()); + } + TopDocs topDocs = new TopDocs(new TotalHits(randomLongBetween(0, Long.MAX_VALUE), TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); + result.topDocs(new TopDocsAndMaxScore(topDocs, randomBoolean() ? 
Float.NaN : randomFloat()), new DocValueFormat[0]); + result.size(randomInt()); + result.from(randomInt()); + if (randomBoolean()) { + result.suggest(SuggestTests.createTestItem()); + } + if (randomBoolean()) { + result.aggregations(InternalAggregationsTests.createTestInstance()); + } + return result; + } + + public void testSerialization() throws Exception { + QuerySearchResult querySearchResult = createTestInstance(); + Version version = VersionUtils.randomVersion(random()); + QuerySearchResult deserialized = copyStreamable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, version); + assertEquals(querySearchResult.getRequestId(), deserialized.getRequestId()); + assertNull(deserialized.getSearchShardTarget()); + assertEquals(querySearchResult.topDocs().maxScore, deserialized.topDocs().maxScore, 0f); + assertEquals(querySearchResult.topDocs().topDocs.totalHits, deserialized.topDocs().topDocs.totalHits); + assertEquals(querySearchResult.from(), deserialized.from()); + assertEquals(querySearchResult.size(), deserialized.size()); + assertEquals(querySearchResult.hasAggs(), deserialized.hasAggs()); + if (deserialized.hasAggs()) { + Aggregations aggs = querySearchResult.consumeAggs(); + Aggregations deserializedAggs = deserialized.consumeAggs(); + assertEquals(aggs.asList(), deserializedAggs.asList()); + List pipelineAggs = ((InternalAggregations) aggs).getTopLevelPipelineAggregators(); + List deserializedPipelineAggs = + ((InternalAggregations) deserializedAggs).getTopLevelPipelineAggregators(); + assertEquals(pipelineAggs.size(), deserializedPipelineAggs.size()); + for (int i = 0; i < pipelineAggs.size(); i++) { + SiblingPipelineAggregator pipelineAgg = pipelineAggs.get(i); + SiblingPipelineAggregator deserializedPipelineAgg = deserializedPipelineAggs.get(i); + assertArrayEquals(pipelineAgg.bucketsPaths(), deserializedPipelineAgg.bucketsPaths()); + assertEquals(pipelineAgg.name(), deserializedPipelineAgg.name()); + } + } + assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java new file mode 100644 index 000000000000..9c5d56315f65 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -0,0 +1,209 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +public class InboundHandlerTests extends ESTestCase { + + private final TestThreadPool threadPool = new TestThreadPool(getClass().getName()); + private final Version version = Version.CURRENT; + + private TaskManager taskManager; + private InboundHandler handler; + private FakeTcpChannel channel; + + @Before + public void setUp() throws Exception { + super.setUp(); + TransportLogger transportLogger = new TransportLogger(); + taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()); + channel = new FakeTcpChannel(randomBoolean(), buildNewFakeTransportAddress().address(), buildNewFakeTransportAddress().address()); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + InboundMessage.Reader reader = new InboundMessage.Reader(version, namedWriteableRegistry, threadPool.getThreadContext()); + TransportHandshaker handshaker = new TransportHandshaker(version, threadPool, (n, c, r, v) -> { + }, (v, f, c, r, r_id) -> { + }); + TransportKeepAlive keepAlive = new TransportKeepAlive(threadPool, TcpChannel::sendMessage); + OutboundHandler outboundHandler = new OutboundHandler("node", version, new String[0], threadPool, BigArrays.NON_RECYCLING_INSTANCE, + transportLogger); + handler = new InboundHandler(threadPool, outboundHandler, reader, new NoneCircuitBreakerService(), transportLogger, handshaker, + keepAlive); + } + + @After + public void tearDown() throws Exception { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + super.tearDown(); + } + + public void testPing() throws Exception { + AtomicReference channelCaptor = new AtomicReference<>(); + RequestHandlerRegistry registry = new RequestHandlerRegistry<>("test-request", TestRequest::new, taskManager, + (request, channel, task) -> channelCaptor.set(channel), ThreadPool.Names.SAME, false, true); + handler.registerRequestHandler(registry); + + handler.inboundMessage(channel, BytesArray.EMPTY); + assertEquals(1, handler.getReadBytes().count()); + assertEquals(6, handler.getReadBytes().sum()); + if (channel.isServerChannel()) { + BytesReference ping = channel.getMessageCaptor().get(); + assertEquals('E', ping.get(0)); + assertEquals(6, ping.length()); + } + } + + public void testRequestAndResponse() throws Exception { + String action = "test-request"; + boolean isCompressed = randomBoolean(); + boolean isError = randomBoolean(); + AtomicReference requestCaptor = new AtomicReference<>(); + AtomicReference responseCaptor = new 
AtomicReference<>(); + AtomicReference exceptionCaptor = new AtomicReference<>(); + AtomicReference channelCaptor = new AtomicReference<>(); + + long requestId = handler.getResponseHandlers().add(new Transport.ResponseContext<>(new TransportResponseHandler() { + @Override + public void handleResponse(TestResponse response) { + responseCaptor.set(response); + } + + @Override + public void handleException(TransportException exp) { + exceptionCaptor.set(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + }, null, action)); + RequestHandlerRegistry registry = new RequestHandlerRegistry<>(action, TestRequest::new, taskManager, + (request, channel, task) -> { + channelCaptor.set(channel); + requestCaptor.set(request); + }, ThreadPool.Names.SAME, false, true); + handler.registerRequestHandler(registry); + String requestValue = randomAlphaOfLength(10); + OutboundMessage.Request request = new OutboundMessage.Request(threadPool.getThreadContext(), new String[0], + new TestRequest(requestValue), version, action, requestId, false, isCompressed); + + BytesReference bytes = request.serialize(new BytesStreamOutput()); + handler.inboundMessage(channel, bytes.slice(6, bytes.length() - 6)); + + TransportChannel transportChannel = channelCaptor.get(); + assertEquals(Version.CURRENT, transportChannel.getVersion()); + assertEquals("transport", transportChannel.getChannelType()); + assertEquals(requestValue, requestCaptor.get().value); + + String responseValue = randomAlphaOfLength(10); + if (isError) { + transportChannel.sendResponse(new ElasticsearchException("boom")); + } else { + transportChannel.sendResponse(new TestResponse(responseValue)); + } + BytesReference serializedResponse = channel.getMessageCaptor().get(); + handler.inboundMessage(channel, serializedResponse.slice(6, serializedResponse.length() - 6)); + + if (isError) { + assertTrue(exceptionCaptor.get() instanceof RemoteTransportException); + assertTrue(exceptionCaptor.get().getCause() instanceof ElasticsearchException); + assertEquals("boom", exceptionCaptor.get().getCause().getMessage()); + } else { + assertEquals(responseValue, responseCaptor.get().value); + } + } + + private static class TestRequest extends TransportRequest { + + String value; + + private TestRequest(String value) { + this.value = value; + } + + private TestRequest(StreamInput in) throws IOException { + super(in); + this.value = in.readString(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(value); + } + } + + private static class TestResponse extends TransportResponse { + + String value; + + private TestResponse(String value) { + this.value = value; + } + + private TestResponse(StreamInput in) throws IOException { + super(in); + this.value = in.readString(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(value); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 9d0380324437..4519513db281 100644 --- 
a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -19,37 +19,15 @@ package org.elasticsearch.transport; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.network.CloseableChannel; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.io.StreamCorruptedException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.core.IsInstanceOf.instanceOf; -import static org.mockito.Mockito.mock; /** Unit tests for {@link TcpTransport} */ public class TcpTransportTests extends ESTestCase { @@ -157,141 +135,6 @@ public void testAddressLimit() throws Exception { assertEquals(102, addresses[2].getPort()); } - @SuppressForbidden(reason = "Allow accessing localhost") - public void testCompressRequestAndResponse() throws IOException { - final boolean compressed = randomBoolean(); - Req request = new Req(randomRealisticUnicodeOfLengthBetween(10, 100)); - ThreadPool threadPool = new TestThreadPool(TcpTransportTests.class.getName()); - AtomicReference messageCaptor = new AtomicReference<>(); - try { - TcpTransport transport = new TcpTransport(Settings.EMPTY, Version.CURRENT, threadPool, - PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService(), null, null) { - - @Override - protected FakeServerChannel bind(String name, InetSocketAddress address) throws IOException { - return null; - } - - @Override - protected FakeTcpChannel initiateChannel(DiscoveryNode node) throws IOException { - return new FakeTcpChannel(false); - } - - @Override - protected void stopInternal() { - } - - @Override - public Releasable openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener listener) { - if (compressed) { - assertTrue(profile.getCompressionEnabled()); - } - int numConnections = profile.getNumConnections(); - ArrayList fakeChannels = new ArrayList<>(numConnections); - for (int i = 0; i < numConnections; ++i) { - fakeChannels.add(new FakeTcpChannel(false, messageCaptor)); - } - listener.onResponse(new NodeChannels(node, fakeChannels, profile, Version.CURRENT)); - return () -> CloseableChannel.closeChannels(fakeChannels, false); - } - }; - - DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT); - ConnectionProfile.Builder profileBuilder = new ConnectionProfile.Builder(TestProfiles.LIGHT_PROFILE); - if (compressed) { - 
profileBuilder.setCompressionEnabled(true); - } else { - profileBuilder.setCompressionEnabled(false); - } - PlainActionFuture future = PlainActionFuture.newFuture(); - transport.openConnection(node, profileBuilder.build(), future); - Transport.Connection connection = future.actionGet(); - connection.sendRequest(42, "foobar", request, TransportRequestOptions.EMPTY); - transport.registerRequestHandler(new RequestHandlerRegistry<>("foobar", Req::new, mock(TaskManager.class), - (request1, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE), ThreadPool.Names.SAME, - true, true)); - - BytesReference reference = messageCaptor.get(); - assertNotNull(reference); - - AtomicReference responseCaptor = new AtomicReference<>(); - InetSocketAddress address = new InetSocketAddress(InetAddress.getLocalHost(), 0); - FakeTcpChannel responseChannel = new FakeTcpChannel(true, address, address, "profile", responseCaptor); - transport.messageReceived(reference.slice(6, reference.length() - 6), responseChannel); - - - StreamInput streamIn = responseCaptor.get().streamInput(); - streamIn.skip(TcpHeader.MARKER_BYTES_SIZE); - @SuppressWarnings("unused") - int len = streamIn.readInt(); - long requestId = streamIn.readLong(); - assertEquals(42, requestId); - byte status = streamIn.readByte(); - Version version = Version.fromId(streamIn.readInt()); - assertEquals(Version.CURRENT, version); - assertEquals(compressed, TransportStatus.isCompress(status)); - assertFalse(TransportStatus.isRequest(status)); - if (compressed) { - final int bytesConsumed = TcpHeader.HEADER_SIZE; - streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)) - .streamInput(streamIn); - } - threadPool.getThreadContext().readHeaders(streamIn); - TransportResponse.Empty.INSTANCE.readFrom(streamIn); - - } finally { - ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); - } - } - - private static final class FakeServerChannel implements TcpServerChannel { - - @Override - public void close() { - } - - @Override - public String getProfile() { - return null; - } - - @Override - public void addCloseListener(ActionListener listener) { - } - - @Override - public boolean isOpen() { - return false; - } - - @Override - public InetSocketAddress getLocalAddress() { - return null; - } - } - - private static final class Req extends TransportRequest { - public String value; - - private Req(String value) { - this.value = value; - } - - private Req(StreamInput in) throws IOException { - value = in.readString(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - value = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - } - } - public void testDecodeWithIncompleteHeader() throws IOException { BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); streamOutput.write('E'); diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index d9f25a369d61..54d9a015b4e4 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -160,7 +160,7 @@ public void testValidateEmptyCluster() { client().admin().indices().prepareValidateQuery().get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index [null]")); + 
assertThat(e.getMessage(), is("no such index [null] and no indices exist")); } } diff --git a/test/fixtures/hdfs-fixture/Dockerfile b/test/fixtures/hdfs-fixture/Dockerfile new file mode 100644 index 000000000000..b9d0e60b7d6d --- /dev/null +++ b/test/fixtures/hdfs-fixture/Dockerfile @@ -0,0 +1,8 @@ +FROM java:8-jre + +RUN apt-get update && apt-get install net-tools + +EXPOSE 9998 +EXPOSE 9999 + +CMD java -cp "/fixture:/fixture/*" hdfs.MiniHDFS /data \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 3f08ca7970ca..f2aebda46b87 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -18,25 +18,23 @@ */ apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.test.fixtures' -versions << [ - 'hadoop2': '2.8.1' -] - -// we create MiniHdfsCluster with the hadoop artifact dependencies { - compile "org.apache.hadoop:hadoop-minicluster:${versions.hadoop2}" + compile "org.apache.hadoop:hadoop-minicluster:2.8.1" +} + +task syncClasses(type: Sync) { + from sourceSets.test.runtimeClasspath + into "${buildDir}/fixture" } -// for testing, until fixtures are actually debuggable. -// gradle hides *EVERYTHING* so you have no clue what went wrong. -task hdfs(type: JavaExec) { - classpath = sourceSets.test.compileClasspath + sourceSets.test.output - main = "hdfs.MiniHDFS" - args = [ 'build/fixtures/hdfsFixture' ] +preProcessFixture { + dependsOn syncClasses + + doLast { + file("${buildDir}/shared").mkdirs() + } } -// just a test fixture: we aren't using jars in releases -thirdPartyAudit.enabled = false -// TODO: add a simple HDFS client test for this fixture unitTest.enabled = false diff --git a/test/fixtures/hdfs-fixture/docker-compose.yml b/test/fixtures/hdfs-fixture/docker-compose.yml index e69de29bb2d1..5bdc40b1f724 100644 --- a/test/fixtures/hdfs-fixture/docker-compose.yml +++ b/test/fixtures/hdfs-fixture/docker-compose.yml @@ -0,0 +1,11 @@ +version: '3' +services: + hdfs: + hostname: hdfs.build.elastic.co + build: + context: . + dockerfile: Dockerfile + volumes: + - ./build/fixture:/fixture + ports: + - "9999:9999" diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java index ce7401fe25ca..01315cdab01c 100644 --- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java @@ -98,7 +98,6 @@ public static void main(String[] args) throws Exception { UserGroupInformation.setConfiguration(cfg); - // TODO: remove hardcoded port! MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg); if (secure) { builder.nameNodePort(9998); diff --git a/test/fixtures/krb5kdc-fixture/Dockerfile b/test/fixtures/krb5kdc-fixture/Dockerfile new file mode 100644 index 000000000000..50de6334b9c7 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 +ADD . 
/fixture +RUN echo kerberos.build.elastic.co > /etc/hostname && echo "127.0.0.1 kerberos.build.elastic.co" >> /etc/hosts +RUN bash /fixture/src/main/resources/provision/installkdc.sh + +EXPOSE 88 +EXPOSE 88/udp + +CMD sleep infinity \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/Vagrantfile b/test/fixtures/krb5kdc-fixture/Vagrantfile deleted file mode 100644 index 72be4dad9cbe..000000000000 --- a/test/fixtures/krb5kdc-fixture/Vagrantfile +++ /dev/null @@ -1,53 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# This Vagrantfile exists to define a virtual machine running MIT's Kerberos 5 -# for usage as a testing fixture for the build process. -# -# In order to connect to the KDC process on this virtual machine, find and use -# the rendered krb5.conf file in the build output directory (build/conf). -# -# In order to provision principals in the KDC, use the provided addprinc.sh -# script with vagrant's ssh facility: -# -# vagrant ssh -c /vagrant/src/main/resources/provision/addprinc.sh principal -# -# You will find the newly created principal's keytab file in the build output -# directory (build/keytabs). Principal creation is idempotent, and will recopy -# existing user keytabs from the KDC if they already exist. - -Vagrant.configure("2") do |config| - - config.vm.define "krb5kdc" do |config| - config.vm.box = "elastic/ubuntu-14.04-x86_64" - end - - config.vm.hostname = "kerberos.build.elastic.co" - - if Vagrant.has_plugin?("vagrant-cachier") - config.cache.scope = :box - end - - config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "tcp" - config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "udp" - - config.vm.provision "shell", path: "src/main/resources/provision/installkdc.sh" - -end diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index 685483d53477..a3ca8d41bc4d 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -16,68 +16,38 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.build' - -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project.projectDir.absolutePath}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" -] - -String box = "krb5kdc" - -List defaultPrincipals = [ "elasticsearch" ] - -task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars +// installKDC uses tabs in it for the Kerberos ACL file. +// Ignore it for pattern checking. 
+forbiddenPatterns { + exclude "**/installkdc.sh" } -task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn update -} +List services = ["peppa", "hdfs"] -task addDefaultPrincipals { - dependsOn up +preProcessFixture.doLast { + // We need to create these up-front because if docker creates them they will be owned by root and we won't be + // able to clean them up + services.each { file("${buildDir}/shared/${it}").mkdirs() } } -for (String principal : defaultPrincipals) { - Task addTask = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" - boxName box - environmentVars vagrantEnvVars - dependsOn up +postProcessFixture { + inputs.dir("${buildDir}/shared") + services.each { service -> + File confTemplate = file("${buildDir}/shared/${service}/krb5.conf.template") + File confFile = file("${buildDir}/shared/${service}/krb5.conf") + outputs.file(confFile) + doLast { + assert confTemplate.exists() + String confContents = confTemplate.text + .replace("\${MAPPED_PORT}", "${ext."test.fixtures.${service}.udp.88"}") + confFile.text = confContents + } } - addDefaultPrincipals.dependsOn(addTask) } -task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'halt' - boxName box - environmentVars vagrantEnvVars -} - -task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'destroy' - args '-f' - boxName box - environmentVars vagrantEnvVars - dependsOn halt -} +project.ext.krb5Conf = { service -> file("$buildDir/shared/${service}/krb5.conf") } +project.ext.krb5Keytabs = { service, fileName -> file("$buildDir/shared/${service}/keytabs/${fileName}") } -thirdPartyAudit.enabled = false unitTest.enabled = false - -// installKDC uses tabs in it for the Kerberos ACL file. -// Ignore it for pattern checking. -forbiddenPatterns { - exclude "**/installkdc.sh" -} diff --git a/test/fixtures/krb5kdc-fixture/docker-compose.yml b/test/fixtures/krb5kdc-fixture/docker-compose.yml new file mode 100644 index 000000000000..4d018dd6c3e0 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/docker-compose.yml @@ -0,0 +1,24 @@ +version: '3' +services: + peppa: + hostname: kerberos.build.elastic.co + build: + context: . + dockerfile: Dockerfile + command: "bash /fixture/src/main/resources/provision/peppa.sh" + volumes: + - ./build/shared/peppa:/fixture/build + ports: + - "4444" + - "88/udp" + hdfs: + hostname: kerberos.build.elastic.co + build: + context: . 
+ dockerfile: Dockerfile + command: "bash /fixture/src/main/resources/provision/hdfs.sh" + volumes: + - ./build/shared/hdfs:/fixture/build + ports: + - "4444" + - "88/udp" diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh index d0d1570ae299..9fc2a0735d66 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh @@ -19,6 +19,9 @@ set -e +krb5kdc +kadmind + if [[ $# -lt 1 ]]; then echo 'Usage: addprinc.sh principalName [password]' echo ' principalName user principal name without realm' @@ -30,7 +33,7 @@ PRINC="$1" PASSWD="$2" USER=$(echo $PRINC | tr "/" "_") -VDIR=/vagrant +VDIR=/fixture RESOURCES=$VDIR/src/main/resources PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties @@ -64,3 +67,9 @@ else sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" fi fi + +echo "Copying conf to local" +# make the configuration available externally +cp -v $LOCALSTATEDIR/krb5.conf $BUILD_DIR/krb5.conf.template +# We are running as root in the container, allow non root users running the container to be able to clean these up +chmod -R 777 $BUILD_DIR \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh new file mode 100644 index 000000000000..ef5bba076444 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +addprinc.sh "elasticsearch" +addprinc.sh "hdfs/hdfs.build.elastic.co" + +# Use this as a signal that setup is complete +python3 -m http.server 4444 & + +sleep infinity \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh index 2dc8ed92c946..51af7984ce47 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh @@ -22,32 +22,15 @@ set -e # KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html # and helpful input from https://help.ubuntu.com/community/Kerberos -VDIR=/vagrant +VDIR=/fixture RESOURCES=$VDIR/src/main/resources PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties -BUILD_DIR=$VDIR/build -CONF_DIR=$BUILD_DIR/conf -KEYTAB_DIR=$BUILD_DIR/keytabs LOCALSTATEDIR=/etc LOGDIR=/var/log/krb5 MARKER_FILE=/etc/marker -# Output location for our rendered configuration files and keytabs -mkdir -p $BUILD_DIR -rm -rf $BUILD_DIR/* -mkdir -p $CONF_DIR -mkdir -p $KEYTAB_DIR - -if [ -f $MARKER_FILE ]; then - echo "Already provisioned..." - echo "Recopying configuration files..." 
- cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf - cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf - exit 0; -fi - # Pull environment information REALM_NAME=$(cat $ENVPROP_FILE | grep realm= | cut -d '=' -f 2) KDC_NAME=$(cat $ENVPROP_FILE | grep kdc= | cut -d '=' -f 2) @@ -60,7 +43,7 @@ sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf -cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf + # Transfer and interpolate the kdc.conf mkdir -p $LOCALSTATEDIR/krb5kdc @@ -69,7 +52,6 @@ sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf -cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf # Touch logging locations mkdir -p $LOGDIR @@ -112,9 +94,5 @@ EOF kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME" kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME" -# Start Kerberos Services -krb5kdc -kadmind - -# Mark that the vm is already provisioned -touch $MARKER_FILE \ No newline at end of file +# Create a link so addprinc.sh is on path +ln -s $PROV_DIR/addprinc.sh /usr/bin/ \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index e572c12e7095..9504b49bc730 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -32,12 +32,8 @@ [realms] ${REALM_NAME} = { - kdc = ${KDC_NAME}:88 - kdc = ${KDC_NAME}:60088 - kdc = localhost:60088 - kdc = localhost:88 - kdc = 127.0.0.1:60088 kdc = 127.0.0.1:88 + kdc = 127.0.0.1:${MAPPED_PORT} admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} } diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh new file mode 100644 index 000000000000..815a9e94e8cb --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +addprinc.sh elasticsearch +addprinc.sh HTTP/localhost +addprinc.sh peppa +addprinc.sh george dino + +# Use this as a signal that setup is complete +python3 -m http.server 4444 & + +sleep infinity \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index c97d28921545..7fb2d50302c1 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1052,7 +1052,7 @@ public static List readAllOperationsInLucene(Engine engine, * Asserts the provided engine has a consistent document history between translog and Lucene index. 
*/ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException { - if (mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false + if (mapper == null || mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false || (engine instanceof InternalEngine) == false) { return; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 10a61a748cd3..86f012fbd658 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -631,13 +631,9 @@ private Settings getNodeSettings(final int nodeId, final long seed, final Settin .put("node.name", name) .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); - final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build()); - final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node"); - if (usingSingleNodeDiscovery == false) { - if (autoManageMinMasterNodes) { - assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting", - updatedSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()), nullValue()); - } + if (autoManageMinMasterNodes) { + assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting", + updatedSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()), nullValue()); } return updatedSettings.build(); @@ -1160,7 +1156,7 @@ private synchronized void reset(boolean wipeData) throws IOException { nextNodeId.set(newSize); assert size() == newSize; - if (newSize > 0) { + if (autoManageMinMasterNodes && newSize > 0) { validateClusterFormed(); } logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 187393e8c9e9..87596beb451d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -36,7 +36,6 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.WarningsHandler; -import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; @@ -53,6 +52,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.AfterClass; @@ -544,7 +544,7 @@ private void wipeCluster() throws Exception { for (Object snapshot : snapshots) { Map snapshotInfo = (Map) snapshot; String name = (String) snapshotInfo.get("snapshot"); - if (SnapshotsInProgress.State.valueOf((String) snapshotInfo.get("state")).completed() == false) { + if (SnapshotState.valueOf((String) snapshotInfo.get("state")).completed() == false) { inProgressSnapshots.computeIfAbsent(repoName, key -> new ArrayList<>()).add(snapshotInfo); } logger.debug("wiping 
snapshot [{}/{}]", repoName, name); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index fea1c3997530..bdcf426d118f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -47,7 +47,8 @@ public final class Features { "warnings", "yaml", "contains", - "transform_and_set" + "transform_and_set", + "arbitrary_key" )); private Features() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java index 8ebeca4233ab..36d1ff04a559 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java @@ -102,7 +102,17 @@ private Object evaluate(String key, Object object, Stash stash) throws IOExcepti } if (object instanceof Map) { - return ((Map) object).get(key); + final Map objectAsMap = (Map) object; + if ("_arbitrary_key_".equals(key)) { + if (objectAsMap.isEmpty()) { + throw new IllegalArgumentException("requested [" + key + "] but the map was empty"); + } + if (objectAsMap.containsKey(key)) { + throw new IllegalArgumentException("requested meta-key [" + key + "] but the map unexpectedly contains this key"); + } + return objectAsMap.keySet().iterator().next(); + } + return objectAsMap.get(key); } if (object instanceof List) { List list = (List) object; @@ -149,7 +159,7 @@ private String[] parsePath(String path) { list.add(current.toString()); } - return list.toArray(new String[list.size()]); + return list.toArray(new String[0]); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java index c067c1aabd85..2fcdf9be511a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.SeedUtils; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.store.BaseDirectoryWrapper; @@ -55,6 +54,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Random; +import java.util.Set; public class MockFSDirectoryService extends FsDirectoryService { @@ -179,6 +179,12 @@ public synchronized void crash() throws IOException { super.crash(); } } + + // temporary override until LUCENE-8735 is integrated + @Override + public Set getPendingDeletions() throws IOException { + return in.getPendingDeletions(); + } } static final class CloseableDirectory implements Closeable { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index b51f2f78c943..954d268fe3a6 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -34,12 +34,10 @@ import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.CloseableChannel; -import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -47,11 +45,9 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; @@ -63,7 +59,6 @@ import org.elasticsearch.test.transport.StubbableTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.nio.MockNioTransport; import org.junit.After; import org.junit.Before; @@ -998,6 +993,7 @@ public void handleException(TransportException exp) { } @TestLogging(value = "org.elasticsearch.transport.TransportService.tracer:trace") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40586") public void testTracerLog() throws Exception { TransportRequestHandler handler = (request, channel, task) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = (request, channel, task) -> { @@ -2001,34 +1997,23 @@ public void testKeepAlivePings() throws Exception { public void testTcpHandshake() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - TcpTransport originalTransport = (TcpTransport) serviceA.getOriginalTransport(); - NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); - - MockNioTransport transport = new MockNioTransport(Settings.EMPTY, Version.CURRENT, threadPool, - new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, - new NoneCircuitBreakerService()) { - @Override - protected void handleRequest(TcpChannel channel, InboundMessage.Request request, int messageLengthBytes) - throws IOException { - // we flip the isHandshake bit back and act like the handler is not found - byte status = (byte) (request.status & ~(1 << 3)); - Version version = request.getVersion(); - InboundMessage.Request nonHandshakeRequest = new InboundMessage.Request(request.threadContext, version, - status, request.getRequestId(), request.getActionName(), request.getFeatures(), request.getStreamInput()); - super.handleRequest(channel, nonHandshakeRequest, messageLengthBytes); - } - }; - - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, transport, Version.CURRENT, threadPool, - null, Collections.emptySet())) { + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, 
threadPool, null)) { + service.addMessageListener(new TransportMessageListener() { + @Override + public void onRequestReceived(long requestId, String action) { + if (TransportHandshaker.HANDSHAKE_ACTION_NAME.equals(action)) { + throw new ActionNotFoundTransportException(action); + } + } + }); service.start(); service.acceptIncomingRequests(); // this acts like a node that doesn't have support for handshakes DiscoveryNode node = - new DiscoveryNode("TS_TPC", "TS_TPC", transport.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); + new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); ConnectTransportException exception = expectThrows(ConnectTransportException.class, () -> serviceA.connectToNode(node)); - assertTrue(exception.getCause() instanceof IllegalStateException); - assertEquals("handshake failed", exception.getCause().getMessage()); + assertTrue(exception.getCause() instanceof TransportException); + assertEquals("handshake failed because connection reset", exception.getCause().getMessage()); } ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(Settings.EMPTY); @@ -2036,7 +2021,7 @@ protected void handleRequest(TcpChannel channel, InboundMessage.Request request, DiscoveryNode node = new DiscoveryNode("TS_TPC", "TS_TPC", service.boundAddress().publishAddress(), emptyMap(), emptySet(), version0); PlainActionFuture future = PlainActionFuture.newFuture(); - originalTransport.openConnection(node, connectionProfile, future); + serviceA.getOriginalTransport().openConnection(node, connectionProfile, future); try (Transport.Connection connection = future.actionGet()) { assertEquals(connection.getVersion(), Version.CURRENT); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java index 79d6d42092a8..9345d7373307 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java @@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.isOneOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -181,6 +182,56 @@ public void testEvaluateObjectKeys() throws Exception { assertThat(strings, contains("template_1", "template_2")); } + public void testEvaluateArbitraryKey() throws Exception { + XContentBuilder xContentBuilder = randomXContentBuilder(); + xContentBuilder.startObject(); + xContentBuilder.startObject("metadata"); + xContentBuilder.startObject("templates"); + xContentBuilder.startObject("template_1"); + xContentBuilder.field("field1", "value"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_2"); + xContentBuilder.field("field2", "value"); + xContentBuilder.field("field3", "value"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_3"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_4"); + xContentBuilder.field("_arbitrary_key_", "value"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + ObjectPath objectPath = ObjectPath.createFromXContent(xContentBuilder.contentType().xContent(), + 
BytesReference.bytes(xContentBuilder)); + + { + final Object object = objectPath.evaluate("metadata.templates.template_1._arbitrary_key_"); + assertThat(object, instanceOf(String.class)); + final String key = (String) object; + assertThat(key, equalTo("field1")); + } + + { + final Object object = objectPath.evaluate("metadata.templates.template_2._arbitrary_key_"); + assertThat(object, instanceOf(String.class)); + final String key = (String) object; + assertThat(key, isOneOf("field2", "field3")); + } + + { + final IllegalArgumentException exception + = expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_3._arbitrary_key_")); + assertThat(exception.getMessage(), equalTo("requested [_arbitrary_key_] but the map was empty")); + } + + { + final IllegalArgumentException exception + = expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_4._arbitrary_key_")); + assertThat(exception.getMessage(), equalTo("requested meta-key [_arbitrary_key_] but the map unexpectedly contains this key")); + } + } + public void testEvaluateStashInPropertyName() throws Exception { XContentBuilder xContentBuilder = randomXContentBuilder(); xContentBuilder.startObject(); diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc index de2ad5af3081..bfd6a14d3ed7 100644 --- a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -50,15 +50,46 @@ mapping is performed. user. Within the `metadata` object, keys beginning with `_` are reserved for system usage. -`roles` (required):: -(list) A list of roles that are granted to the users that match the role mapping -rules. +`roles`:: +(list of strings) A list of role names that are granted to the users that match +the role mapping rules. +_Exactly one of `roles` or `role_templates` must be specified_. + +`role_templates`:: +(list of objects) A list of Mustache templates that will be evaluated to +determine the role names that should be granted to the users that match the role +mapping rules. +The format of these objects is defined below. +_Exactly one of `roles` or `role_templates` must be specified_. `rules` (required):: (object) The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. See <>. +==== Role Templates + +The most common use for role mappings is to create a mapping from a known value +on the user to a fixed role name. +For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be +given the `superuser` role in {es}. +The `roles` field is used for this purpose. + +For more complex needs it is possible to use Mustache templates to dynamically +determine the names of the roles that should be granted to the user. +The `role_templates` field is used for this purpose. + +All of the <> that are available in the +role mapping `rules` are also available in the role templates. Thus it is possible +to assign a user to a role that reflects their `username`, their `groups` or the +name of the `realm` to which they authenticated. + +By default a template is evaluated to produce a single string that is the name +of the role which should be assigned to the user. If the `format` of the template +is set to `"json"` then the template is expected to produce a JSON string, or an +array of JSON strings for the role name(s).
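For orientation, the sketch below shows what a `role_templates` entry can look like; the two template sources are taken from the examples later on this page, while the surrounding JSON fragment is illustrative only and not part of the patch:

[source, js]
------------------------------------------------------------
"role_templates": [
  { "template": { "source": "_user_{{username}}" } },                              <1>
  { "template": { "source": "{{#tojson}}groups{{/tojson}}" }, "format": "json" }   <2>
]
------------------------------------------------------------
<1> With the default format, the template renders to a single string that is used as a role name.
<2> With `"format": "json"`, the rendered output is parsed as a JSON string or an array of JSON strings, each of which is used as a role name.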
+ +The Examples section below demonstrates the use of templated role names. ==== Authorization @@ -117,12 +148,26 @@ POST /_security/role_mapping/mapping2 -------------------------------------------------- // CONSOLE +The following example matches users who authenticated against a specific realm: +[source, js] +------------------------------------------------------------ +POST /_security/role_mapping/mapping3 +{ + "roles": [ "ldap-user" ], + "enabled": true, + "rules": { + "field" : { "realm.name" : "ldap1" } + } +} +------------------------------------------------------------ +// CONSOLE + The following example matches any user where either the username is `esadmin` or the user is in the `cn=admin,dc=example,dc=com` group: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping3 +POST /_security/role_mapping/mapping4 { "roles": [ "superuser" ], "enabled": true, @@ -144,25 +189,52 @@ POST /_security/role_mapping/mapping3 ------------------------------------------------------------ // CONSOLE -The following example matches users who authenticated against a specific realm: +The example above is useful when the group names in your identity management +system (such as Active Directory, or a SAML Identity Provider) do not have a +1-to-1 correspondence with the names of roles in {es}. The role mapping is the +means by which you link a _group name_ with a _role name_. + +However, in rare cases the names of your groups may be an exact match for the +names of your {es} roles. This can be the case when your SAML Identity Provider +includes its own "group mapping" feature and can be configured to release {es} +role names in the user's SAML attributes. + +In these cases it is possible to use a template that treats the group names as +role names. + +*Note*: This should only be done if you intend to define roles for all of the +provided groups. Mapping a user to a large number of unnecessary or undefined +roles is inefficient and can have a negative effect on system performance. +If you only need to map a subset of the groups, then you should do this +using explicit mappings. + [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping4 +POST /_security/role_mapping/mapping5 { - "roles": [ "ldap-user" ], - "enabled": true, + "role_templates": [ + { + "template": { "source": "{{#tojson}}groups{{/tojson}}" }, <1> + "format" : "json" <2> + } + ], "rules": { - "field" : { "realm.name" : "ldap1" } - } + "field" : { "realm.name" : "saml1" } + }, + "enabled": true } ------------------------------------------------------------ // CONSOLE +<1> The `tojson` mustache function is used to convert the list of + group names into a valid JSON array. +<2> Because the template produces a JSON array, the format must be + set to `json`. 
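As a purely illustrative walk-through (the group names here are hypothetical), for a matching user whose `groups` are `es-admin` and `es-user`, the template above would render to a JSON array, and each element is then used as a role name:

[source, js]
------------------------------------------------------------
"{{#tojson}}groups{{/tojson}}"   -->   ["es-admin", "es-user"]
------------------------------------------------------------

A group name only grants privileges if a role with that exact name exists, which is why the note above recommends this approach only when you intend to define roles for all of the provided groups.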
The following example matches users within a specific LDAP sub-tree: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping5 +POST /_security/role_mapping/mapping6 { "roles": [ "example-user" ], "enabled": true, @@ -178,7 +250,7 @@ specific realm: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping6 +POST /_security/role_mapping/mapping7 { "roles": [ "ldap-example-user" ], "enabled": true, @@ -203,7 +275,7 @@ following mapping matches any user where *all* of these conditions are met: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping7 +POST /_security/role_mapping/mapping8 { "roles": [ "superuser" ], "enabled": true, @@ -240,3 +312,32 @@ POST /_security/role_mapping/mapping7 } ------------------------------------------------------------ // CONSOLE + +A templated role can be used to automatically map every user to their own +custom role. The role itself can be defined through the +<> or using a +{stack-ov}/custom-roles-authorization.html#implementing-custom-roles-provider[custom roles provider]. + +In this example every user who authenticates using the "cloud-saml" realm +will be automatically mapped to two roles - the `"saml_user"` role and a +role that is their username prefixed with `_user_`. +As an example, the user `nwong` would be assigned the `saml_user` and +`_user_nwong` roles. + +[source, js] +------------------------------------------------------------ +POST /_security/role_mapping/mapping9 +{ + "rules": { "field": { "realm.name": "cloud-saml" } }, + "role_templates": [ + { "template": { "source" : "saml_user" } }, <1> + { "template": { "source" : "_user_{{username}}" } } + ], + "enabled": true +} +------------------------------------------------------------ +// CONSOLE +<1> Because it is not possible to specify both `roles` and `role_templates` in + the same role mapping, we can apply a "fixed name" role by using a template + that has no substitutions. 
+ diff --git a/x-pack/docs/en/watcher/actions/email.asciidoc b/x-pack/docs/en/watcher/actions/email.asciidoc index 565783dd61e7..1f8de3831910 100644 --- a/x-pack/docs/en/watcher/actions/email.asciidoc +++ b/x-pack/docs/en/watcher/actions/email.asciidoc @@ -325,7 +325,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.gmail_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.gmail_account.smtp.secure_password -------------------------------------------------- If you get an authentication error that indicates that you need to continue the @@ -363,7 +363,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.outlook_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.outlook_account.smtp.secure_password -------------------------------------------------- @@ -400,7 +400,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.ses_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.ses_account.smtp.secure_password -------------------------------------------------- NOTE: You need to use your Amazon SES SMTP credentials to send email through @@ -443,7 +443,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.exchange_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.exchange_account.smtp.secure_password -------------------------------------------------- [float] diff --git a/x-pack/docs/en/watcher/actions/jira.asciidoc b/x-pack/docs/en/watcher/actions/jira.asciidoc index f0b9c714181b..4608ee6ab1af 100644 --- a/x-pack/docs/en/watcher/actions/jira.asciidoc +++ b/x-pack/docs/en/watcher/actions/jira.asciidoc @@ -109,12 +109,15 @@ Jira account you need to specify (see {ref}/secure-settings.html[secure settings [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_url -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_user -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_password +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_url +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_user +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_password -------------------------------------------------- -deprecated[The insecure way of storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is deprecated] +[WARNING] +====== +Storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is insecure and has been deprecated. Please use {es}'s secure {ref}/secure-settings.html[keystore] method instead. 
+====== To avoid credentials that transit in clear text over the network, {watcher} will reject `url` settings like `http://internal-jira.elastic.co` that are based on diff --git a/x-pack/docs/en/watcher/actions/slack.asciidoc b/x-pack/docs/en/watcher/actions/slack.asciidoc index ef8b907677b8..0e9177c604d0 100644 --- a/x-pack/docs/en/watcher/actions/slack.asciidoc +++ b/x-pack/docs/en/watcher/actions/slack.asciidoc @@ -196,16 +196,20 @@ image::images/slack-add-webhook-integration.jpg[] image::images/slack-copy-webhook-url.jpg[] To configure a Slack account, at a minimum you need to specify the account -name and webhook URL in the elasticsearch keystore (se {ref}/secure-settings.html[secure settings]): +name and webhook URL in the {es} keystore (see {ref}/secure-settings.html[secure settings]): [source,shell] -------------------------------------------------- bin/elasticsearch-keystore add xpack.notification.slack.account.monitoring.secure_url -------------------------------------------------- -deprecated[You can also configure this via settings in the `elasticsearch.yml` file, using the keystore is the preferred and secure way of doing this] +[WARNING] +====== +You can no longer configure Slack accounts using `elasticsearch.yml` settings. +Please use {es}'s secure {ref}/secure-settings.html[keystore] method instead. +====== -You can also specify defaults for the {ref}/notification-settings.html#slack-account-attributes[Slack +You can specify defaults for the {ref}/notification-settings.html#slack-account-attributes[Slack notification attributes]: [source,yaml] diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 968db14cc90f..dceb2db9b544 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -43,7 +43,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -266,7 +265,6 @@ public void testRetentionLeaseIsRenewedDuringRecovery() throws Exception { } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40089") public void testRetentionLeasesAreNotBeingRenewedAfterRecoveryCompletes() throws Exception { final String leaderIndex = "leader"; final int numberOfShards = randomIntBetween(1, 3); @@ -463,7 +461,6 @@ public void testUnfollowRemovesRetentionLeases() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40089") public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; @@ -534,7 +531,6 @@ public void testUnfollowFailsToRemoveRetentionLeases() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40089") public void testRetentionLeaseRenewedWhileFollowing() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; @@ -618,7 +614,6 @@ public void testRetentionLeaseAdvancesWhileFollowing() throws Exception { } @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/39509") - @TestLogging(value = "org.elasticsearch.xpack.ccr:trace") public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; @@ -748,7 +743,6 @@ public void testRetentionLeaseRenewalIsResumedWhenFollowingIsResumed() throws Ex assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40089") public void testRetentionLeaseIsAddedIfItDisappearsWhileFollowing() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index ef1fa1a80259..44f8583bb9b5 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -217,7 +217,7 @@ public void testValidation() throws IOException { validate(request, leaderIMD, followIMD, UUIDs, mapperService); } } - + public void testDynamicIndexSettingsAreClassified() { // We should be conscious which dynamic settings are replicated from leader to follower index. // This is the list of settings that should be replicated: @@ -229,6 +229,7 @@ public void testDynamicIndexSettingsAreClassified() { replicatedSettings.add(MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING); replicatedSettings.add(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING); replicatedSettings.add(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING); + replicatedSettings.add(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING); replicatedSettings.add(MapperService.INDEX_MAPPER_DYNAMIC_SETTING); replicatedSettings.add(IndexSettings.MAX_NGRAM_DIFF_SETTING); replicatedSettings.add(IndexSettings.MAX_SHINGLE_DIFF_SETTING); @@ -237,7 +238,7 @@ public void testDynamicIndexSettingsAreClassified() { if (setting.isDynamic()) { boolean notReplicated = TransportResumeFollowAction.NON_REPLICATED_SETTINGS.contains(setting); boolean replicated = replicatedSettings.contains(setting); - assertThat("setting [" + setting.getKey() + "] is not classified as replicated xor not replicated", + assertThat("setting [" + setting.getKey() + "] is not classified as replicated or not replicated", notReplicated ^ replicated, is(true)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index 196982c0a35f..c1a682757d14 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -96,10 +96,26 @@ public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { return this; } + /** + * The document types to execute the explore against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. 
+ */ + @Deprecated public String[] types() { return this.types; } + /** + * The document types to execute the explore request against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public GraphExploreRequest types(String... types) { this.types = types; return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a745215fa553..dc8403b7bd54 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -57,9 +57,9 @@ import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; -import org.elasticsearch.xpack.core.indexlifecycle.SetPriorityAction; import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.SetPriorityAction; import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.indexlifecycle.UnfollowAction; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java new file mode 100644 index 000000000000..85327337730f --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.common.notifications; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +public abstract class AbstractAuditMessage implements ToXContentObject { + public static final ParseField TYPE = new ParseField("audit_message"); + + public static final ParseField MESSAGE = new ParseField("message"); + public static final ParseField LEVEL = new ParseField("level"); + public static final ParseField TIMESTAMP = new ParseField("timestamp"); + public static final ParseField NODE_NAME = new ParseField("node_name"); + + private final String resourceId; + private final String message; + private final Level level; + private final Date timestamp; + private final String nodeName; + + public AbstractAuditMessage(String resourceId, String message, Level level, String nodeName) { + this.resourceId = resourceId; + this.message = Objects.requireNonNull(message); + this.level = Objects.requireNonNull(level); + this.timestamp = new Date(); + this.nodeName = nodeName; + } + + protected AbstractAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + this.resourceId = resourceId; + this.message = Objects.requireNonNull(message); + this.level = Objects.requireNonNull(level); + this.timestamp = Objects.requireNonNull(timestamp); + this.nodeName = nodeName; + } + + public final String getResourceId() { + return resourceId; + } + + public final String getMessage() { + return message; + } + + public final Level getLevel() { + return level; + } + + public final Date getTimestamp() { + return timestamp; + } + + public final String getNodeName() { + return nodeName; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (resourceId != null) { + builder.field(getResourceField(), resourceId); + } + builder.field(MESSAGE.getPreferredName(), message); + builder.field(LEVEL.getPreferredName(), level); + builder.field(TIMESTAMP.getPreferredName(), timestamp.getTime()); + if (nodeName != null) { + builder.field(NODE_NAME.getPreferredName(), nodeName); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(resourceId, message, level, timestamp); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj instanceof AbstractAuditMessage == false) { + return false; + } + + AbstractAuditMessage other = (AbstractAuditMessage) obj; + return Objects.equals(resourceId, other.resourceId) && + Objects.equals(message, other.message) && + Objects.equals(level, other.level) && + Objects.equals(timestamp, other.timestamp); + } + + protected abstract String getResourceField(); + + public abstract static class AbstractBuilder { + + public T info(String resourceId, String message, String nodeName) { + return newMessage(Level.INFO, resourceId, message, nodeName); + } + + public T warning(String resourceId, String message, String nodeName) { + return newMessage(Level.WARNING, resourceId, message, nodeName); + } + + public T error(String resourceId, String message, String nodeName) { + return newMessage(Level.ERROR, resourceId, message, nodeName); + } + + protected abstract T newMessage(Level level, String resourceId, 
String message, String nodeName); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Auditor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Auditor.java new file mode 100644 index 000000000000..01acb18900b2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Auditor.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common.notifications; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; + +public class Auditor { + + private static final Logger logger = LogManager.getLogger(Auditor.class); + private final Client client; + private final String nodeName; + private final String auditIndex; + private final String executionOrigin; + private final AbstractAuditMessage.AbstractBuilder messageBuilder; + + public Auditor(Client client, + String nodeName, + String auditIndex, + String executionOrigin, + AbstractAuditMessage.AbstractBuilder messageBuilder) { + this.client = Objects.requireNonNull(client); + this.nodeName = Objects.requireNonNull(nodeName); + this.auditIndex = auditIndex; + this.executionOrigin = executionOrigin; + this.messageBuilder = Objects.requireNonNull(messageBuilder); + } + + public final void info(String resourceId, String message) { + indexDoc(messageBuilder.info(resourceId, message, nodeName)); + } + + public final void warning(String resourceId, String message) { + indexDoc(messageBuilder.warning(resourceId, message, nodeName)); + } + + public final void error(String resourceId, String message) { + indexDoc(messageBuilder.error(resourceId, message, nodeName)); + } + + protected void onIndexResponse(IndexResponse response) { + logger.trace("Successfully wrote audit message"); + } + + protected void onIndexFailure(Exception exception) { + logger.debug("Failed to write audit message", exception); + } + + private void indexDoc(ToXContent toXContent) { + IndexRequest indexRequest = new IndexRequest(auditIndex); + indexRequest.source(toXContentBuilder(toXContent)); + indexRequest.timeout(TimeValue.timeValueSeconds(5)); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), + executionOrigin, + indexRequest, + ActionListener.wrap( + this::onIndexResponse, + this::onIndexFailure + ), client::index); + } + + private XContentBuilder toXContentBuilder(ToXContent toXContent) { + try (XContentBuilder jsonBuilder = jsonBuilder()) { + return toXContent.toXContent(jsonBuilder, ToXContent.EMPTY_PARAMS); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Level.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Level.java new file mode 100644 index 000000000000..34aac72517ee --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/Level.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common.notifications; + +import java.util.Locale; + +public enum Level { + INFO, WARNING, ERROR; + + /** + * Case-insensitive from string method. + * + * @param value + * String representation + * @return The condition type + */ + public static Level fromString(String value) { + return Level.valueOf(value.toUpperCase(Locale.ROOT)); + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java index e25b2619b19f..73e639cec5e1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java @@ -26,12 +26,14 @@ public final class DataFrameField { public static final ParseField INDEX_DOC_TYPE = new ParseField("doc_type"); public static final ParseField SOURCE = new ParseField("source"); public static final ParseField DESTINATION = new ParseField("dest"); + public static final ParseField FORCE = new ParseField("force"); // common strings public static final String TASK_NAME = "data_frame/transforms"; public static final String REST_BASE_PATH = "/_data_frame/"; public static final String REST_BASE_PATH_TRANSFORMS = REST_BASE_PATH + "transforms/"; public static final String REST_BASE_PATH_TRANSFORMS_BY_ID = REST_BASE_PATH_TRANSFORMS + "{id}/"; + public static final String DATA_FRAME_TRANSFORM_AUDIT_ID_FIELD = "transform_id"; // note: this is used to match tasks public static final String PERSISTENT_TASK_DESCRIPTION_PREFIX = "data_frame_"; @@ -51,6 +53,9 @@ public final class DataFrameField { */ public static final String FOR_INTERNAL_STORAGE = "for_internal_storage"; + // internal document id + public static String DOCUMENT_ID_FIELD = "_id"; + private DataFrameField() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java index 13e62da090c3..0316153fbc82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java @@ -6,17 +6,14 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import 
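One note on the Level enum added above: parsing is case-insensitive and rendering is lowercase, so a level survives a write/parse round trip regardless of the casing a client sends. A trivial illustration:

// Both spellings parse to the same constant; rendering is always lowercase.
assert Level.fromString("ERROR") == Level.ERROR;
assert Level.fromString("error") == Level.ERROR;
assert "error".equals(Level.ERROR.toString());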
org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -42,16 +39,13 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContentFragment { + public static class Request extends BaseTasksRequest { private String id; public Request(String id) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); } - private Request() { - } - public Request(StreamInput in) throws IOException { super(in); id = in.readString(); @@ -77,12 +71,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -102,14 +90,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder - extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, DeleteDataFrameTransformAction action) { - super(client, action, new DeleteDataFrameTransformAction.Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean acknowledged; public Response(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java index ca6bf9d16e62..ac1498c72a6d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java @@ -8,14 +8,11 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.action.AbstractGetResourcesRequest; @@ -48,7 +45,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends AbstractGetResourcesRequest implements ToXContent { + public static class Request extends AbstractGetResourcesRequest { private static final int MAX_SIZE_RETURN = 1000; @@ -78,25 +75,12 @@ public ActionRequestValidationException validate() { return exception; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), getResourceId()); - return builder; - } - 
@Override public String getResourceIdField() { return DataFrameField.ID.getPreferredName(); } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetDataFrameTransformsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends AbstractGetResourcesResponse implements Writeable, ToXContentObject { public static final String INVALID_TRANSFORMS_DEPRECATION_WARNING = "Found [{}] invalid transforms"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java index 47e922033072..f0e92aa36db2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java @@ -7,19 +7,16 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -44,7 +41,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContent { + public static class Request extends BaseTasksRequest { private String id; public Request(String id) { @@ -55,8 +52,6 @@ public Request(String id) { } } - private Request() {} - public Request(StreamInput in) throws IOException { super(in); id = in.readString(); @@ -87,12 +82,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -111,13 +100,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetDataFrameTransformsStatsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private List transformsStateAndStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java index 4ddde9ddc467..8116e1d1f23e 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -59,7 +60,7 @@ public Request() { } public static Request fromXContent(final XContentParser parser) throws IOException { Map content = parser.map(); // Destination and ID are not required for Preview, so we just supply our own - content.put(DataFrameField.DESTINATION.getPreferredName(), "unused-transform-preview-index"); + content.put(DataFrameField.DESTINATION.getPreferredName(), Collections.singletonMap("index", "unused-transform-preview-index")); content.put(DataFrameField.ID.getPreferredName(), "transform-preview"); try(XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(content); XContentParser newParser = XContentType.JSON diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 6c226003f663..51b5e0d4ec1d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -10,8 +10,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -100,13 +98,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, PutDataFrameTransformAction action) { - super(client, action, new Request()); - } - } - public static class Response extends AcknowledgedResponse { public Response() { super(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java index 0ac94b6c6aac..b86a2339faa4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java @@ -7,15 +7,12 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -39,12 +36,14 @@ public Response newResponse() { return new Response(); } - public static class Request extends AcknowledgedRequest implements ToXContent { + public static class Request extends AcknowledgedRequest { private String id; + private boolean force; - public Request(String id) { + public Request(String id, boolean force) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + this.force = force; } public Request() { @@ -59,6 +58,10 @@ public String getId() { return id; } + public boolean isForce() { + return force; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -70,12 +73,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -94,13 +91,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StartDataFrameTransformAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean started; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java index a51b9243c3d4..d3c96fb9cf17 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java @@ -7,15 +7,12 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -39,7 +36,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContent { + public static class Request extends BaseTasksRequest { private String id; @@ -70,12 +67,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public 
int hashCode() { return Objects.hash(id); @@ -94,13 +85,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StartDataFrameTransformTaskAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean started; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java index 250442b69379..7fa437bd1560 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java @@ -6,17 +6,14 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -44,26 +41,29 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContent { + public static class Request extends BaseTasksRequest { private String id; private final boolean waitForCompletion; + private final boolean force; - public Request(String id, boolean waitForCompletion, @Nullable TimeValue timeout) { + public Request(String id, boolean waitForCompletion, boolean force, @Nullable TimeValue timeout) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); this.waitForCompletion = waitForCompletion; + this.force = force; // use the timeout value already present in BaseTasksRequest this.setTimeout(timeout == null ? 
DEFAULT_TIMEOUT : timeout); } private Request() { - this(null, false, null); + this(null, false, false, null); } public Request(StreamInput in) throws IOException { super(in); id = in.readString(); waitForCompletion = in.readBoolean(); + force = in.readBoolean(); } public String getId() { @@ -78,11 +78,16 @@ public boolean waitForCompletion() { return waitForCompletion; } + public boolean isForce() { + return force; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBoolean(waitForCompletion); + out.writeBoolean(force); } @Override @@ -90,20 +95,10 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - builder.field(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), waitForCompletion); - if (this.getTimeout() != null) { - builder.field(DataFrameField.TIMEOUT.getPreferredName(), this.getTimeout()); - } - return builder; - } - @Override public int hashCode() { // the base class does not implement hashCode, therefore we need to hash timeout ourselves - return Objects.hash(id, waitForCompletion, this.getTimeout()); + return Objects.hash(id, waitForCompletion, force, this.getTimeout()); } @Override @@ -122,7 +117,9 @@ public boolean equals(Object obj) { return false; } - return Objects.equals(id, other.id) && Objects.equals(waitForCompletion, other.waitForCompletion); + return Objects.equals(id, other.id) && + Objects.equals(waitForCompletion, other.waitForCompletion) && + Objects.equals(force, other.force); } @Override @@ -133,13 +130,6 @@ public boolean match(Task task) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StopDataFrameTransformAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean stopped; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java new file mode 100644 index 000000000000..7dab9be6ab3c --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessage.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
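The stop request above now carries the force flag through the constructor, the wire format, and equals/hashCode. A minimal construction sketch (the transform id is a placeholder; a null timeout falls back to DEFAULT_TIMEOUT):

// Stop "my-transform" without waiting for completion, forcing the stop.
StopDataFrameTransformAction.Request request =
        new StopDataFrameTransformAction.Request("my-transform", false, true, null);
assert request.isForce();
assert request.waitForCompletion() == false;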
+ */ +package org.elasticsearch.xpack.core.dataframe.notifications; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; +import org.elasticsearch.xpack.core.common.notifications.Level; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; + +import java.util.Date; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.core.dataframe.DataFrameField.DATA_FRAME_TRANSFORM_AUDIT_ID_FIELD; + +public class DataFrameAuditMessage extends AbstractAuditMessage { + + private static final ParseField TRANSFORM_ID = new ParseField(DATA_FRAME_TRANSFORM_AUDIT_ID_FIELD); + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "data_frame_audit_message", + true, + a -> new DataFrameAuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); + + static { + PARSER.declareString(optionalConstructorArg(), TRANSFORM_ID); + PARSER.declareString(constructorArg(), MESSAGE); + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Level.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, LEVEL, ObjectParser.ValueType.STRING); + PARSER.declareField(constructorArg(), parser -> { + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return new Date(parser.longValue()); + } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(parser.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + parser.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]"); + }, TIMESTAMP, ObjectParser.ValueType.VALUE); + PARSER.declareString(optionalConstructorArg(), NODE_NAME); + } + + public DataFrameAuditMessage(String resourceId, String message, Level level, String nodeName) { + super(resourceId, message, level, nodeName); + } + + protected DataFrameAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + super(resourceId, message, level, timestamp, nodeName); + } + + @Override + protected String getResourceField() { + return TRANSFORM_ID.getPreferredName(); + } + + public static AbstractAuditMessage.AbstractBuilder builder() { + return new AbstractBuilder() { + @Override + protected DataFrameAuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { + return new DataFrameAuditMessage(resourceId, message, level, nodeName); + } + }; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java index 6172bb2de1f6..a8e14faf2f0c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import 
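With DataFrameAuditMessage in place, the data frame plugin can obtain an auditor by combining the generic Auditor added earlier with DataFrameAuditMessage.builder(). A rough sketch, assuming Auditor is parameterized on the message type; the client and node name are taken to be in scope, and the index and origin strings are placeholders rather than the plugin's real constants.

// client: an org.elasticsearch.client.Client available to the plugin; nodeName: this node's name.
Auditor<DataFrameAuditMessage> auditor = new Auditor<>(
        client,
        nodeName,
        ".data-frame-notifications-example",   // placeholder audit index name
        "data_frame",                          // placeholder execution origin
        DataFrameAuditMessage.builder());

// Fire-and-forget: the message is indexed asynchronously with a 5s timeout.
auditor.info("my-transform", "Started indexing data frame transform");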
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; @@ -38,7 +37,6 @@ public class DataFrameTransformConfig extends AbstractDiffable LENIENT_PARSER = createParser(true); private final String id; - private final String source; - private final String dest; + private final SourceConfig source; + private final DestConfig dest; // headers store the user context from the creating user, which allows us to run the transform as this user // the header only contains name, groups and other context but no authorization keys private Map headers; - private final QueryConfig queryConfig; private final PivotConfig pivotConfig; private static ConstructingObjectParser createParser(boolean lenient) { @@ -70,8 +67,8 @@ private static ConstructingObjectParser create DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_INCONSISTENT_ID, id, optionalId)); } - String source = (String) args[1]; - String dest = (String) args[2]; + SourceConfig source = (SourceConfig) args[1]; + DestConfig dest = (DestConfig) args[2]; // ignored, only for internal storage: String docType = (String) args[3]; @@ -83,26 +80,16 @@ private static ConstructingObjectParser create @SuppressWarnings("unchecked") Map headers = (Map) args[4]; - // default handling: if the user does not specify a query, we default to match_all - QueryConfig queryConfig = null; - if (args[5] == null) { - queryConfig = new QueryConfig(Collections.singletonMap(MatchAllQueryBuilder.NAME, Collections.emptyMap()), - new MatchAllQueryBuilder()); - } else { - queryConfig = (QueryConfig) args[5]; - } - - PivotConfig pivotConfig = (PivotConfig) args[6]; - return new DataFrameTransformConfig(id, source, dest, headers, queryConfig, pivotConfig); + PivotConfig pivotConfig = (PivotConfig) args[5]; + return new DataFrameTransformConfig(id, source, dest, headers, pivotConfig); }); parser.declareString(optionalConstructorArg(), DataFrameField.ID); - parser.declareString(constructorArg(), DataFrameField.SOURCE); - parser.declareString(constructorArg(), DataFrameField.DESTINATION); + parser.declareObject(constructorArg(), (p, c) -> SourceConfig.fromXContent(p, lenient), DataFrameField.SOURCE); + parser.declareObject(constructorArg(), (p, c) -> DestConfig.fromXContent(p, lenient), DataFrameField.DESTINATION); parser.declareString(optionalConstructorArg(), DataFrameField.INDEX_DOC_TYPE); parser.declareObject(optionalConstructorArg(), (p, c) -> p.mapStrings(), HEADERS); - parser.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p, lenient), QUERY); parser.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p, lenient), PIVOT_TRANSFORM); return parser; @@ -113,15 +100,13 @@ public static String documentId(String transformId) { } public DataFrameTransformConfig(final String id, - final String source, - final String dest, + final SourceConfig source, + final DestConfig dest, final Map headers, - final QueryConfig queryConfig, final PivotConfig pivotConfig) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); this.source = ExceptionsHelper.requireNonNull(source, DataFrameField.SOURCE.getPreferredName()); this.dest = ExceptionsHelper.requireNonNull(dest, 
DataFrameField.DESTINATION.getPreferredName()); - this.queryConfig = ExceptionsHelper.requireNonNull(queryConfig, QUERY.getPreferredName()); this.setHeaders(headers == null ? Collections.emptyMap() : headers); this.pivotConfig = pivotConfig; @@ -133,10 +118,9 @@ public DataFrameTransformConfig(final String id, public DataFrameTransformConfig(final StreamInput in) throws IOException { id = in.readString(); - source = in.readString(); - dest = in.readString(); + source = new SourceConfig(in); + dest = new DestConfig(in); setHeaders(in.readMap(StreamInput::readString, StreamInput::readString)); - queryConfig = in.readOptionalWriteable(QueryConfig::new); pivotConfig = in.readOptionalWriteable(PivotConfig::new); } @@ -144,11 +128,11 @@ public String getId() { return id; } - public String getSource() { + public SourceConfig getSource() { return source; } - public String getDestination() { + public DestConfig getDestination() { return dest; } @@ -164,30 +148,20 @@ public PivotConfig getPivotConfig() { return pivotConfig; } - public QueryConfig getQueryConfig() { - return queryConfig; - } - public boolean isValid() { - // collect validation results from all child objects - if (queryConfig != null && queryConfig.isValid() == false) { - return false; - } - if (pivotConfig != null && pivotConfig.isValid() == false) { return false; } - return true; + return source.isValid() && dest.isValid(); } @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(id); - out.writeString(source); - out.writeString(dest); + source.writeTo(out); + dest.writeTo(out); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); - out.writeOptionalWriteable(queryConfig); out.writeOptionalWriteable(pivotConfig); } @@ -197,9 +171,6 @@ public XContentBuilder toXContent(final XContentBuilder builder, final Params pa builder.field(DataFrameField.ID.getPreferredName(), id); builder.field(DataFrameField.SOURCE.getPreferredName(), source); builder.field(DataFrameField.DESTINATION.getPreferredName(), dest); - if (queryConfig != null) { - builder.field(QUERY.getPreferredName(), queryConfig); - } if (pivotConfig != null) { builder.field(PIVOT_TRANSFORM.getPreferredName(), pivotConfig); } @@ -230,13 +201,12 @@ public boolean equals(Object other) { && Objects.equals(this.source, that.source) && Objects.equals(this.dest, that.dest) && Objects.equals(this.headers, that.headers) - && Objects.equals(this.queryConfig, that.queryConfig) && Objects.equals(this.pivotConfig, that.pivotConfig); } @Override - public int hashCode() { - return Objects.hash(id, source, dest, headers, queryConfig, pivotConfig); + public int hashCode(){ + return Objects.hash(id, source, dest, headers, pivotConfig); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java index 2b0dc8366c20..4b90407ee1ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -21,7 +22,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.SortedMap; @@ -33,28 +33,44 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState { public static final String NAME = DataFrameField.TASK_NAME; - private final IndexerState state; + private final DataFrameTransformTaskState taskState; + private final IndexerState indexerState; private final long generation; @Nullable private final SortedMap currentPosition; + @Nullable + private final String reason; - private static final ParseField STATE = new ParseField("transform_state"); + private static final ParseField TASK_STATE = new ParseField("task_state"); + private static final ParseField INDEXER_STATE = new ParseField("indexer_state"); private static final ParseField CURRENT_POSITION = new ParseField("current_position"); private static final ParseField GENERATION = new ParseField("generation"); + private static final ParseField REASON = new ParseField("reason"); @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, - args -> new DataFrameTransformState((IndexerState) args[0], (HashMap) args[1], (long) args[2])); + true, + args -> new DataFrameTransformState((DataFrameTransformTaskState) args[0], + (IndexerState) args[1], + (Map) args[2], + (long) args[3], + (String) args[4])); static { + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return DataFrameTransformTaskState.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, TASK_STATE, ObjectParser.ValueType.STRING); PARSER.declareField(constructorArg(), p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { return IndexerState.fromString(p.text()); } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); - }, STATE, ObjectParser.ValueType.STRING); + }, INDEXER_STATE, ObjectParser.ValueType.STRING); PARSER.declareField(optionalConstructorArg(), p -> { if (p.currentToken() == XContentParser.Token.START_OBJECT) { return p.map(); @@ -64,23 +80,36 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState } throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); }, CURRENT_POSITION, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); - PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), GENERATION); + PARSER.declareLong(constructorArg(), GENERATION); + PARSER.declareString(optionalConstructorArg(), REASON); } - public DataFrameTransformState(IndexerState state, @Nullable Map position, long generation) { - this.state = state; + public DataFrameTransformState(DataFrameTransformTaskState taskState, + IndexerState indexerState, + @Nullable Map position, + long generation, + @Nullable String reason) { + this.taskState = taskState; + this.indexerState = indexerState; this.currentPosition = position == null ? null : Collections.unmodifiableSortedMap(new TreeMap<>(position)); this.generation = generation; + this.reason = reason; } public DataFrameTransformState(StreamInput in) throws IOException { - state = IndexerState.fromStream(in); + taskState = DataFrameTransformTaskState.fromStream(in); + indexerState = IndexerState.fromStream(in); currentPosition = in.readBoolean() ? 
Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap())) : null; generation = in.readLong(); + reason = in.readOptionalString(); + } + + public DataFrameTransformTaskState getTaskState() { + return taskState; } public IndexerState getIndexerState() { - return state; + return indexerState; } public Map getPosition() { @@ -91,6 +120,10 @@ public long getGeneration() { return generation; } + public String getReason() { + return reason; + } + public static DataFrameTransformState fromXContent(XContentParser parser) { try { return PARSER.parse(parser, null); @@ -102,11 +135,15 @@ public static DataFrameTransformState fromXContent(XContentParser parser) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(STATE.getPreferredName(), state.value()); + builder.field(TASK_STATE.getPreferredName(), taskState.value()); + builder.field(INDEXER_STATE.getPreferredName(), indexerState.value()); if (currentPosition != null) { builder.field(CURRENT_POSITION.getPreferredName(), currentPosition); } builder.field(GENERATION.getPreferredName(), generation); + if (reason != null) { + builder.field(REASON.getPreferredName(), reason); + } builder.endObject(); return builder; } @@ -118,12 +155,14 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - state.writeTo(out); + taskState.writeTo(out); + indexerState.writeTo(out); out.writeBoolean(currentPosition != null); if (currentPosition != null) { out.writeMap(currentPosition); } out.writeLong(generation); + out.writeOptionalString(reason); } @Override @@ -138,12 +177,20 @@ public boolean equals(Object other) { DataFrameTransformState that = (DataFrameTransformState) other; - return Objects.equals(this.state, that.state) && Objects.equals(this.currentPosition, that.currentPosition) - && this.generation == that.generation; + return Objects.equals(this.taskState, that.taskState) && + Objects.equals(this.indexerState, that.indexerState) && + Objects.equals(this.currentPosition, that.currentPosition) && + this.generation == that.generation && + Objects.equals(this.reason, that.reason); } @Override public int hashCode() { - return Objects.hash(state, currentPosition, generation); + return Objects.hash(taskState, indexerState, currentPosition, generation, reason); + } + + @Override + public String toString() { + return Strings.toString(this); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java index 116ad482d01d..e155998aa2e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateAndStats.java @@ -41,7 +41,7 @@ public class DataFrameTransformStateAndStats implements Writeable, ToXContentObj public static DataFrameTransformStateAndStats initialStateAndStats(String id) { return new DataFrameTransformStateAndStats(id, - new DataFrameTransformState(IndexerState.STOPPED, null, 0), + new DataFrameTransformState(DataFrameTransformTaskState.STOPPED, IndexerState.STOPPED, null, 0L, null), new DataFrameIndexerTransformStats()); } diff --git 
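The reworked DataFrameTransformState above now tracks the persistent task state separately from the indexer state and carries an optional failure reason. A quick sketch of how it renders, with field names taken from the parser above and illustrative values (assuming IndexerState.INDEXING exists as in the shared indexing code):

// Illustrative construction; values are made up.
DataFrameTransformState state = new DataFrameTransformState(
        DataFrameTransformTaskState.STARTED,   // task_state
        IndexerState.INDEXING,                 // indexer_state
        null,                                  // current_position (omitted from XContent when null)
        5L,                                    // generation
        null);                                 // reason (omitted from XContent when null)
// Renders roughly as: {"task_state":"started","indexer_state":"indexing","generation":5}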
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskState.java new file mode 100644 index 000000000000..795daca61ace --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskState.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +public enum DataFrameTransformTaskState implements Writeable { + STOPPED, STARTED, FAILED; + + public static DataFrameTransformTaskState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public static DataFrameTransformTaskState fromStream(StreamInput in) throws IOException { + return in.readEnum(DataFrameTransformTaskState.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + DataFrameTransformTaskState state = this; + out.writeEnum(state); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java new file mode 100644 index 000000000000..285f970a4a4e --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class DestConfig implements Writeable, ToXContentObject { + + public static final ParseField INDEX = new ParseField("index"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>("data_frame_config_dest", + lenient, + args -> new DestConfig((String)args[0])); + parser.declareString(constructorArg(), INDEX); + return parser; + } + + private final String index; + + public DestConfig(String index) { + this.index = ExceptionsHelper.requireNonNull(index, INDEX.getPreferredName()); + } + + public DestConfig(final StreamInput in) throws IOException { + index = in.readString(); + } + + public String getIndex() { + return index; + } + + public boolean isValid() { + return index.isEmpty() == false; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX.getPreferredName(), index); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || other.getClass() != getClass()) { + return false; + } + + DestConfig that = (DestConfig) other; + return Objects.equals(index, that.index); + } + + @Override + public int hashCode(){ + return Objects.hash(index); + } + + public static DestConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { + return lenient ? 
LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java index 7f189a44dd5e..670b1009d298 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java @@ -21,10 +21,12 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -37,6 +39,11 @@ public class QueryConfig extends AbstractDiffable implements Writea private final Map source; private final QueryBuilder query; + static QueryConfig matchAll() { + return new QueryConfig(Collections.singletonMap(MatchAllQueryBuilder.NAME, Collections.emptyMap()), + new MatchAllQueryBuilder()); + } + public QueryConfig(final Map source, final QueryBuilder query) { this.source = Objects.requireNonNull(source); this.query = query; @@ -110,4 +117,4 @@ public boolean equals(Object other) { public boolean isValid() { return this.query != null; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfig.java new file mode 100644 index 000000000000..a11950701d15 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfig.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + + +public class SourceConfig implements Writeable, ToXContentObject { + + public static final ParseField QUERY = new ParseField("query"); + public static final ParseField INDEX = new ParseField("index"); + + public static final ConstructingObjectParser STRICT_PARSER = createParser(false); + public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>("data_frame_config_source", + lenient, + args -> { + @SuppressWarnings("unchecked") + String[] index = ((List)args[0]).toArray(new String[0]); + // default handling: if the user does not specify a query, we default to match_all + QueryConfig queryConfig = args[1] == null ? QueryConfig.matchAll() : (QueryConfig) args[1]; + return new SourceConfig(index, queryConfig); + }); + parser.declareStringArray(constructorArg(), INDEX); + parser.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p, lenient), QUERY); + return parser; + } + + private final String[] index; + private final QueryConfig queryConfig; + + /** + * Create a new SourceConfig for the provided indices. + * + * {@link QueryConfig} defaults to a MatchAll query. + * + * @param index Any number of indices. At least one non-null, non-empty, index should be provided + */ + public SourceConfig(String... index) { + this(index, QueryConfig.matchAll()); + } + + /** + * Create a new SourceConfig for the provided indices, from which data is gathered with the provided {@link QueryConfig} + * + * @param index Any number of indices. 
At least one non-null, non-empty, index should be provided + * @param queryConfig A QueryConfig object that contains the desired query, needs to be non-null + */ + public SourceConfig(String[] index, QueryConfig queryConfig) { + ExceptionsHelper.requireNonNull(index, INDEX.getPreferredName()); + if (index.length == 0) { + throw new IllegalArgumentException("must specify at least one index"); + } + if (Arrays.stream(index).anyMatch(Strings::isNullOrEmpty)) { + throw new IllegalArgumentException("all indices need to be non-null and non-empty"); + } + this.index = index; + this.queryConfig = ExceptionsHelper.requireNonNull(queryConfig, QUERY.getPreferredName()); + } + + public SourceConfig(final StreamInput in) throws IOException { + index = in.readStringArray(); + queryConfig = new QueryConfig(in); + } + + public String[] getIndex() { + return index; + } + + public QueryConfig getQueryConfig() { + return queryConfig; + } + + public boolean isValid() { + return queryConfig.isValid(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(index); + queryConfig.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.array(INDEX.getPreferredName(), index); + builder.field(QUERY.getPreferredName(), queryConfig); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || other.getClass() != getClass()) { + return false; + } + + SourceConfig that = (SourceConfig) other; + return Arrays.equals(index, that.index) && Objects.equals(queryConfig, that.queryConfig); + } + + @Override + public int hashCode(){ + // Using Arrays.hashCode as Objects.hash does not deeply hash nested arrays. Since we are doing Array.equals, this is necessary + int hash = Arrays.hashCode(index); + return 31 * hash + (queryConfig == null ? 0 : queryConfig.hashCode()); + } + + public static SourceConfig fromXContent(final XContentParser parser, boolean lenient) throws IOException { + return lenient ? 
LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java index 7dce5e85ab75..c0cafa8e9079 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java @@ -106,7 +106,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public Version getMinimalSupportedVersion() { - return Version.V_7_0_0; + return Version.V_6_6_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java index 3cfaeba048d5..958120b99b87 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Objects; +import java.util.Optional; /** * A step that waits until the index it's used on is no longer a leader index. @@ -57,8 +59,11 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { boolean isCurrentlyLeaderIndex = Arrays.stream(indexStats.getShards()) .map(ShardStats::getRetentionLeaseStats) - .flatMap(retentionLeaseStats -> retentionLeaseStats.retentionLeases().leases().stream()) - .anyMatch(lease -> CCR_LEASE_KEY.equals(lease.source())); + .map(Optional::ofNullable) + .map(o -> o.flatMap(stats -> Optional.ofNullable(stats.retentionLeases()))) + .map(o -> o.flatMap(leases -> Optional.ofNullable(leases.leases()))) + .map(o -> o.map(Collection::stream)) + .anyMatch(lease -> lease.isPresent() && lease.get().anyMatch(l -> CCR_LEASE_KEY.equals(l.source()))); if (isCurrentlyLeaderIndex) { listener.onResponse(false, new Info()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index 087e29ec8b56..168adaa11165 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -5,12 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; import 
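With the SourceConfig and DestConfig classes above replacing the plain source/dest strings in DataFrameTransformConfig, the transform configuration changes shape: source takes an index array plus an optional query (defaulting to match_all), and dest takes an index. A hedged sketch of the new layout; the index names are placeholders and the serialized form is approximate.

// Placeholders for illustration; "reviews" and "reviews_by_user" are not indices used by this change.
SourceConfig source = new SourceConfig("reviews");        // query defaults to match_all
DestConfig dest = new DestConfig("reviews_by_user");
// On the wire these serialize roughly as:
//   "source": { "index": ["reviews"], "query": { "match_all": {} } }
//   "dest":   { "index": "reviews_by_user" }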
org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -35,6 +37,7 @@ public class PutRoleMappingRequest extends ActionRequest private String name = null; private boolean enabled = true; private List roles = Collections.emptyList(); + private List roleTemplates = Collections.emptyList(); private RoleMapperExpression rules = null; private Map metadata = Collections.emptyMap(); private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; @@ -46,20 +49,20 @@ public PutRoleMappingRequest() { public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; if (name == null) { - validationException = addValidationError("role-mapping name is missing", - validationException); + validationException = addValidationError("role-mapping name is missing", validationException); } - if (roles.isEmpty()) { - validationException = addValidationError("role-mapping roles are missing", - validationException); + if (roles.isEmpty() && roleTemplates.isEmpty()) { + validationException = addValidationError("role-mapping roles or role-templates are missing", validationException); + } + if (roles.size() > 0 && roleTemplates.size() > 0) { + validationException = addValidationError("role-mapping cannot have both roles and role-templates", validationException); } if (rules == null) { - validationException = addValidationError("role-mapping rules are missing", - validationException); + validationException = addValidationError("role-mapping rules are missing", validationException); } if (MetadataUtils.containsReservedMetadata(metadata)) { - validationException = addValidationError("metadata keys may not start with [" + - MetadataUtils.RESERVED_PREFIX + "]", validationException); + validationException = addValidationError("metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", + validationException); } return validationException; } @@ -84,10 +87,18 @@ public List getRoles() { return Collections.unmodifiableList(roles); } + public List getRoleTemplates() { + return Collections.unmodifiableList(roleTemplates); + } + public void setRoles(List roles) { this.roles = new ArrayList<>(roles); } + public void setRoleTemplates(List templates) { + this.roleTemplates = new ArrayList<>(templates); + } + public RoleMapperExpression getRules() { return rules; } @@ -126,6 +137,9 @@ public void readFrom(StreamInput in) throws IOException { this.name = in.readString(); this.enabled = in.readBoolean(); this.roles = in.readStringList(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + this.roleTemplates = in.readList(TemplateRoleName::new); + } this.rules = ExpressionParser.readExpression(in); this.metadata = in.readMap(); this.refreshPolicy = RefreshPolicy.readFrom(in); @@ -137,6 +151,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeBoolean(enabled); out.writeStringCollection(roles); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeList(roleTemplates); + } ExpressionParser.writeExpression(rules, out); out.writeMap(metadata); refreshPolicy.writeTo(out); @@ -147,6 +164,7 @@ public ExpressionRoleMapping getMapping() { name, rules, roles, + roleTemplates, metadata, enabled ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java 
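Because role templates are new, PutRoleMappingRequest only puts them on the wire when both sides are on a version that understands them; older nodes keep the previous stream format and the templates stay at their empty default. A condensed recap of the pattern, using the exact calls from the diff above:

// Write side: older nodes never see the new field.
if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
    out.writeList(roleTemplates);
}
// Read side mirrors it; when the sender is older, roleTemplates remains the empty list.
if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
    this.roleTemplates = in.readList(TemplateRoleName::new);
}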
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java index c74952e9dfd0..14f722d16941 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java @@ -5,18 +5,19 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + /** * Builder for requests to add/update a role-mapping to the native store * @@ -38,6 +39,7 @@ public PutRoleMappingRequestBuilder source(String name, BytesReference source, request.setName(name); request.setEnabled(mapping.isEnabled()); request.setRoles(mapping.getRoles()); + request.setRoleTemplates(mapping.getRoleTemplates()); request.setRules(mapping.getExpression()); request.setMetadata(mapping.getMetadata()); return this; @@ -52,6 +54,10 @@ public PutRoleMappingRequestBuilder roles(String... roles) { request.setRoles(Arrays.asList(roles)); return this; } + public PutRoleMappingRequestBuilder roleTemplates(TemplateRoleName... 
templates) { + request.setRoleTemplates(Arrays.asList(templates)); + return this; + } public PutRoleMappingRequestBuilder expression(RoleMapperExpression expression) { request.setRules(expression); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index 95d1e9fa7714..dd5fb08fa14b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authc.support.mapper; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -15,20 +16,28 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Set; import java.util.function.BiConsumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A representation of a single role-mapping for use in NativeRoleMappingStore. 
@@ -50,27 +59,30 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable { static { PARSER.declareStringArray(Builder::roles, Fields.ROLES); - PARSER.declareField(Builder::rules, ExpressionParser::parseObject, Fields.RULES, ObjectParser.ValueType.OBJECT); - PARSER.declareField(Builder::metadata, XContentParser::map, Fields.METADATA, ObjectParser.ValueType.OBJECT); + PARSER.declareObjectArray(Builder::roleTemplates, (parser, ctx) -> TemplateRoleName.parse(parser), Fields.ROLE_TEMPLATES); + PARSER.declareField(Builder::rules, ExpressionParser::parseObject, Fields.RULES, ValueType.OBJECT); + PARSER.declareField(Builder::metadata, XContentParser::map, Fields.METADATA, ValueType.OBJECT); PARSER.declareBoolean(Builder::enabled, Fields.ENABLED); BiConsumer ignored = (b, v) -> { }; // skip the doc_type and type fields in case we're parsing directly from the index PARSER.declareString(ignored, new ParseField(NativeRoleMappingStoreField.DOC_TYPE_FIELD)); PARSER.declareString(ignored, new ParseField(UPGRADE_API_TYPE_FIELD)); - } + } private final String name; private final RoleMapperExpression expression; private final List roles; + private final List roleTemplates ; private final Map metadata; private final boolean enabled; - public ExpressionRoleMapping(String name, RoleMapperExpression expr, List roles, Map metadata, - boolean enabled) { + public ExpressionRoleMapping(String name, RoleMapperExpression expr, List roles, List templates, + Map metadata, boolean enabled) { this.name = name; this.expression = expr; - this.roles = roles; + this.roles = roles == null ? Collections.emptyList() : roles; + this.roleTemplates = templates == null ? Collections.emptyList() : templates; this.metadata = metadata; this.enabled = enabled; } @@ -79,6 +91,11 @@ public ExpressionRoleMapping(StreamInput in) throws IOException { this.name = in.readString(); this.enabled = in.readBoolean(); this.roles = in.readStringList(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + this.roleTemplates = in.readList(TemplateRoleName::new); + } else { + this.roleTemplates = Collections.emptyList(); + } this.expression = ExpressionParser.readExpression(in); this.metadata = in.readMap(); } @@ -88,6 +105,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeBoolean(enabled); out.writeStringCollection(roles); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeList(roleTemplates); + } ExpressionParser.writeExpression(expression, out); out.writeMap(metadata); } @@ -103,7 +123,7 @@ public String getName() { /** * The expression that determines whether the roles in this mapping should be applied to any given user. * If the expression - * {@link RoleMapperExpression#match(org.elasticsearch.xpack.security.authc.support.mapper.expressiondsl.ExpressionModel) matches} a + * {@link RoleMapperExpression#match(ExpressionModel) matches} a * org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData user, then the user should be assigned this mapping's * {@link #getRoles() roles} */ @@ -119,6 +139,14 @@ public List getRoles() { return Collections.unmodifiableList(roles); } + /** + * The list of {@link RoleDescriptor roles} (specified by a {@link TemplateRoleName template} that evaluates to one or more names) + * that should be assigned to users that match the {@link #getExpression() expression} in this mapping. + */ + public List getRoleTemplates() { + return Collections.unmodifiableList(roleTemplates); + } + /** * Meta-data for this mapping. 
This exists for external systems of user to track information about this mapping such as where it was * sourced from, when it was loaded, etc. @@ -137,7 +165,30 @@ public boolean isEnabled() { @Override public String toString() { - return getClass().getSimpleName() + "<" + name + " ; " + roles + " = " + Strings.toString(expression) + ">"; + return getClass().getSimpleName() + "<" + name + " ; " + roles + "/" + roleTemplates + " = " + Strings.toString(expression) + ">"; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ExpressionRoleMapping that = (ExpressionRoleMapping) o; + return this.enabled == that.enabled && + Objects.equals(this.name, that.name) && + Objects.equals(this.expression, that.expression) && + Objects.equals(this.roles, that.roles) && + Objects.equals(this.roleTemplates, that.roleTemplates) && + Objects.equals(this.metadata, that.metadata); + } + + @Override + public int hashCode() { + return Objects.hash(name, expression, roles, roleTemplates, metadata, enabled); } /** @@ -157,7 +208,7 @@ public static ExpressionRoleMapping parse(String name, BytesReference source, XC */ public static ExpressionRoleMapping parse(String name, XContentParser parser) throws IOException { try { - final Builder builder = PARSER.parse(parser, null); + final Builder builder = PARSER.parse(parser, name); return builder.build(name); } catch (IllegalArgumentException | IllegalStateException e) { throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); @@ -166,38 +217,55 @@ public static ExpressionRoleMapping parse(String name, XContentParser parser) th /** * Converts this {@link ExpressionRoleMapping} into XContent that is compatible with - * the format handled by {@link #parse(String, XContentParser)}. + * the format handled by {@link #parse(String, BytesReference, XContentType)}. 
*/ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return toXContent(builder, params, false); } - public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeDocType) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean indexFormat) throws IOException { builder.startObject(); builder.field(Fields.ENABLED.getPreferredName(), enabled); - builder.startArray(Fields.ROLES.getPreferredName()); - for (String r : roles) { - builder.value(r); + if (roles.isEmpty() == false) { + builder.startArray(Fields.ROLES.getPreferredName()); + for (String r : roles) { + builder.value(r); + } + builder.endArray(); + } + if (roleTemplates.isEmpty() == false) { + builder.startArray(Fields.ROLE_TEMPLATES.getPreferredName()); + for (TemplateRoleName r : roleTemplates) { + builder.value(r); + } + builder.endArray(); } - builder.endArray(); builder.field(Fields.RULES.getPreferredName()); expression.toXContent(builder, params); builder.field(Fields.METADATA.getPreferredName(), metadata); - if (includeDocType) { + if (indexFormat) { builder.field(NativeRoleMappingStoreField.DOC_TYPE_FIELD, NativeRoleMappingStoreField.DOC_TYPE_ROLE_MAPPING); } return builder.endObject(); } + public Set getRoleNames(ScriptService scriptService, ExpressionModel model) { + return Stream.concat(this.roles.stream(), + this.roleTemplates.stream() + .flatMap(r -> r.getRoleNames(scriptService, model).stream()) + ).collect(Collectors.toSet()); + } + /** * Used to facilitate the use of {@link ObjectParser} (via {@link #PARSER}). */ private static class Builder { private RoleMapperExpression rules; private List roles; + private List roleTemplates; private Map metadata = Collections.emptyMap(); private Boolean enabled; @@ -207,7 +275,12 @@ Builder rules(RoleMapperExpression expression) { } Builder roles(List roles) { - this.roles = roles; + this.roles = new ArrayList<>(roles); + return this; + } + + Builder roleTemplates(List templates) { + this.roleTemplates = new ArrayList<>(templates); return this; } @@ -222,7 +295,7 @@ Builder enabled(boolean enabled) { } private ExpressionRoleMapping build(String name) { - if (roles == null) { + if (roles == null && roleTemplates == null) { throw missingField(name, Fields.ROLES); } if (rules == null) { @@ -231,17 +304,17 @@ private ExpressionRoleMapping build(String name) { if (enabled == null) { throw missingField(name, Fields.ENABLED); } - return new ExpressionRoleMapping(name, rules, roles, metadata, enabled); + return new ExpressionRoleMapping(name, rules, roles, roleTemplates, metadata, enabled); } private IllegalStateException missingField(String id, ParseField field) { return new IllegalStateException("failed to parse role-mapping [" + id + "]. 
missing field [" + field + "]"); } - } public interface Fields { ParseField ROLES = new ParseField("roles"); + ParseField ROLE_TEMPLATES = new ParseField("role_templates"); ParseField ENABLED = new ParseField("enabled"); ParseField RULES = new ParseField("rules"); ParseField METADATA = new ParseField("metadata"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java new file mode 100644 index 000000000000..d77882d6454d --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.elasticsearch.xpack.core.security.support.MustacheTemplateEvaluator; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Representation of a Mustache template for expressing one or more roles names in a {@link ExpressionRoleMapping}. 
+ */ +public class TemplateRoleName implements ToXContent, Writeable { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "role-mapping-template", false, arr -> new TemplateRoleName((BytesReference) arr[0], (Format) arr[1])); + + static { + PARSER.declareField(constructorArg(), TemplateRoleName::extractTemplate, Fields.TEMPLATE, ObjectParser.ValueType.OBJECT_OR_STRING); + PARSER.declareField(optionalConstructorArg(), Format::fromXContent, Fields.FORMAT, ObjectParser.ValueType.STRING); + } + + private final BytesReference template; + private final Format format; + + public TemplateRoleName(BytesReference template, Format format) { + this.template = template; + this.format = format == null ? Format.STRING : format; + } + + public TemplateRoleName(StreamInput in) throws IOException { + this.template = in.readBytesReference(); + this.format = in.readEnum(Format.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBytesReference(template); + out.writeEnum(format); + } + + public BytesReference getTemplate() { + return template; + } + + public Format getFormat() { + return format; + } + + public List getRoleNames(ScriptService scriptService, ExpressionModel model) { + try { + final String evaluation = parseTemplate(scriptService, model.asMap()); + switch (format) { + case STRING: + return Collections.singletonList(evaluation); + case JSON: + return convertJsonToList(evaluation); + default: + throw new IllegalStateException("Unsupported format [" + format + "]"); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private List convertJsonToList(String evaluation) throws IOException { + final XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, evaluation); + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.VALUE_STRING) { + return Collections.singletonList(parser.text()); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.list().stream() + .filter(Objects::nonNull) + .map(o -> { + if (o instanceof String) { + return (String) o; + } else { + throw new XContentParseException( + "Roles array may only contain strings but found [" + o.getClass().getName() + "] [" + o + "]"); + } + }).collect(Collectors.toList()); + } else { + throw new XContentParseException( + "Roles template must generate a string or an array of strings, but found [" + token + "]"); + } + } + + private String parseTemplate(ScriptService scriptService, Map parameters) throws IOException { + final XContentParser parser = XContentHelper.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, template, XContentType.JSON); + return MustacheTemplateEvaluator.evaluate(scriptService, parser, parameters); + } + + private static BytesReference extractTemplate(XContentParser parser, Void ignore) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new BytesArray(parser.text()); + } else { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.generator().copyCurrentStructure(parser); + return BytesReference.bytes(builder); + } + } + + static TemplateRoleName parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public String toString() { + return "template-" + format + "{" + 
template.utf8ToString() + "}"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(Fields.TEMPLATE.getPreferredName(), template.utf8ToString()) + .field(Fields.FORMAT.getPreferredName(), format.formatName()) + .endObject(); + } + + @Override + public boolean isFragment() { + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TemplateRoleName that = (TemplateRoleName) o; + return Objects.equals(this.template, that.template) && + this.format == that.format; + } + + @Override + public int hashCode() { + return Objects.hash(template, format); + } + + private interface Fields { + ParseField TEMPLATE = new ParseField("template"); + ParseField FORMAT = new ParseField("format"); + } + + public enum Format { + JSON, STRING; + + private static Format fromXContent(XContentParser parser) throws IOException { + final XContentParser.Token token = parser.currentToken(); + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), + "Expected [" + XContentParser.Token.VALUE_STRING + "] but found [" + token + "]"); + } + final String text = parser.text(); + try { + return Format.valueOf(text.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + String valueNames = Stream.of(values()).map(Format::formatName).collect(Collectors.joining(",")); + throw new XContentParseException(parser.getTokenLocation(), + "Invalid format [" + text + "] expected one of [" + valueNames + "]"); + } + + } + + public String formatName() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java index 8d43f864878a..d12cc67dcca1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; import org.elasticsearch.common.Numbers; -import org.elasticsearch.common.collect.Tuple; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -22,10 +22,13 @@ public class ExpressionModel { public static final Predicate NULL_PREDICATE = field -> field.getValue() == null; - private Map>> fields; + + private final Map fieldValues; + private final Map> fieldPredicates; public ExpressionModel() { - this.fields = new HashMap<>(); + this.fieldValues = new HashMap<>(); + this.fieldPredicates = new HashMap<>(); } /** @@ -41,7 +44,8 @@ public ExpressionModel defineField(String name, Object value) { * Defines a field using a supplied predicate. */ public ExpressionModel defineField(String name, Object value, Predicate predicate) { - this.fields.put(name, new Tuple<>(value, predicate)); + this.fieldValues.put(name, value); + this.fieldPredicates.put(name, predicate); return this; } @@ -49,13 +53,7 @@ public ExpressionModel defineField(String name, Object value, Predicateany of the provided values. 
*/ public boolean test(String field, List values) { - final Tuple> tuple = this.fields.get(field); - final Predicate predicate; - if (tuple == null) { - predicate = NULL_PREDICATE; - } else { - predicate = tuple.v2(); - } + final Predicate predicate = this.fieldPredicates.getOrDefault(field, NULL_PREDICATE); return values.stream().anyMatch(predicate); } @@ -103,4 +101,12 @@ private static boolean numberEquals(Number left, Object other) { return Numbers.toLongExact(left) == Numbers.toLongExact(right); } + public Map asMap() { + return Collections.unmodifiableMap(fieldValues); + } + + @Override + public String toString() { + return fieldValues.toString(); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java index 0e681b110efa..bea4bbb1cc8f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Objects; /** * An expression that evaluates to true if a field (map element) matches @@ -151,6 +152,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.value(value); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final FieldValue that = (FieldValue) o; + return Objects.equals(this.value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(value); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java index 0cd4e8a8b0dd..da6af4ec7cbd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java @@ -104,15 +104,18 @@ public ResourcePrivilegesMap checkResourcePrivileges(final String applicationNam for (String checkResource : checkForResources) { for (String checkPrivilegeName : checkForPrivilegeNames) { final Set nameSet = Collections.singleton(checkPrivilegeName); - final ApplicationPrivilege checkPrivilege = ApplicationPrivilege.get(applicationName, nameSet, storedPrivileges); - assert checkPrivilege.getApplication().equals(applicationName) : "Privilege " + checkPrivilege + " should have application " - + applicationName; - assert checkPrivilege.name().equals(nameSet) : "Privilege " + checkPrivilege + " should have name " + nameSet; - - if (grants(checkPrivilege, checkResource)) { - resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.TRUE); - } else { - resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.FALSE); + final Set checkPrivileges = ApplicationPrivilege.get(applicationName, nameSet, storedPrivileges); + logger.trace("Resolved privileges [{}] for [{},{}]", checkPrivileges, applicationName, nameSet); + for 
(ApplicationPrivilege checkPrivilege : checkPrivileges) { + assert Automatons.predicate(applicationName).test(checkPrivilege.getApplication()) : "Privilege " + checkPrivilege + + " should have application " + applicationName; + assert checkPrivilege.name().equals(nameSet) : "Privilege " + checkPrivilege + " should have name " + nameSet; + + if (grants(checkPrivilege, checkResource)) { + resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.TRUE); + } else { + resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.FALSE); + } } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java index 13db17a63bb0..c4460b000e6d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authz.privilege; import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.core.security.support.Automatons; import java.util.Arrays; import java.util.Collection; @@ -15,6 +16,7 @@ import java.util.Objects; import java.util.Set; import java.util.function.Function; +import java.util.function.Predicate; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -101,7 +103,7 @@ private static void validateApplicationName(String application, boolean allowWil if (allowWildcard == false) { throw new IllegalArgumentException("Application names may not contain '*' (found '" + application + "')"); } - if(application.equals("*")) { + if (application.equals("*")) { // this is allowed and short-circuiting here makes the later validation simpler return; } @@ -128,7 +130,10 @@ private static void validateApplicationName(String application, boolean allowWil } if (parts.length > 1) { - final String suffix = parts[1]; + String suffix = parts[1]; + if (allowWildcard && suffix.endsWith("*")) { + suffix = suffix.substring(0, suffix.length() - 1); + } if (Strings.validFileName(suffix) == false) { throw new IllegalArgumentException("An application name suffix may not contain any of the characters '" + Strings.collectionToDelimitedString(Strings.INVALID_FILENAME_CHARS, "") + "' (found '" + suffix + "')"); @@ -165,20 +170,38 @@ public static void validatePrivilegeOrActionName(String name) { } /** - * Finds or creates an application privileges with the provided names. + * Finds or creates a collection of application privileges with the provided names. + * If application is a wildcard, it will be expanded to all matching application names in {@code stored} * Each element in {@code name} may be the name of a stored privilege (to be resolved from {@code stored}, or a bespoke action pattern. 
*/ - public static ApplicationPrivilege get(String application, Set name, Collection stored) { + public static Set get(String application, Set name, Collection stored) { if (name.isEmpty()) { - return NONE.apply(application); + return Collections.singleton(NONE.apply(application)); + } else if (application.contains("*")) { + Predicate predicate = Automatons.predicate(application); + final Set result = stored.stream() + .map(ApplicationPrivilegeDescriptor::getApplication) + .filter(predicate) + .distinct() + .map(appName -> resolve(appName, name, stored)) + .collect(Collectors.toSet()); + if (result.isEmpty()) { + return Collections.singleton(resolve(application, name, Collections.emptyMap())); + } else { + return result; + } } else { - Map lookup = stored.stream() - .filter(apd -> apd.getApplication().equals(application)) - .collect(Collectors.toMap(ApplicationPrivilegeDescriptor::getName, Function.identity())); - return resolve(application, name, lookup); + return Collections.singleton(resolve(application, name, stored)); } } + private static ApplicationPrivilege resolve(String application, Set name, Collection stored) { + final Map lookup = stored.stream() + .filter(apd -> apd.getApplication().equals(application)) + .collect(Collectors.toMap(ApplicationPrivilegeDescriptor::getName, Function.identity())); + return resolve(application, name, lookup); + } + private static ApplicationPrivilege resolve(String application, Set names, Map lookup) { final int size = names.size(); if (size == 0) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java index 85d6aad3e356..613f64f93b54 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java @@ -23,6 +23,8 @@ import java.util.Objects; import java.util.Set; +import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString; + /** * An {@code ApplicationPrivilegeDescriptor} is a representation of a stored {@link ApplicationPrivilege}. * A user (via a role) can be granted an application privilege by name (e.g. ("myapp", "read"). @@ -104,6 +106,11 @@ public XContentBuilder toXContent(XContentBuilder builder, boolean includeTypeFi return builder.endObject(); } + @Override + public String toString() { + return getClass().getSimpleName() + "{[" + application + "],[" + name + "],[" + collectionToCommaDelimitedString(actions) + "]}"; + } + /** * Construct a new {@link ApplicationPrivilegeDescriptor} from XContent. 
* diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index fe8ef032e8fe..d2745e4ef8e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -158,10 +158,18 @@ private static Map initializeReservedRoles() { null, MetadataUtils.DEFAULT_RESERVED_METADATA)) .put("data_frame_transforms_admin", new RoleDescriptor("data_frame_transforms_admin", new String[] { "manage_data_frame_transforms" }, - null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + new RoleDescriptor.IndicesPrivileges[]{ + RoleDescriptor.IndicesPrivileges.builder() + .indices(".data-frame-notifications*") + .privileges("view_index_metadata", "read").build() + }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("data_frame_transforms_user", new RoleDescriptor("data_frame_transforms_user", new String[] { "monitor_data_frame_transforms" }, - null, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) + new RoleDescriptor.IndicesPrivileges[]{ + RoleDescriptor.IndicesPrivileges.builder() + .indices(".data-frame-notifications*") + .privileges("view_index_metadata", "read").build() + }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .put("watcher_admin", new RoleDescriptor("watcher_admin", new String[] { "manage_watcher" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder().indices(Watch.INDEX, TriggeredWatchStoreField.INDEX_NAME, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java index 951c4acf10d0..73a1d7fcde50 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java @@ -11,10 +11,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.xpack.core.security.support.MustacheTemplateEvaluator; import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; @@ -66,27 +64,19 @@ public static String evaluateTemplate(final String querySource, final ScriptServ if (token != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("Unexpected token [" + token + "]"); } - Script script = Script.parse(parser); - // Add the user details to the params - Map params = new HashMap<>(); - if (script.getParams() != null) { - params.putAll(script.getParams()); - } Map userModel = new HashMap<>(); userModel.put("username", user.principal()); userModel.put("full_name", user.fullName()); userModel.put("email", user.email()); userModel.put("roles", Arrays.asList(user.roles())); userModel.put("metadata", 
Collections.unmodifiableMap(user.metadata())); - params.put("_user", userModel); - // Always enforce mustache script lang: - script = new Script(script.getType(), script.getType() == ScriptType.STORED ? null : "mustache", script.getIdOrCode(), - script.getOptions(), params); - TemplateScript compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(script.getParams()); - return compiledTemplate.execute(); + Map extraParams = Collections.singletonMap("_user", userModel); + + return MustacheTemplateEvaluator.evaluate(scriptService, parser, extraParams); } else { return querySource; } } } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MustacheTemplateEvaluator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MustacheTemplateEvaluator.java new file mode 100644 index 000000000000..02f730333de3 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MustacheTemplateEvaluator.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.support; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for evaluating Mustache templates at runtime. + */ +public final class MustacheTemplateEvaluator { + + private MustacheTemplateEvaluator() { + throw new UnsupportedOperationException("Cannot construct " + MustacheTemplateEvaluator.class); + } + + public static String evaluate(ScriptService scriptService, XContentParser parser, Map extraParams) throws IOException { + Script script = Script.parse(parser); + // Add the user details to the params + Map params = new HashMap<>(); + if (script.getParams() != null) { + params.putAll(script.getParams()); + } + extraParams.forEach(params::put); + // Always enforce mustache script lang: + script = new Script(script.getType(), script.getType() == ScriptType.STORED ? 
null : "mustache", script.getIdOrCode(), + script.getOptions(), params); + TemplateScript compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(script.getParams()); + return compiledTemplate.execute(); + } +} diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json index 94bb2b03ee04..f4e3cd6db020 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -45,6 +45,16 @@ "roles" : { "type" : "keyword" }, + "role_templates" : { + "properties": { + "template" : { + "type": "text" + }, + "format" : { + "type": "keyword" + } + } + }, "password" : { "type" : "keyword", "index" : false, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java new file mode 100644 index 000000000000..8fb425698376 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common.notifications; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; +import org.junit.Before; + +import java.util.Date; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class AbstractAuditMessageTests extends AbstractXContentTestCase { + private long startMillis; + + static class TestAuditMessage extends AbstractAuditMessage { + private static final ParseField ID = new ParseField("test_id"); + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + AbstractAuditMessage.TYPE.getPreferredName(), + true, + a -> new TestAuditMessage((String)a[0], (String)a[1], (Level)a[2], (Date)a[3], (String)a[4])); + + static { + PARSER.declareString(optionalConstructorArg(), ID); + PARSER.declareString(constructorArg(), MESSAGE); + PARSER.declareField(constructorArg(), p -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return Level.fromString(p.text()); + } + throw new IllegalArgumentException("Unsupported token [" + p.currentToken() + "]"); + }, LEVEL, ObjectParser.ValueType.STRING); + PARSER.declareField(constructorArg(), parser -> { + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return new Date(parser.longValue()); + } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new Date(TimeUtils.dateStringToEpoch(parser.text())); + } + throw new IllegalArgumentException( + "unexpected token [" + parser.currentToken() + "] for [" + TIMESTAMP.getPreferredName() + "]"); + }, TIMESTAMP, ObjectParser.ValueType.VALUE); + 
PARSER.declareString(optionalConstructorArg(), NODE_NAME); + } + + TestAuditMessage(String resourceId, String message, Level level, String nodeName) { + super(resourceId, message, level, nodeName); + } + + TestAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + super(resourceId, message, level, timestamp, nodeName); + } + + @Override + protected String getResourceField() { + return "test_id"; + } + + static AbstractAuditMessage.AbstractBuilder newBuilder() { + return new AbstractBuilder() { + @Override + protected TestAuditMessage newMessage(Level level, String resourceId, String message, String nodeName) { + return new TestAuditMessage(resourceId, message, level, nodeName); + } + }; + } + } + + @Before + public void setStartTime() { + startMillis = System.currentTimeMillis(); + } + + public void testNewInfo() { + TestAuditMessage info = TestAuditMessage.newBuilder().info("foo", "some info", "some_node"); + assertEquals("foo", info.getResourceId()); + assertEquals("some info", info.getMessage()); + assertEquals(Level.INFO, info.getLevel()); + assertDateBetweenStartAndNow(info.getTimestamp()); + } + + public void testNewWarning() { + TestAuditMessage warning = TestAuditMessage.newBuilder().warning("bar", "some warning", "some_node"); + assertEquals("bar", warning.getResourceId()); + assertEquals("some warning", warning.getMessage()); + assertEquals(Level.WARNING, warning.getLevel()); + assertDateBetweenStartAndNow(warning.getTimestamp()); + } + + + public void testNewError() { + TestAuditMessage error = TestAuditMessage.newBuilder().error("foo", "some error", "some_node"); + assertEquals("foo", error.getResourceId()); + assertEquals("some error", error.getMessage()); + assertEquals(Level.ERROR, error.getLevel()); + assertDateBetweenStartAndNow(error.getTimestamp()); + } + + private void assertDateBetweenStartAndNow(Date timestamp) { + long timestampMillis = timestamp.getTime(); + assertTrue(timestampMillis >= startMillis); + assertTrue(timestampMillis <= System.currentTimeMillis()); + } + + @Override + protected TestAuditMessage doParseInstance(XContentParser parser) { + return TestAuditMessage.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected TestAuditMessage createTestInstance() { + return new TestAuditMessage(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 200), + randomFrom(Level.values()), randomAlphaOfLengthBetween(1, 20)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AuditorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AuditorTests.java new file mode 100644 index 000000000000..1389af62dc71 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AuditorTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.common.notifications; + +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class AuditorTests extends ESTestCase { + private Client client; + private ArgumentCaptor indexRequestCaptor; + private static final String TEST_ORIGIN = "test_origin"; + private static final String TEST_INDEX = "test_index"; + private static final AbstractAuditMessage.AbstractBuilder builder = + AbstractAuditMessageTests.TestAuditMessage.newBuilder(); + + @Before + public void setUpMocks() { + client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + + indexRequestCaptor = ArgumentCaptor.forClass(IndexRequest.class); + } + + public void testInfo() throws IOException { + Auditor auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN, builder); + auditor.info("foo", "Here is my info"); + + verify(client).index(indexRequestCaptor.capture(), any()); + IndexRequest indexRequest = indexRequestCaptor.getValue(); + assertArrayEquals(new String[] {TEST_INDEX}, indexRequest.indices()); + assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); + AbstractAuditMessageTests.TestAuditMessage auditMessage = parseAuditMessage(indexRequest.source()); + assertEquals("foo", auditMessage.getResourceId()); + assertEquals("Here is my info", auditMessage.getMessage()); + assertEquals(Level.INFO, auditMessage.getLevel()); + } + + public void testWarning() throws IOException { + Auditor auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN, builder); + auditor.warning("bar", "Here is my warning"); + + verify(client).index(indexRequestCaptor.capture(), any()); + IndexRequest indexRequest = indexRequestCaptor.getValue(); + assertArrayEquals(new String[] {TEST_INDEX}, indexRequest.indices()); + assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); + AbstractAuditMessageTests.TestAuditMessage auditMessage = parseAuditMessage(indexRequest.source()); + assertEquals("bar", auditMessage.getResourceId()); + assertEquals("Here is my warning", auditMessage.getMessage()); + assertEquals(Level.WARNING, auditMessage.getLevel()); + } + + public void testError() throws IOException { + Auditor auditor = new Auditor<>(client, "node_1", TEST_INDEX, TEST_ORIGIN, builder); + auditor.error("foobar", "Here is my error"); + + verify(client).index(indexRequestCaptor.capture(), any()); + IndexRequest indexRequest = indexRequestCaptor.getValue(); + assertArrayEquals(new String[] {TEST_INDEX}, 
indexRequest.indices()); + assertEquals(TimeValue.timeValueSeconds(5), indexRequest.timeout()); + AbstractAuditMessageTests.TestAuditMessage auditMessage = parseAuditMessage(indexRequest.source()); + assertEquals("foobar", auditMessage.getResourceId()); + assertEquals("Here is my error", auditMessage.getMessage()); + assertEquals(Level.ERROR, auditMessage.getLevel()); + } + + private AbstractAuditMessageTests.TestAuditMessage parseAuditMessage(BytesReference msg) throws IOException { + XContentParser parser = XContentFactory.xContent(XContentHelper.xContentType(msg)) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, msg.streamInput()); + return AbstractAuditMessageTests.TestAuditMessage.PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java new file mode 100644 index 000000000000..a66d230b4678 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/LevelTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.common.notifications; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class LevelTests extends ESTestCase { + + public void testFromString() { + assertEquals(Level.INFO, Level.fromString("info")); + assertEquals(Level.INFO, Level.fromString("INFO")); + assertEquals(Level.WARNING, Level.fromString("warning")); + assertEquals(Level.WARNING, Level.fromString("WARNING")); + assertEquals(Level.ERROR, Level.fromString("error")); + assertEquals(Level.ERROR, Level.fromString("ERROR")); + } + + public void testToString() { + assertEquals("info", Level.INFO.toString()); + assertEquals("warning", Level.WARNING.toString()); + assertEquals("error", Level.ERROR.toString()); + } + + public void testValidOrdinals() { + assertThat(Level.INFO.ordinal(), equalTo(0)); + assertThat(Level.WARNING.ordinal(), equalTo(1)); + assertThat(Level.ERROR.ordinal(), equalTo(2)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java index 516b79159167..d6e32bd46c43 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java @@ -44,6 +44,7 @@ public void testInvalidTransforms() throws IOException { assertWarnings(LoggerMessageFormat.format(Response.INVALID_TRANSFORMS_DEPRECATION_WARNING, 2)); } + @SuppressWarnings("unchecked") public void testNoHeaderInResponse() throws IOException { List transforms = new ArrayList<>(); @@ -62,7 +63,8 @@ public void testNoHeaderInResponse() throws IOException { assertEquals(transforms.size(), transformsResponse.size()); for (int i = 0; i < transforms.size(); ++i) { - assertEquals(transforms.get(i).getSource(), XContentMapValues.extractValue("source", transformsResponse.get(i))); + 
assertArrayEquals(transforms.get(i).getSource().getIndex(), + ((ArrayList)XContentMapValues.extractValue("source.index", transformsResponse.get(i))).toArray(new String[0])); assertEquals(null, XContentMapValues.extractValue("headers", transformsResponse.get(i))); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java index 4593efe48983..1936dda9117e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java @@ -17,13 +17,14 @@ import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction.Request; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.QueryConfigTests; +import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; import org.junit.Before; import java.io.IOException; import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; public class PreviewDataFrameTransformActionRequestTests extends AbstractStreamableXContentTestCase { @@ -65,8 +66,8 @@ protected boolean supportsUnknownFields() { @Override protected Request createTestInstance() { - DataFrameTransformConfig config = new DataFrameTransformConfig("transform-preview", randomAlphaOfLength(10), - "unused-transform-preview-index", null, QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + DataFrameTransformConfig config = new DataFrameTransformConfig("transform-preview", randomSourceConfig(), + new DestConfig("unused-transform-preview-index"), null, PivotConfigTests.randomPivotConfig()); return new Request(config); } @@ -74,8 +75,9 @@ public void testParsingOverwritesIdAndDestFields() throws IOException { // id & dest fields will be set by the parser BytesArray json = new BytesArray( "{ " + - "\"source\":\"foo\", " + - "\"query\": {\"match_all\": {}}," + + "\"source\":{" + + " \"index\":\"foo\", " + + " \"query\": {\"match_all\": {}}}," + "\"pivot\": {" + "\"group_by\": {\"destination-field2\": {\"terms\": {\"field\": \"term-field\"}}}," + "\"aggs\": {\"avg_response\": {\"avg\": {\"field\": \"responsetime\"}}}" + @@ -87,7 +89,7 @@ public void testParsingOverwritesIdAndDestFields() throws IOException { Request request = Request.fromXContent(parser); assertEquals("transform-preview", request.getConfig().getId()); - assertEquals("unused-transform-preview-index", request.getConfig().getDestination()); + assertEquals("unused-transform-preview-index", request.getConfig().getDestination().getIndex()); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java index 976db70c45f4..6220a08fb108 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java @@ -13,7 +13,7 @@ public class StartDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { - return new Request(randomAlphaOfLengthBetween(1, 20)); + return new Request(randomAlphaOfLengthBetween(1, 20), randomBoolean()); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java new file mode 100644 index 000000000000..8d3d8e3ac789 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class StartDataFrameTransformTaskActionRequestTests extends + AbstractWireSerializingTestCase { + @Override + protected StartDataFrameTransformTaskAction.Request createTestInstance() { + return new StartDataFrameTransformTaskAction.Request(randomAlphaOfLength(4)); + } + + @Override + protected Writeable.Reader instanceReader() { + return StartDataFrameTransformTaskAction.Request::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java new file mode 100644 index 000000000000..62165f87968e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class StartDataFrameTransformTaskActionResponseTests extends + AbstractWireSerializingTestCase { + @Override + protected StartDataFrameTransformTaskAction.Response createTestInstance() { + return new StartDataFrameTransformTaskAction.Response(randomBoolean()); + } + + @Override + protected Writeable.Reader instanceReader() { + return StartDataFrameTransformTaskAction.Response::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java index 767725a564a5..c117e249aef9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java @@ -16,7 +16,7 @@ public class StopDataFrameTransformActionRequestTests extends AbstractWireSerial @Override protected Request createTestInstance() { TimeValue timeout = randomBoolean() ? TimeValue.timeValueMinutes(randomIntBetween(1, 10)) : null; - return new Request(randomAlphaOfLengthBetween(1, 10), randomBoolean(), timeout); + return new Request(randomAlphaOfLengthBetween(1, 10), randomBoolean(), randomBoolean(), timeout); } @Override @@ -27,9 +27,10 @@ protected Writeable.Reader instanceReader() { public void testSameButDifferentTimeout() { String id = randomAlphaOfLengthBetween(1, 10); boolean waitForCompletion = randomBoolean(); + boolean force = randomBoolean(); - Request r1 = new Request(id, waitForCompletion, TimeValue.timeValueSeconds(10)); - Request r2 = new Request(id, waitForCompletion, TimeValue.timeValueSeconds(20)); + Request r1 = new Request(id, waitForCompletion, force, TimeValue.timeValueSeconds(10)); + Request r2 = new Request(id, waitForCompletion, force, TimeValue.timeValueSeconds(20)); assertNotEquals(r1,r2); assertNotEquals(r1.hashCode(),r2.hashCode()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java new file mode 100644 index 000000000000..e845dd76fc67 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.dataframe.notifications; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.common.notifications.Level; +import org.junit.Before; + +import java.util.Date; + +public class DataFrameAuditMessageTests extends AbstractXContentTestCase { + private long startMillis; + + @Before + public void setStartTime() { + startMillis = System.currentTimeMillis(); + } + + public void testNewInfo() { + DataFrameAuditMessage info = DataFrameAuditMessage.builder().info("foo", "some info", "some_node"); + assertEquals("foo", info.getResourceId()); + assertEquals("some info", info.getMessage()); + assertEquals(Level.INFO, info.getLevel()); + assertDateBetweenStartAndNow(info.getTimestamp()); + } + + public void testNewWarning() { + DataFrameAuditMessage warning = DataFrameAuditMessage.builder().warning("bar", "some warning", "some_node"); + assertEquals("bar", warning.getResourceId()); + assertEquals("some warning", warning.getMessage()); + assertEquals(Level.WARNING, warning.getLevel()); + assertDateBetweenStartAndNow(warning.getTimestamp()); + } + + + public void testNewError() { + DataFrameAuditMessage error = DataFrameAuditMessage.builder().error("foo", "some error", "some_node"); + assertEquals("foo", error.getResourceId()); + assertEquals("some error", error.getMessage()); + assertEquals(Level.ERROR, error.getLevel()); + assertDateBetweenStartAndNow(error.getTimestamp()); + } + + private void assertDateBetweenStartAndNow(Date timestamp) { + long timestampMillis = timestamp.getTime(); + assertTrue(timestampMillis >= startMillis); + assertTrue(timestampMillis <= System.currentTimeMillis()); + } + + @Override + protected DataFrameAuditMessage doParseInstance(XContentParser parser) { + return DataFrameAuditMessage.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected DataFrameAuditMessage createTestInstance() { + return new DataFrameAuditMessage( + randomBoolean() ? null : randomAlphaOfLength(10), + randomAlphaOfLengthBetween(1, 20), + randomFrom(Level.values()), + randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 20) + ); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java index 94530890ed0a..cb0f25ea89c0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java @@ -22,6 +22,9 @@ import java.util.Map; import static org.elasticsearch.test.TestMatchers.matchesPattern; +import static org.elasticsearch.xpack.core.dataframe.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomInvalidSourceConfig; +import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameTestCase { @@ -29,35 +32,30 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT private boolean runWithHeaders; public static DataFrameTransformConfig randomDataFrameTransformConfigWithoutHeaders() { - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10), null, QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + return randomDataFrameTransformConfigWithoutHeaders(randomAlphaOfLengthBetween(1, 10)); } public static DataFrameTransformConfig randomDataFrameTransformConfig() { - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10), randomHeaders(), QueryConfigTests.randomQueryConfig(), - PivotConfigTests.randomPivotConfig()); + return randomDataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10)); } public static DataFrameTransformConfig randomDataFrameTransformConfigWithoutHeaders(String id) { - return new DataFrameTransformConfig(id, randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), null, - QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + return new DataFrameTransformConfig(id, randomSourceConfig(), randomDestConfig(), null, + PivotConfigTests.randomPivotConfig()); } public static DataFrameTransformConfig randomDataFrameTransformConfig(String id) { - return new DataFrameTransformConfig(id, randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), randomHeaders(), - QueryConfigTests.randomQueryConfig(), PivotConfigTests.randomPivotConfig()); + return new DataFrameTransformConfig(id, randomSourceConfig(), randomDestConfig(), randomHeaders(), + PivotConfigTests.randomPivotConfig()); } public static DataFrameTransformConfig randomInvalidDataFrameTransformConfig() { if (randomBoolean()) { - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10), randomHeaders(), QueryConfigTests.randomInvalidQueryConfig(), - PivotConfigTests.randomPivotConfig()); + return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomInvalidSourceConfig(), + randomDestConfig(), randomHeaders(), PivotConfigTests.randomPivotConfig()); } // else - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - 
randomAlphaOfLengthBetween(1, 10), randomHeaders(), QueryConfigTests.randomQueryConfig(), - PivotConfigTests.randomInvalidPivotConfig()); + return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), + randomDestConfig(), randomHeaders(), PivotConfigTests.randomInvalidPivotConfig()); } @Before @@ -99,8 +97,8 @@ private static Map randomHeaders() { public void testDefaultMatchAll() throws IOException { String pivotTransform = "{" - + " \"source\" : \"src\"," - + " \"dest\" : \"dest\"," + + " \"source\" : {\"index\":\"src\"}," + + " \"dest\" : {\"index\": \"dest\"}," + " \"pivot\" : {" + " \"group_by\": {" + " \"id\": {" @@ -114,8 +112,8 @@ public void testDefaultMatchAll() throws IOException { + "} } } } }"; DataFrameTransformConfig dataFrameTransformConfig = createDataFrameTransformConfigFromString(pivotTransform, "test_match_all"); - assertNotNull(dataFrameTransformConfig.getQueryConfig()); - assertTrue(dataFrameTransformConfig.getQueryConfig().isValid()); + assertNotNull(dataFrameTransformConfig.getSource().getQueryConfig()); + assertTrue(dataFrameTransformConfig.getSource().getQueryConfig().isValid()); try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { XContentBuilder content = dataFrameTransformConfig.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); @@ -128,8 +126,8 @@ public void testDefaultMatchAll() throws IOException { public void testPreventHeaderInjection() throws IOException { String pivotTransform = "{" + " \"headers\" : {\"key\" : \"value\" }," - + " \"source\" : \"src\"," - + " \"dest\" : \"dest\"," + + " \"source\" : {\"index\":\"src\"}," + + " \"dest\" : {\"index\": \"dest\"}," + " \"pivot\" : {" + " \"group_by\": {" + " \"id\": {" @@ -167,8 +165,8 @@ public void testXContentForInternalStorage() throws IOException { public void testSetIdInBody() throws IOException { String pivotTransform = "{" + " \"id\" : \"body_id\"," - + " \"source\" : \"src\"," - + " \"dest\" : \"dest\"," + + " \"source\" : {\"index\":\"src\"}," + + " \"dest\" : {\"index\": \"dest\"}," + " \"pivot\" : {" + " \"group_by\": {" + " \"id\": {" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java index 1f8eac1f5ba0..341faafdf12a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java @@ -14,11 +14,16 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; +import java.util.function.Predicate; public class DataFrameTransformStateTests extends AbstractSerializingTestCase { public static DataFrameTransformState randomDataFrameTransformState() { - return new DataFrameTransformState(randomFrom(IndexerState.values()), randomPosition(), randomLongBetween(0,10)); + return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()), + randomFrom(IndexerState.values()), + randomPosition(), + randomLongBetween(0,10), + randomBoolean() ? 
null : randomAlphaOfLength(10)); } @Override @@ -53,4 +58,14 @@ private static Map randomPosition() { } return position; } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskStateTests.java new file mode 100644 index 000000000000..62c73846f59e --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskStateTests.java @@ -0,0 +1,81 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class DataFrameTransformTaskStateTests extends ESTestCase { + + public void testValidOrdinals() { + assertThat(DataFrameTransformTaskState.STOPPED.ordinal(), equalTo(0)); + assertThat(DataFrameTransformTaskState.STARTED.ordinal(), equalTo(1)); + assertThat(DataFrameTransformTaskState.FAILED.ordinal(), equalTo(2)); + } + + public void testwriteTo() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + DataFrameTransformTaskState.STOPPED.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(0)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + DataFrameTransformTaskState.STARTED.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(1)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + DataFrameTransformTaskState.FAILED.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(2)); + } + } + } + + public void testReadFrom() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(0); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DataFrameTransformTaskState.fromStream(in), equalTo(DataFrameTransformTaskState.STOPPED)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(1); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DataFrameTransformTaskState.fromStream(in), equalTo(DataFrameTransformTaskState.STARTED)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(2); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DataFrameTransformTaskState.fromStream(in), equalTo(DataFrameTransformTaskState.FAILED)); + } + } + } + + public void testInvalidReadFrom() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(randomIntBetween(3, Integer.MAX_VALUE)); + try (StreamInput in = out.bytes().streamInput()) { + DataFrameTransformTaskState.fromStream(in); + fail("Expected IOException"); + } catch(IOException e) { + assertThat(e.getMessage(), 
containsString("Unknown DataFrameTransformTaskState ordinal [")); + } + + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java new file mode 100644 index 000000000000..b29fa46c34ed --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.junit.Before; + +import java.io.IOException; + +public class DestConfigTests extends AbstractSerializingDataFrameTestCase { + + private boolean lenient; + + public static DestConfig randomDestConfig() { + return new DestConfig(randomAlphaOfLength(10)); + } + + @Before + public void setRandomFeatures() { + lenient = randomBoolean(); + } + + @Override + protected DestConfig doParseInstance(XContentParser parser) throws IOException { + return DestConfig.fromXContent(parser, lenient); + } + + @Override + protected boolean supportsUnknownFields() { + return lenient; + } + + @Override + protected DestConfig createTestInstance() { + return randomDestConfig(); + } + + @Override + protected Reader instanceReader() { + return DestConfig::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfigTests.java new file mode 100644 index 000000000000..b8eee446f377 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfigTests.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.transforms; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; +import org.junit.Before; + +import java.io.IOException; +import java.util.function.Predicate; + +public class SourceConfigTests extends AbstractSerializingDataFrameTestCase { + + private boolean lenient; + + public static SourceConfig randomSourceConfig() { + return new SourceConfig(generateRandomStringArray(10, 10, false, false), + QueryConfigTests.randomQueryConfig()); + } + + public static SourceConfig randomInvalidSourceConfig() { + // create something broken but with a source + return new SourceConfig(generateRandomStringArray(10, 10, false, false), + QueryConfigTests.randomInvalidQueryConfig()); + } + + @Before + public void setRandomFeatures() { + lenient = randomBoolean(); + } + + @Override + protected SourceConfig doParseInstance(XContentParser parser) throws IOException { + return SourceConfig.fromXContent(parser, lenient); + } + + @Override + protected SourceConfig createTestInstance() { + return lenient ? randomBoolean() ? 
randomSourceConfig() : randomInvalidSourceConfig() : randomSourceConfig(); + } + + @Override + protected boolean supportsUnknownFields() { + return lenient; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // allow unknown fields in the root of the object only as QueryConfig stores a Map + return field -> !field.isEmpty(); + } + + @Override + protected Reader instanceReader() { + return SourceConfig::new; + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java index f1f3c053e234..6953455489d1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java @@ -132,6 +132,42 @@ public void onFailure(Exception e) { containsString("this index is a leader index; waiting for all following indices to cease following before proceeding")); } + public void testNoShardStats() { + WaitForNoFollowersStep step = createRandomInstance(); + + String indexName = randomAlphaOfLengthBetween(5,10); + + int numberOfShards = randomIntBetween(1, 100); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(randomIntBetween(1, 10)) + .build(); + + ShardStats sStats = new ShardStats(null, mockShardPath(), null, null, null, null); + ShardStats[] shardStats = new ShardStats[1]; + shardStats[0] = sStats; + mockIndexStatsCall(step.getClient(), indexName, new IndexStats(indexName, "uuid", shardStats)); + + final SetOnce conditionMetHolder = new SetOnce<>(); + final SetOnce stepInfoHolder = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + conditionMetHolder.set(conditionMet); + stepInfoHolder.set(infomationContext); + } + + @Override + public void onFailure(Exception e) { + fail("onFailure should not be called in this test, called with exception: " + e.getMessage()); + } + }); + + assertTrue(conditionMetHolder.get()); + assertNull(stepInfoHolder.get()); + } + public void testFailure() { WaitForNoFollowersStep step = createRandomInstance(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java new file mode 100644 index 000000000000..cab10ca72832 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName.Format; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class TemplateRoleNameTests extends ESTestCase { + + public void testParseRoles() throws Exception { + final TemplateRoleName role1 = parse("{ \"template\": { \"source\": \"_user_{{username}}\" } }"); + assertThat(role1, Matchers.instanceOf(TemplateRoleName.class)); + assertThat(role1.getTemplate().utf8ToString(), equalTo("{\"source\":\"_user_{{username}}\"}")); + assertThat(role1.getFormat(), equalTo(Format.STRING)); + + final TemplateRoleName role2 = parse( + "{ \"template\": \"{\\\"source\\\":\\\"{{#tojson}}groups{{/tojson}}\\\"}\", \"format\":\"json\" }"); + assertThat(role2, Matchers.instanceOf(TemplateRoleName.class)); + assertThat(role2.getTemplate().utf8ToString(), + equalTo("{\"source\":\"{{#tojson}}groups{{/tojson}}\"}")); + assertThat(role2.getFormat(), equalTo(Format.JSON)); + } + + public void testToXContent() throws Exception { + final String json = "{" + + "\"template\":\"{\\\"source\\\":\\\"" + randomAlphaOfLengthBetween(8, 24) + "\\\"}\"," + + "\"format\":\"" + randomFrom(Format.values()).formatName() + "\"" + + "}"; + assertThat(Strings.toString(parse(json)), equalTo(json)); + } + + public void testSerializeTemplate() throws Exception { + trySerialize(new TemplateRoleName(new BytesArray(randomAlphaOfLengthBetween(12, 60)), randomFrom(Format.values()))); + } + + public void testEqualsAndHashCode() throws Exception { + tryEquals(new TemplateRoleName(new BytesArray(randomAlphaOfLengthBetween(12, 60)), randomFrom(Format.values()))); + } + + public void testEvaluateRoles() throws Exception { + final ScriptService scriptService = new ScriptService(Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), ScriptModule.CORE_CONTEXTS); + final ExpressionModel model = new ExpressionModel(); + model.defineField("username", "hulk"); + model.defineField("groups", Arrays.asList("avengers", "defenders", "panthenon")); + + final TemplateRoleName plainString = new TemplateRoleName(new BytesArray("{ \"source\":\"heroes\" }"), Format.STRING); + assertThat(plainString.getRoleNames(scriptService, model), contains("heroes")); + + final TemplateRoleName 
user = new TemplateRoleName(new BytesArray("{ \"source\":\"_user_{{username}}\" }"), Format.STRING); + assertThat(user.getRoleNames(scriptService, model), contains("_user_hulk")); + + final TemplateRoleName groups = new TemplateRoleName(new BytesArray("{ \"source\":\"{{#tojson}}groups{{/tojson}}\" }"), + Format.JSON); + assertThat(groups.getRoleNames(scriptService, model), contains("avengers", "defenders", "panthenon")); + } + + private TemplateRoleName parse(String json) throws IOException { + final XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + final TemplateRoleName role = TemplateRoleName.parse(parser); + assertThat(role, notNullValue()); + return role; + } + + public void trySerialize(TemplateRoleName original) throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + original.writeTo(output); + + final StreamInput rawInput = ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())); + final TemplateRoleName serialized = new TemplateRoleName(rawInput); + assertEquals(original, serialized); + } + + public void tryEquals(TemplateRoleName original) { + final EqualsHashCodeTestUtils.CopyFunction copy = + rmt -> new TemplateRoleName(rmt.getTemplate(), rmt.getFormat()); + final EqualsHashCodeTestUtils.MutateFunction mutate = rmt -> { + if (randomBoolean()) { + return new TemplateRoleName(rmt.getTemplate(), + randomValueOtherThan(rmt.getFormat(), () -> randomFrom(Format.values()))); + } else { + final String templateStr = rmt.getTemplate().utf8ToString(); + return new TemplateRoleName(new BytesArray(templateStr.substring(randomIntBetween(1, templateStr.length() / 2))), + rmt.getFormat()); + } + }; + EqualsHashCodeTestUtils.checkEqualsAndHashCode(original, copy, mutate); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java index 992ca8db1b08..0f81b872b86d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java @@ -13,15 +13,16 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; public class ApplicationPermissionTests extends ESTestCase { @@ -34,6 +35,7 @@ public class ApplicationPermissionTests extends ESTestCase { private ApplicationPrivilege app1Delete = storePrivilege("app1", "delete", "write/delete"); private ApplicationPrivilege app1Create = storePrivilege("app1", "create", "write/create"); private ApplicationPrivilege app2Read = storePrivilege("app2", "read", "read/*"); + private ApplicationPrivilege otherAppRead = storePrivilege("other-app", "read", "read/*"); private ApplicationPrivilege storePrivilege(String app, String name, String... 
patterns) { store.add(new ApplicationPrivilegeDescriptor(app, name, Sets.newHashSet(patterns), Collections.emptyMap())); @@ -104,6 +106,16 @@ public void testDoesNotMatchAcrossApplications() { assertThat(buildPermission(app1All, "*").grants(app2Read, "123"), equalTo(false)); } + public void testMatchingWithWildcardApplicationNames() { + final Set readAllApp = ApplicationPrivilege.get("app*", Collections.singleton("read"), store); + assertThat(buildPermission(readAllApp, "*").grants(app1Read, "123"), equalTo(true)); + assertThat(buildPermission(readAllApp, "foo/*").grants(app2Read, "foo/bar"), equalTo(true)); + + assertThat(buildPermission(readAllApp, "*").grants(app1Write, "123"), equalTo(false)); + assertThat(buildPermission(readAllApp, "foo/*").grants(app2Read, "bar/baz"), equalTo(false)); + assertThat(buildPermission(readAllApp, "*").grants(otherAppRead, "abc"), equalTo(false)); + } + public void testMergedPermissionChecking() { final ApplicationPrivilege app1ReadWrite = compositePrivilege("app1", app1Read, app1Write); final ApplicationPermission hasPermission = buildPermission(app1ReadWrite, "allow/*"); @@ -138,16 +150,27 @@ public void testInspectPermissionContents() { } private ApplicationPrivilege actionPrivilege(String appName, String... actions) { - return ApplicationPrivilege.get(appName, Sets.newHashSet(actions), Collections.emptyList()); + final Set privileges = ApplicationPrivilege.get(appName, Sets.newHashSet(actions), Collections.emptyList()); + assertThat(privileges, hasSize(1)); + return privileges.iterator().next(); } private ApplicationPrivilege compositePrivilege(String application, ApplicationPrivilege... children) { Set names = Stream.of(children).map(ApplicationPrivilege::name).flatMap(Set::stream).collect(Collectors.toSet()); - return ApplicationPrivilege.get(application, names, store); + final Set privileges = ApplicationPrivilege.get(application, names, store); + assertThat(privileges, hasSize(1)); + return privileges.iterator().next(); } - private ApplicationPermission buildPermission(ApplicationPrivilege privilege, String... resources) { - return new ApplicationPermission(singletonList(new Tuple<>(privilege, Sets.newHashSet(resources)))); + return buildPermission(Collections.singleton(privilege), resources); + } + + private ApplicationPermission buildPermission(Collection privileges, String... 
resources) { + final Set resourceSet = Sets.newHashSet(resources); + final List>> privilegesAndResources = privileges.stream() + .map(p -> new Tuple<>(p, resourceSet)) + .collect(Collectors.toList()); + return new ApplicationPermission(privilegesAndResources); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java index c65f06f05f95..cd917ed81f16 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.hamcrest.Matchers; import org.junit.Assert; import java.util.Arrays; @@ -22,9 +23,11 @@ import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.iterableWithSize; public class ApplicationPrivilegeTests extends ESTestCase { @@ -59,6 +62,12 @@ public void testValidationOfApplicationName() { assertNoException(app, () -> ApplicationPrivilege.validateApplicationName(app)); assertNoException(app, () -> ApplicationPrivilege.validateApplicationNameOrWildcard(app)); } + + // wildcards in the suffix + for (String app : Arrays.asList("app1-*", "app1-foo*", "app1-.*", "app1-.foo.*", appNameWithSpecialChars + "*")) { + assertValidationFailure(app, "application name", () -> ApplicationPrivilege.validateApplicationName(app)); + assertNoException(app, () -> ApplicationPrivilege.validateApplicationNameOrWildcard(app)); + } } public void testValidationOfPrivilegeName() { @@ -101,16 +110,23 @@ public void testNonePrivilege() { } public void testGetPrivilegeByName() { - final ApplicationPrivilegeDescriptor descriptor = descriptor("my-app", "read", "data:read/*", "action:login"); + final ApplicationPrivilegeDescriptor myRead = descriptor("my-app", "read", "data:read/*", "action:login"); final ApplicationPrivilegeDescriptor myWrite = descriptor("my-app", "write", "data:write/*", "action:login"); final ApplicationPrivilegeDescriptor myAdmin = descriptor("my-app", "admin", "data:read/*", "action:*"); final ApplicationPrivilegeDescriptor yourRead = descriptor("your-app", "read", "data:read/*", "action:login"); - final Set stored = Sets.newHashSet(descriptor, myWrite, myAdmin, yourRead); + final Set stored = Sets.newHashSet(myRead, myWrite, myAdmin, yourRead); + + final Set myAppRead = ApplicationPrivilege.get("my-app", Collections.singleton("read"), stored); + assertThat(myAppRead, iterableWithSize(1)); + assertPrivilegeEquals(myAppRead.iterator().next(), myRead); - assertEqual(ApplicationPrivilege.get("my-app", Collections.singleton("read"), stored), descriptor); - assertEqual(ApplicationPrivilege.get("my-app", Collections.singleton("write"), stored), myWrite); + final Set myAppWrite = ApplicationPrivilege.get("my-app", Collections.singleton("write"), stored); + assertThat(myAppWrite, iterableWithSize(1)); + 
assertPrivilegeEquals(myAppWrite.iterator().next(), myWrite); - final ApplicationPrivilege readWrite = ApplicationPrivilege.get("my-app", Sets.newHashSet("read", "write"), stored); + final Set myReadWrite = ApplicationPrivilege.get("my-app", Sets.newHashSet("read", "write"), stored); + assertThat(myReadWrite, Matchers.hasSize(1)); + final ApplicationPrivilege readWrite = myReadWrite.iterator().next(); assertThat(readWrite.getApplication(), equalTo("my-app")); assertThat(readWrite.name(), containsInAnyOrder("read", "write")); assertThat(readWrite.getPatterns(), arrayContainingInAnyOrder("data:read/*", "data:write/*", "action:login")); @@ -124,10 +140,10 @@ public void testGetPrivilegeByName() { } } - private void assertEqual(ApplicationPrivilege myReadPriv, ApplicationPrivilegeDescriptor myRead) { - assertThat(myReadPriv.getApplication(), equalTo(myRead.getApplication())); - assertThat(getPrivilegeName(myReadPriv), equalTo(myRead.getName())); - assertThat(Sets.newHashSet(myReadPriv.getPatterns()), equalTo(myRead.getActions())); + private void assertPrivilegeEquals(ApplicationPrivilege privilege, ApplicationPrivilegeDescriptor descriptor) { + assertThat(privilege.getApplication(), equalTo(descriptor.getApplication())); + assertThat(privilege.name(), contains(descriptor.getName())); + assertThat(Sets.newHashSet(privilege.getPatterns()), equalTo(descriptor.getActions())); } private ApplicationPrivilegeDescriptor descriptor(String application, String name, String... actions) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index cb39388ec9c5..9d970cca5511 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -1047,6 +1047,7 @@ public void testDataFrameTransformsAdminRole() { assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request), is(true)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); + assertOnlyReadAllowed(role, ".data-frame-notifications-1"); assertNoAccessAllowed(role, "foo"); assertNoAccessAllowed(role, ".data-frame-internal-1"); // internal use only @@ -1070,6 +1071,7 @@ public void testDataFrameTransformsUserRole() { assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request), is(false)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); + assertOnlyReadAllowed(role, ".data-frame-notifications-1"); assertNoAccessAllowed(role, "foo"); assertNoAccessAllowed(role, ".data-frame-internal-1"); diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/data-frame/build.gradle index bff8118bfc42..ad4d846fd160 100644 --- a/x-pack/plugin/data-frame/build.gradle +++ b/x-pack/plugin/data-frame/build.gradle @@ -8,9 +8,6 @@ esplugin { extendedPlugins = ['x-pack-core'] } -compileJava.options.compilerArgs << "-Xlint:-rawtypes" -compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" - dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java 
b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java new file mode 100644 index 000000000000..2367e255cd9b --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -0,0 +1,78 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.elasticsearch.client.Request; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.is; + +public class DataFrameAuditorIT extends DataFrameRestTestCase { + + private static final String TEST_USER_NAME = "df_admin_plus_data"; + private static final String DATA_ACCESS_ROLE = "test_data_access"; + private static final String BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS = + basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING); + + private static boolean indicesCreated = false; + + // preserve indices in order to reuse source indices in several test cases + @Override + protected boolean preserveIndicesUponCompletion() { + return true; + } + + @Before + public void createIndexes() throws IOException { + + // it's not possible to run it as @BeforeClass as clients aren't initialized then, so we need this little hack + if (indicesCreated) { + return; + } + + createReviewsIndex(); + indicesCreated = true; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); + setupUser(TEST_USER_NAME, Arrays.asList("data_frame_transforms_admin", DATA_ACCESS_ROLE)); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40594") + @SuppressWarnings("unchecked") + public void testAuditorWritesAudits() throws Exception { + String transformId = "simplePivotForAudit"; + String dataFrameIndex = "pivot_reviews_user_id_above_20"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String query = "\"match\": {\"user_id\": \"user_26\"}"; + + createPivotReviewsTransform(transformId, dataFrameIndex, query, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + + // Make sure we wrote to the audit + assertTrue(indexExists(DataFrameInternalIndex.AUDIT_INDEX)); + refreshIndex(DataFrameInternalIndex.AUDIT_INDEX); + Request request = new Request("GET", DataFrameInternalIndex.AUDIT_INDEX + "/_search"); + request.setJsonEntity("{\"query\":{\"term\":{\"transform_id\":\"simplePivotForAudit\"}}}"); + Map response = entityAsMap(client().performRequest(request)); + Map hitRsp = (Map) ((List) ((Map)response.get("hits")).get("hits")).get(0); + Map source = (Map)hitRsp.get("_source"); + assertThat(source.get("transform_id"), equalTo(transformId)); + assertThat(source.get("level"), equalTo("info")); + assertThat(source.get("message"), is(notNullValue())); + 
assertThat(source.get("node_name"), is(notNullValue())); + assertThat(source.get("timestamp"), is(notNullValue())); + } +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 635038e2a486..99c08f1a5058 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -96,8 +96,8 @@ public void testHistogramPivot() throws Exception { BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" - + " \"source\": \"" + REVIEWS_INDEX_NAME + "\"," - + " \"dest\": \"" + dataFrameIndex + "\","; + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -134,8 +134,8 @@ public void testBiggerPivot() throws Exception { BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" - + " \"source\": \"reviews\"," - + " \"dest\": \"" + dataFrameIndex + "\","; + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -209,8 +209,8 @@ public void testDateHistogramPivot() throws Exception { BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" - + " \"source\": \"" + REVIEWS_INDEX_NAME + "\"," - + " \"dest\": \"" + dataFrameIndex + "\","; + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -245,7 +245,7 @@ public void testPreviewTransform() throws Exception { BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" - + " \"source\": \"" + REVIEWS_INDEX_NAME + "\","; + + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"} ,"; config += " \"pivot\": {" + " \"group_by\": {" @@ -268,6 +268,7 @@ public void testPreviewTransform() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40537") public void testPivotWithMaxOnDateField() throws Exception { String transformId = "simpleDateHistogramPivotWithMaxTime"; String dataFrameIndex = "pivot_reviews_via_date_histogram_with_max_time"; @@ -277,8 +278,8 @@ public void testPivotWithMaxOnDateField() throws Exception { BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" - + " \"source\": \"" + REVIEWS_INDEX_NAME + "\"," - + " \"dest\": \"" + dataFrameIndex + "\","; + + " \"source\": {\"index\": \"" + REVIEWS_INDEX_NAME + "\"}," + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; config +=" \"pivot\": { \n" + " \"group_by\": {\n" + diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 431905ed75e7..6b9300916a99 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ 
b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -139,13 +139,12 @@ protected void createPivotReviewsTransform(String transformId, String dataFrameI final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, authHeader); String config = "{" - + " \"source\": \"" + REVIEWS_INDEX_NAME + "\"," - + " \"dest\": \"" + dataFrameIndex + "\","; + + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; if (query != null) { - config += "\"query\": {" - + query - + "},"; + config += " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\", \"query\":{" + query + "}},"; + } else { + config += " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"},"; } config += " \"pivot\": {" @@ -168,17 +167,34 @@ protected void createPivotReviewsTransform(String transformId, String dataFrameI assertTrue(indexExists(dataFrameIndex)); } - protected void startAndWaitForTransform(String transformId, String dataFrameIndex) throws Exception { - startAndWaitForTransform(transformId, dataFrameIndex, null); + protected void startDataframeTransform(String transformId, boolean force) throws IOException { + startDataframeTransform(transformId, force, null); } - protected void startAndWaitForTransform(String transformId, String dataFrameIndex, String authHeader) throws Exception { + protected void startDataframeTransform(String transformId, boolean force, String authHeader) throws IOException { // start the transform final Request startTransformRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + transformId + "/_start", authHeader); - + startTransformRequest.addParameter(DataFrameField.FORCE.getPreferredName(), Boolean.toString(force)); Map startTransformResponse = entityAsMap(client().performRequest(startTransformRequest)); assertThat(startTransformResponse.get("started"), equalTo(Boolean.TRUE)); + } + protected void stopDataFrameTransform(String transformId, boolean force) throws Exception { + // stop the transform + final Request stopTransformRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + transformId + "/_stop", null); + stopTransformRequest.addParameter(DataFrameField.FORCE.getPreferredName(), Boolean.toString(force)); + stopTransformRequest.addParameter(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), Boolean.toString(true)); + Map stopTransformResponse = entityAsMap(client().performRequest(stopTransformRequest)); + assertThat(stopTransformResponse.get("stopped"), equalTo(Boolean.TRUE)); + } + + protected void startAndWaitForTransform(String transformId, String dataFrameIndex) throws Exception { + startAndWaitForTransform(transformId, dataFrameIndex, null); + } + + protected void startAndWaitForTransform(String transformId, String dataFrameIndex, String authHeader) throws Exception { + // start the transform + startDataframeTransform(transformId, false, authHeader); // wait until the dataframe has been created and all data is available waitForDataFrameGeneration(transformId); refreshIndex(dataFrameIndex); @@ -217,13 +233,29 @@ private static List> getDataFrameTransforms() throws IOExcep } protected static String getDataFrameIndexerState(String transformId) throws IOException { + Map transformStatsAsMap = getDataFrameState(transformId); + return transformStatsAsMap == null ?
null : + (String) XContentMapValues.extractValue("state.indexer_state", transformStatsAsMap); + } + + protected static String getDataFrameTaskState(String transformId) throws IOException { + Map transformStatsAsMap = getDataFrameState(transformId); + return transformStatsAsMap == null ? null : (String) XContentMapValues.extractValue("state.task_state", transformStatsAsMap); + } + + protected static Map getDataFrameState(String transformId) throws IOException { Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats")); List transforms = ((List) entityAsMap(statsResponse).get("transforms")); if (transforms.isEmpty()) { return null; } - Map transformStatsAsMap = (Map) transforms.get(0); - return (String) XContentMapValues.extractValue("state.transform_state", transformStatsAsMap); + return (Map) transforms.get(0); + } + + protected static void deleteDataFrameTransform(String transformId) throws IOException { + Request request = new Request("DELETE", DATAFRAME_ENDPOINT + transformId); + request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this + adminClient().performRequest(request); } @AfterClass @@ -252,9 +284,7 @@ protected static void wipeDataFrameTransforms() throws IOException, InterruptedE for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); - Request request = new Request("DELETE", DATAFRAME_ENDPOINT + transformId); - request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this - adminClient().performRequest(request); + deleteDataFrameTransform(transformId); } // transforms should be all gone diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java new file mode 100644 index 000000000000..de0757a30ba1 --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.Matchers.equalTo; + +public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { + + public void testDummy() { + // remove once the awaits fix below is resolved + } + + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/40543") + public void testFailureStateInteraction() throws Exception { + createReviewsIndex(); + String transformId = "failure_pivot_1"; + String dataFrameIndex = "failure_pivot_reviews"; + createPivotReviewsTransform(transformId, dataFrameIndex, null); + deleteIndex(REVIEWS_INDEX_NAME); // trigger start failure due to index missing + startDataframeTransform(transformId, false); + awaitState(transformId, DataFrameTransformTaskState.FAILED); + Map fullState = getDataFrameState(transformId); + + // Verify we have failed for the expected reason + assertThat(XContentMapValues.extractValue("state.reason", fullState), + equalTo("task encountered irrecoverable failure: no such index [reviews]")); + assertThat(XContentMapValues.extractValue("state.indexer_state", fullState), equalTo("started")); + + // Verify that we cannot stop or start the transform when the task is in a failed state + ResponseException ex = expectThrows(ResponseException.class, () -> stopDataFrameTransform(transformId, false)); + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); + assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), + equalTo("Unable to stop data frame transform [failure_pivot_1] as it is in a failed state with reason: [" + + "task encountered irrecoverable failure: no such index [reviews]]. Use force stop to stop the data frame transform.")); + + ex = expectThrows(ResponseException.class, () -> startDataframeTransform(transformId, false)); + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); + assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), + equalTo("Unable to start data frame transform [failure_pivot_1] as it is in a failed state with failure: [" + + "task encountered irrecoverable failure: no such index [reviews]]. 
" + + "Use force start to restart data frame transform once error is resolved.")); + + // Correct the failure by creating the reviews index again + createReviewsIndex(); + // Force start the data frame to indicate failure correction + startDataframeTransform(transformId, true); + // Wait for data to be indexed appropriately and refresh for search + waitForDataFrameGeneration(transformId); + refreshIndex(dataFrameIndex); + + // Verify that we have started and that our reason is cleared + fullState = getDataFrameState(transformId); + assertThat(XContentMapValues.extractValue("state.reason", fullState), is(nullValue())); + assertThat(XContentMapValues.extractValue("state.task_state", fullState), equalTo("started")); + assertThat(XContentMapValues.extractValue("state.indexer_state", fullState), equalTo("started")); + assertThat(XContentMapValues.extractValue("stats.search_failures", fullState), equalTo(1)); + + // get and check some users to verify we restarted + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + + + stopDataFrameTransform(transformId, true); + deleteDataFrameTransform(transformId); + } + + private void awaitState(String transformId, DataFrameTransformTaskState state) throws Exception { + assertBusy(() -> { + String currentState = getDataFrameTaskState(transformId); + assertThat(state.value(), equalTo(currentState)); + }); + } + + private void assertOnePivotValue(String query, double expected) throws IOException { + Map searchResult = getAsMap(query); + + assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); + double actual = (Double) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); + assertEquals(expected, actual, 0.000001); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index fdd14d4cff85..a32c01c43a0a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -40,6 +40,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.common.notifications.Auditor; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; @@ -48,6 +49,7 @@ import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.dataframe.action.TransportDeleteDataFrameTransformAction; import 
org.elasticsearch.xpack.dataframe.action.TransportGetDataFrameTransformsAction; @@ -83,6 +85,7 @@ import java.util.function.UnaryOperator; import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlugin { @@ -99,6 +102,7 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu private final Settings settings; private final boolean transportClientMode; private final SetOnce dataFrameTransformsConfigManager = new SetOnce<>(); + private final SetOnce> dataFrameAuditor = new SetOnce<>(); private final SetOnce dataFrameTransformsCheckpointService = new SetOnce<>(); private final SetOnce schedulerEngine = new SetOnce<>(); @@ -180,11 +184,15 @@ public Collection createComponents(Client client, ClusterService cluster if (enabled == false || transportClientMode) { return emptyList(); } - + dataFrameAuditor.set(new Auditor<>(client, + clusterService.getNodeName(), + DataFrameInternalIndex.AUDIT_INDEX, + DATA_FRAME_ORIGIN, + DataFrameAuditMessage.builder())); dataFrameTransformsConfigManager.set(new DataFrameTransformsConfigManager(client, xContentRegistry)); dataFrameTransformsCheckpointService.set(new DataFrameTransformsCheckpointService(client)); - return Arrays.asList(dataFrameTransformsConfigManager.get(), dataFrameTransformsCheckpointService.get()); + return Arrays.asList(dataFrameTransformsConfigManager.get(), dataFrameAuditor.get(), dataFrameTransformsCheckpointService.get()); } @Override @@ -195,6 +203,11 @@ public UnaryOperator> getIndexTemplateMetaDat } catch (IOException e) { logger.error("Error creating data frame index template", e); } + try { + templates.put(DataFrameInternalIndex.AUDIT_INDEX, DataFrameInternalIndex.getAuditIndexTemplateMetaData()); + } catch (IOException e) { + logger.warn("Error creating data frame audit index", e); + } return templates; }; } @@ -210,10 +223,12 @@ public List> getPersistentTasksExecutor(ClusterServic // the transforms config manager should have been created assert dataFrameTransformsConfigManager.get() != null; + // the auditor should have been created + assert dataFrameAuditor.get() != null; assert dataFrameTransformsCheckpointService.get() != null; return Collections.singletonList(new DataFrameTransformPersistentTasksExecutor(client, dataFrameTransformsConfigManager.get(), - dataFrameTransformsCheckpointService.get(), schedulerEngine.get(), threadPool)); + dataFrameTransformsCheckpointService.get(), schedulerEngine.get(), dataFrameAuditor.get(), threadPool)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java index 131ad690d2b6..2cdc4009e785 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -12,20 +12,18 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import 
org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Response; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; @@ -36,8 +34,7 @@ public class TransportDeleteDataFrameTransformAction extends TransportTasksActio private final DataFrameTransformsConfigManager transformsConfigManager; @Inject - public TransportDeleteDataFrameTransformAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, PersistentTasksService persistentTasksService, + public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, DataFrameTransformsConfigManager transformsConfigManager) { super(DeleteDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index 8cafb33cc62b..78f682303481 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -60,8 +60,8 @@ protected void doExecute(Task task, final DataFrameTransformConfig config = request.getConfig(); - Pivot pivot = new Pivot(config.getSource(), - config.getQueryConfig().getQuery(), + Pivot pivot = new Pivot(config.getSource().getIndex(), + config.getSource().getQueryConfig().getQuery(), config.getPivotConfig()); getPreview(pivot, ActionListener.wrap( @@ -82,7 +82,13 @@ private void getPreview(Pivot pivot, ActionListener>> l r -> { final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); - listener.onResponse(pivot.extractResults(agg, deducedMappings, stats).collect(Collectors.toList())); + // remove all internal fields + List> results = pivot.extractResults(agg, deducedMappings, stats) + .map(record -> { + record.keySet().removeIf(k -> k.startsWith("_")); + return record; + }).collect(Collectors.toList()); + listener.onResponse(results); }, listener::onFailure )); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index 63fd878652ff..6f364d91e44f 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -72,7 +71,6 @@ public class TransportPutDataFrameTransformAction public TransportPutDataFrameTransformAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, XPackLicenseState licenseState, - PersistentTasksService persistentTasksService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, Client client) { super(PutDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutDataFrameTransformAction.Request::new); @@ -119,36 +117,36 @@ protected void masterOperation(Request request, ClusterState clusterState, Actio return; } - String[] dest = indexNameExpressionResolver.concreteIndexNames(clusterState, + final String[] dest = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), - config.getDestination()); + config.getDestination().getIndex()); if (dest.length > 0) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_INDEX_ALREADY_EXISTS, config.getDestination()), + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_INDEX_ALREADY_EXISTS, + config.getDestination().getIndex()), RestStatus.BAD_REQUEST)); return; } - String[] src = indexNameExpressionResolver.concreteIndexNames(clusterState, - IndicesOptions.lenientExpandOpen(), - config.getSource()); - if (src.length == 0) { - listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, config.getSource()), - RestStatus.BAD_REQUEST)); - return; + for(String src : config.getSource().getIndex()) { + if (indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), src).length == 0) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), + RestStatus.BAD_REQUEST)); + return; + } } // Early check to verify that the user can create the destination index and can read from the source if (licenseState.isAuthAllowed()) { final String username = securityContext.getUser().principal(); RoleDescriptor.IndicesPrivileges sourceIndexPrivileges = RoleDescriptor.IndicesPrivileges.builder() - .indices(config.getSource()) + .indices(config.getSource().getIndex()) .privileges("read") .build(); RoleDescriptor.IndicesPrivileges destIndexPrivileges = RoleDescriptor.IndicesPrivileges.builder() - .indices(config.getDestination()) + .indices(config.getDestination().getIndex()) .privileges("read", "index", 
"create_index") .build(); @@ -196,7 +194,9 @@ private void handlePrivsResponse(String username, private void putDataFrame(DataFrameTransformConfig config, ActionListener listener) { - final Pivot pivot = new Pivot(config.getSource(), config.getQueryConfig().getQuery(), config.getPivotConfig()); + final Pivot pivot = new Pivot(config.getSource().getIndex(), + config.getSource().getQueryConfig().getQuery(), + config.getPivotConfig()); // <5> Return the listener, or clean up destination index on failure. @@ -206,7 +206,7 @@ private void putDataFrame(DataFrameTransformConfig config, ActionListener listener.onFailure(putTransformConfigurationException), deleteIndexException -> { String msg = "Failed to delete destination index after creating transform [" + config.getId() + "] failed"; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index 7fa19fa50e89..f68e246ed860 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -33,6 +33,8 @@ import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import java.util.Collection; @@ -53,8 +55,8 @@ public TransportStartDataFrameTransformAction(TransportService transportService, ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, PersistentTasksService persistentTasksService, Client client) { - super(StartDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - StartDataFrameTransformAction.Request::new); + super(StartDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + StartDataFrameTransformAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; this.persistentTasksService = persistentTasksService; @@ -94,11 +96,7 @@ protected void masterOperation(StartDataFrameTransformAction.Request request, new StartDataFrameTransformTaskAction.Request(request.getId()), ActionListener.wrap( r -> listener.onResponse(new StartDataFrameTransformAction.Response(true)), - startingFailure -> cancelDataFrameTask(task.getId(), - transformTask.getId(), - startingFailure, - listener::onFailure) - )), + listener::onFailure)), listener::onFailure)); }, listener::onFailure @@ -122,7 +120,21 @@ protected void masterOperation(StartDataFrameTransformAction.Request request, transformTask, persistentTaskActionListener); } else { - persistentTaskActionListener.onResponse(existingTask); + DataFrameTransformState transformState = (DataFrameTransformState)existingTask.getState(); + if(transformState.getTaskState() == 
DataFrameTransformTaskState.FAILED && request.isForce() == false) { + listener.onFailure(new ElasticsearchStatusException( + "Unable to start data frame transform [" + config.getId() + + "] as it is in a failed state with failure: [" + transformState.getReason() + + "]. Use force start to restart data frame transform once error is resolved.", + RestStatus.CONFLICT)); + } else if (transformState.getTaskState() != DataFrameTransformTaskState.STOPPED && + transformState.getTaskState() != DataFrameTransformTaskState.FAILED) { + listener.onFailure(new ElasticsearchStatusException( + "Unable to start data frame transform [" + config.getId() + + "] as it is in state [" + transformState.getTaskState() + "]", RestStatus.CONFLICT)); + } else { + persistentTaskActionListener.onResponse(existingTask); + } } }, listener::onFailure diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 78e8425758f9..2234226a5013 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.dataframe.action; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -15,11 +16,13 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; @@ -60,6 +63,14 @@ protected void doExecute(Task task, StopDataFrameTransformAction.Request request protected void taskOperation(StopDataFrameTransformAction.Request request, DataFrameTransformTask transformTask, ActionListener listener) { if (transformTask.getTransformId().equals(request.getId())) { + if (transformTask.getState().getTaskState() == DataFrameTransformTaskState.FAILED && request.isForce() == false) { + listener.onFailure( + new ElasticsearchStatusException("Unable to stop data frame transform [" + request.getId() + + "] as it is in a failed state with reason: [" + transformTask.getState().getReason() + + "]. 
Use force stop to stop the data frame transform.", + RestStatus.CONFLICT)); + return; + } if (request.waitForCompletion() == false) { transformTask.stop(listener); } else { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java index 00cc2d2f522a..5d65016a890b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java @@ -68,7 +68,7 @@ public void getCheckpoint(DataFrameTransformConfig transformConfig, long checkpo long timeUpperBound = 0; // 1st get index to see the indexes the user has access to - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(transformConfig.getSource()); + GetIndexRequest getIndexRequest = new GetIndexRequest().indices(transformConfig.getSource().getIndex()); ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, GetIndexAction.INSTANCE, getIndexRequest, ActionListener.wrap(getIndexResponse -> { @@ -76,7 +76,7 @@ public void getCheckpoint(DataFrameTransformConfig transformConfig, long checkpo // 2nd get stats request ClientHelper.executeAsyncWithOrigin(client, ClientHelper.DATA_FRAME_ORIGIN, IndicesStatsAction.INSTANCE, - new IndicesStatsRequest().indices(transformConfig.getSource()), ActionListener.wrap(response -> { + new IndicesStatsRequest().indices(transformConfig.getSource().getIndex()), ActionListener.wrap(response -> { if (response.getFailedShards() != 0) { throw new CheckpointException("Source has [" + response.getFailedShards() + "] failed shards"); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java index 1871bc067967..2905e4c22579 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java @@ -13,12 +13,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; import java.io.IOException; import java.util.Collections; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; +import static org.elasticsearch.xpack.core.dataframe.DataFrameField.DATA_FRAME_TRANSFORM_AUDIT_ID_FIELD; public final class DataFrameInternalIndex { @@ -28,10 +33,19 @@ public final class DataFrameInternalIndex { public static final String INDEX_TEMPLATE_NAME = INDEX_TEMPLATE_PATTERN + INDEX_TEMPLATE_VERSION; public static final String INDEX_NAME = INDEX_TEMPLATE_NAME; + public static final String AUDIT_TEMPLATE_VERSION = "1"; + public static final 
String AUDIT_INDEX_PREFIX = ".data-frame-notifications-"; + public static final String AUDIT_INDEX = AUDIT_INDEX_PREFIX + AUDIT_TEMPLATE_VERSION; + // constants for mappings public static final String DYNAMIC = "dynamic"; public static final String PROPERTIES = "properties"; public static final String TYPE = "type"; + public static final String ENABLED = "enabled"; + public static final String DATE = "date"; + public static final String TEXT = "text"; + public static final String FIELDS = "fields"; + public static final String RAW = "raw"; // data types public static final String DOUBLE = "double"; @@ -50,6 +64,52 @@ public static IndexTemplateMetaData getIndexTemplateMetaData() throws IOExceptio return dataFrameTemplate; } + public static IndexTemplateMetaData getAuditIndexTemplateMetaData() throws IOException { + IndexTemplateMetaData dataFrameTemplate = IndexTemplateMetaData.builder(AUDIT_INDEX) + .patterns(Collections.singletonList(AUDIT_INDEX_PREFIX + "*")) + .version(Version.CURRENT.id) + .settings(Settings.builder() + // the audits are expected to be small + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1")) + .putMapping(MapperService.SINGLE_MAPPING_NAME, Strings.toString(auditMappings())) + .build(); + return dataFrameTemplate; + } + + private static XContentBuilder auditMappings() throws IOException { + XContentBuilder builder = jsonBuilder().startObject(); + builder.startObject(SINGLE_MAPPING_NAME); + addMetaInformation(builder); + builder.field(DYNAMIC, "false"); + builder.startObject(PROPERTIES) + .startObject(DATA_FRAME_TRANSFORM_AUDIT_ID_FIELD) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AbstractAuditMessage.LEVEL.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(AbstractAuditMessage.MESSAGE.getPreferredName()) + .field(TYPE, TEXT) + .startObject(FIELDS) + .startObject(RAW) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .startObject(AbstractAuditMessage.TIMESTAMP.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(AbstractAuditMessage.NODE_NAME.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() + .endObject() + .endObject(); + + return builder; + } + private static XContentBuilder mappings() throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); @@ -81,10 +141,21 @@ private static XContentBuilder addDataFrameTransformsConfigMappings(XContentBuil .field(TYPE, KEYWORD) .endObject() .startObject(DataFrameField.SOURCE.getPreferredName()) - .field(TYPE, KEYWORD) + .startObject(PROPERTIES) + .startObject(SourceConfig.INDEX.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(SourceConfig.QUERY.getPreferredName()) + .field(ENABLED, "false") + .endObject() + .endObject() .endObject() .startObject(DataFrameField.DESTINATION.getPreferredName()) - .field(TYPE, KEYWORD) + .startObject(PROPERTIES) + .startObject(DestConfig.INDEX.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .endObject() .endObject(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java index 440a065b48c6..9e0580797465 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java +++ 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java @@ -38,7 +38,7 @@ private DataframeIndex() { public static void createDestinationIndex(Client client, DataFrameTransformConfig transformConfig, Map mappings, final ActionListener listener) { - CreateIndexRequest request = new CreateIndexRequest(transformConfig.getDestination()); + CreateIndexRequest request = new CreateIndexRequest(transformConfig.getDestination().getIndex()); // TODO: revisit number of shards, number of replicas request.settings(Settings.builder() // <1> @@ -50,7 +50,7 @@ public static void createDestinationIndex(Client client, DataFrameTransformConfi listener.onResponse(true); }, e -> { String message = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_CREATE_DESTINATION_INDEX, - transformConfig.getDestination(), transformConfig.getId()); + transformConfig.getDestination().getIndex(), transformConfig.getId()); logger.error(message); listener.onFailure(new RuntimeException(message, e)); })); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java index 76bf5c7230b9..1d9b3f29a613 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java @@ -28,7 +28,8 @@ public RestStartDataFrameTransformAction(Settings settings, RestController contr @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String id = restRequest.param(DataFrameField.ID.getPreferredName()); - StartDataFrameTransformAction.Request request = new StartDataFrameTransformAction.Request(id); + boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); + StartDataFrameTransformAction.Request request = new StartDataFrameTransformAction.Request(id, force); request.timeout(restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(), AcknowledgedRequest.DEFAULT_ACK_TIMEOUT)); return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java index 880373181240..e93898b905ba 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java @@ -30,8 +30,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient TimeValue timeout = restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(), StopDataFrameTransformAction.DEFAULT_TIMEOUT); boolean waitForCompletion = restRequest.paramAsBoolean(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), false); + boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); - StopDataFrameTransformAction.Request request = new StopDataFrameTransformAction.Request(id, waitForCompletion, 
timeout); + StopDataFrameTransformAction.Request request = new StopDataFrameTransformAction.Request(id, waitForCompletion, force, timeout); return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index f32e90869402..bb07722ddeed 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; @@ -48,9 +49,9 @@ public DataFrameIndexer(Executor executor, AtomicReference initial @Override protected void onStartJob(long now) { - QueryBuilder queryBuilder = getConfig().getQueryConfig().getQuery(); + QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery(); - pivot = new Pivot(getConfig().getSource(), queryBuilder, getConfig().getPivotConfig()); + pivot = new Pivot(getConfig().getSource().getIndex(), queryBuilder, getConfig().getPivotConfig()); } @Override @@ -70,18 +71,31 @@ protected IterationResult> doProcess(SearchResponse searchRe */ private Stream processBucketsToIndexRequests(CompositeAggregation agg) { final DataFrameTransformConfig transformConfig = getConfig(); - String indexName = transformConfig.getDestination(); + String indexName = transformConfig.getDestination().getIndex(); return pivot.extractResults(agg, getFieldMappings(), getStats()).map(document -> { + String id = (String) document.get(DataFrameField.DOCUMENT_ID_FIELD); + + if (id == null) { + throw new RuntimeException("Expected a document id but got null."); + } + XContentBuilder builder; try { builder = jsonBuilder(); - builder.map(document); + builder.startObject(); + for (Map.Entry value : document.entrySet()) { + // skip all internal fields + if (value.getKey().startsWith("_") == false) { + builder.field(value.getKey(), value.getValue()); + } + } + builder.endObject(); } catch (IOException e) { throw new UncheckedIOException(e); } - IndexRequest request = new IndexRequest(indexName).source(builder); + IndexRequest request = new IndexRequest(indexName).source(builder).id(id); return request; }); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index 370091ca6f36..d53354db2aa7 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -16,9 +16,12 @@ import 
org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.common.notifications.Auditor; import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.dataframe.DataFrame; import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; @@ -35,15 +38,20 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx private final DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService; private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; + private final Auditor auditor; - public DataFrameTransformPersistentTasksExecutor(Client client, DataFrameTransformsConfigManager transformsConfigManager, - DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService, SchedulerEngine schedulerEngine, - ThreadPool threadPool) { + public DataFrameTransformPersistentTasksExecutor(Client client, + DataFrameTransformsConfigManager transformsConfigManager, + DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService, + SchedulerEngine schedulerEngine, + Auditor auditor, + ThreadPool threadPool) { super(DataFrameField.TASK_NAME, DataFrame.TASK_THREAD_POOL_NAME); this.client = client; this.transformsConfigManager = transformsConfigManager; this.dataFrameTransformsCheckpointService = dataFrameTransformsCheckpointService; this.schedulerEngine = schedulerEngine; + this.auditor = auditor; this.threadPool = threadPool; } @@ -53,6 +61,11 @@ protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTr SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job( DataFrameTransformTask.SCHEDULE_NAME + "_" + params.getId(), next()); + DataFrameTransformState transformState = (DataFrameTransformState) state; + if (transformState != null && transformState.getTaskState() == DataFrameTransformTaskState.FAILED) { + logger.warn("Tried to start failed transform [" + params.getId() + "] failure reason: " + transformState.getReason()); + return; + } // Note that while the task is added to the scheduler here, the internal state will prevent // it from doing any work until the task is "started" via the StartTransform api schedulerEngine.register(buildTask); @@ -71,7 +84,7 @@ static SchedulerEngine.Schedule next() { protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { return new DataFrameTransformTask(id, type, action, parentTaskId, persistentTask.getParams(), - (DataFrameTransformState) persistentTask.getState(), client, transformsConfigManager, dataFrameTransformsCheckpointService, - schedulerEngine, threadPool, headers); + (DataFrameTransformState) persistentTask.getState(), client, transformsConfigManager, + dataFrameTransformsCheckpointService, schedulerEngine, auditor, threadPool, headers); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 107adc3b2bf5..b8bc2870307a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -18,19 +18,24 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.common.notifications.Auditor; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Event; @@ -38,39 +43,55 @@ import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.pivot.SchemaUtil; +import java.util.Arrays; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; + public class DataFrameTransformTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { private static final Logger logger = LogManager.getLogger(DataFrameTransformTask.class); + // TODO consider moving to dynamic cluster setting + private static final int MAX_CONTINUOUS_FAILURES = 10; + private static final IndexerState[] RUNNING_STATES = new IndexerState[]{IndexerState.STARTED, IndexerState.INDEXING}; public static final String SCHEDULE_NAME = DataFrameField.TASK_NAME + "/schedule"; private final DataFrameTransform transform; private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; private final DataFrameIndexer indexer; + private final Auditor auditor; + private final AtomicReference taskState; + private final AtomicReference stateReason; // the generation of this data frame, for v1 there will be only // 0: data frame not created or still indexing // 1: data frame complete, all data has been indexed private final AtomicReference generation; + private final 
AtomicInteger failureCount; public DataFrameTransformTask(long id, String type, String action, TaskId parentTask, DataFrameTransform transform, - DataFrameTransformState state, Client client, DataFrameTransformsConfigManager transformsConfigManager, - DataFrameTransformsCheckpointService transformsCheckpointService, SchedulerEngine schedulerEngine, ThreadPool threadPool, - Map headers) { + DataFrameTransformState state, Client client, DataFrameTransformsConfigManager transformsConfigManager, + DataFrameTransformsCheckpointService transformsCheckpointService, + SchedulerEngine schedulerEngine, Auditor auditor, + ThreadPool threadPool, Map headers) { super(id, type, action, DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transform.getId(), parentTask, headers); this.transform = transform; this.schedulerEngine = schedulerEngine; this.threadPool = threadPool; + this.auditor = auditor; IndexerState initialState = IndexerState.STOPPED; + DataFrameTransformTaskState initialTaskState = DataFrameTransformTaskState.STOPPED; + String initialReason = null; long initialGeneration = 0; Map initialPosition = null; logger.info("[{}] init, got state: [{}]", transform.getId(), state != null); if (state != null) { + initialTaskState = state.getTaskState(); + initialReason = state.getReason(); final IndexerState existingState = state.getIndexerState(); logger.info("[{}] Loading existing state: [{}], position [{}]", transform.getId(), existingState, state.getPosition()); if (existingState.equals(IndexerState.INDEXING)) { @@ -87,8 +108,11 @@ public DataFrameTransformTask(long id, String type, String action, TaskId parent } this.indexer = new ClientDataFrameIndexer(transform.getId(), transformsConfigManager, transformsCheckpointService, - new AtomicReference<>(initialState), initialPosition, client); - this.generation = new AtomicReference(initialGeneration); + new AtomicReference<>(initialState), initialPosition, client, auditor); + this.generation = new AtomicReference<>(initialGeneration); + this.taskState = new AtomicReference<>(initialTaskState); + this.stateReason = new AtomicReference<>(initialReason); + this.failureCount = new AtomicInteger(0); } public String getTransformId() { @@ -104,7 +128,7 @@ public Status getStatus() { } public DataFrameTransformState getState() { - return new DataFrameTransformState(indexer.getState(), indexer.getPosition(), generation.get()); + return new DataFrameTransformState(taskState.get(), indexer.getState(), indexer.getPosition(), generation.get(), stateReason.get()); } public DataFrameIndexerTransformStats getStats() { @@ -120,64 +144,71 @@ public boolean isStopped() { } public synchronized void start(ActionListener listener) { - final IndexerState prevState = indexer.getState(); - if (prevState != IndexerState.STOPPED) { - // fails if the task is not STOPPED - listener.onFailure(new ElasticsearchException("Cannot start task for data frame transform [{}], because state was [{}]", - transform.getId(), prevState)); - return; - } - final IndexerState newState = indexer.start(); - if (newState != IndexerState.STARTED) { + if (Arrays.stream(RUNNING_STATES).noneMatch(newState::equals)) { listener.onFailure(new ElasticsearchException("Cannot start task for data frame transform [{}], because state was [{}]", transform.getId(), newState)); return; } - - final DataFrameTransformState state = new DataFrameTransformState(IndexerState.STOPPED, indexer.getPosition(), generation.get()); - - logger.debug("Updating state for data frame transform [{}] to [{}][{}]", transform.getId(), 
state.getIndexerState(), - state.getPosition()); - updatePersistentTaskState(state, - ActionListener.wrap( - (task) -> { - logger.debug("Successfully updated state for data frame transform [" + transform.getId() + "] to [" - + state.getIndexerState() + "][" + state.getPosition() + "]"); - listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); - }, (exc) -> { - // We were unable to update the persistent status, so we need to shutdown the indexer too. - indexer.stop(); - listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [" + stateReason.set(null); + taskState.set(DataFrameTransformTaskState.STARTED); + failureCount.set(0); + + final DataFrameTransformState state = new DataFrameTransformState( + DataFrameTransformTaskState.STARTED, + IndexerState.STOPPED, + indexer.getPosition(), + generation.get(), + null); + + logger.info("Updating state for data frame transform [{}] to [{}]", transform.getId(), state.toString()); + persistStateToClusterState(state, ActionListener.wrap( + task -> { + auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); + listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); + }, + exc -> { + indexer.stop(); + listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [" + transform.getId() + "] to [" + state.getIndexerState() + "].", exc)); - }) - ); + } + )); } public synchronized void stop(ActionListener listener) { + // taskState is initialized as STOPPED and is updated in tandem with the indexerState + // Consequently, if it is STOPPED, we consider the whole task STOPPED. + if (taskState.get() == DataFrameTransformTaskState.STOPPED) { + listener.onResponse(new StopDataFrameTransformAction.Response(true)); + return; + } final IndexerState newState = indexer.stop(); switch (newState) { case STOPPED: - listener.onResponse(new StopDataFrameTransformAction.Response(true)); - break; - + // Fall through to `STOPPING` as the behavior is the same for both, we should persist for both case STOPPING: // update the persistent state to STOPPED. There are two scenarios and both are safe: // 1. we persist STOPPED now, indexer continues a bit then sees the flag and checkpoints another STOPPED with the more recent // position. // 2. we persist STOPPED now, indexer continues a bit but then dies. When/if we resume we'll pick up at last checkpoint, // overwrite some docs and eventually checkpoint. 
- DataFrameTransformState state = new DataFrameTransformState(IndexerState.STOPPED, indexer.getPosition(), generation.get()); - updatePersistentTaskState(state, ActionListener.wrap((task) -> { - logger.debug("Successfully updated state for data frame transform [{}] to [{}]", transform.getId(), - state.getIndexerState()); - listener.onResponse(new StopDataFrameTransformAction.Response(true)); - }, (exc) -> { - listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [{}] to [{}]", exc, - transform.getId(), state.getIndexerState())); - })); + taskState.set(DataFrameTransformTaskState.STOPPED); + DataFrameTransformState state = new DataFrameTransformState( + DataFrameTransformTaskState.STOPPED, + IndexerState.STOPPED, + indexer.getPosition(), + generation.get(), + stateReason.get()); + persistStateToClusterState(state, ActionListener.wrap( + task -> { + auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); + listener.onResponse(new StopDataFrameTransformAction.Response(true)); + }, + exc -> listener.onFailure(new ElasticsearchException( + "Error while updating state for data frame transform [{}] to [{}]", exc, + transform.getId(), + state.getIndexerState())))); break; - default: listener.onFailure(new ElasticsearchException("Cannot stop task for data frame transform [{}], because state was [{}]", transform.getId(), newState)); @@ -210,6 +241,40 @@ synchronized void shutdown() { markAsCompleted(); } + void persistStateToClusterState(DataFrameTransformState state, + ActionListener> listener) { + updatePersistentTaskState(state, ActionListener.wrap( + success -> { + logger.debug("Successfully updated state for data frame transform [{}] to [{}]", transform.getId(), state.toString()); + listener.onResponse(success); + }, + failure -> { + auditor.warning(transform.getId(), "Failed to persist to state to cluster state: " + failure.getMessage()); + logger.error("Failed to update state for data frame transform [" + transform.getId() + "]", failure); + listener.onFailure(failure); + } + )); + } + + private boolean isIrrecoverableFailure(Exception e) { + return e instanceof IndexNotFoundException || e instanceof DataFrameConfigurationException; + } + + synchronized void handleFailure(Exception e) { + if (isIrrecoverableFailure(e) || failureCount.incrementAndGet() > MAX_CONTINUOUS_FAILURES) { + String failureMessage = isIrrecoverableFailure(e) ? + "task encountered irrecoverable failure: " + e.getMessage() : + "task encountered more than " + MAX_CONTINUOUS_FAILURES + " failures; latest failure: " + e.getMessage(); + auditor.error(transform.getId(), failureMessage); + stateReason.set(failureMessage); + taskState.set(DataFrameTransformTaskState.FAILED); + persistStateToClusterState(getState(), ActionListener.wrap( + r -> failureCount.set(0), // Successfully marked as failed, reset counter so that task can be restarted + exception -> {} // Noop, internal method logs the failure to update the state + )); + } + } + /** * This is called when the persistent task signals that the allocated task should be terminated. 
* Termination in the task framework is essentially voluntary, as the allocated task can only be @@ -231,18 +296,23 @@ protected class ClientDataFrameIndexer extends DataFrameIndexer { private final DataFrameTransformsConfigManager transformsConfigManager; private final DataFrameTransformsCheckpointService transformsCheckpointService; private final String transformId; + private final Auditor auditor; + // Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index + private volatile String lastAuditedExceptionMessage = null; private Map fieldMappings = null; private DataFrameTransformConfig transformConfig = null; public ClientDataFrameIndexer(String transformId, DataFrameTransformsConfigManager transformsConfigManager, - DataFrameTransformsCheckpointService transformsCheckpointService, AtomicReference initialState, - Map initialPosition, Client client) { + DataFrameTransformsCheckpointService transformsCheckpointService, + AtomicReference initialState, Map initialPosition, Client client, + Auditor auditor) { super(threadPool.executor(ThreadPool.Names.GENERIC), initialState, initialPosition); this.transformId = transformId; this.transformsConfigManager = transformsConfigManager; this.transformsCheckpointService = transformsCheckpointService; this.client = client; + this.auditor = auditor; } @Override @@ -262,12 +332,17 @@ protected String getJobId() { @Override public synchronized boolean maybeTriggerAsyncJob(long now) { + if (taskState.get() == DataFrameTransformTaskState.FAILED) { + logger.debug("Schedule was triggered for transform [" + getJobId() + "] but task is failed. Ignoring trigger."); + return false; + } + if (transformConfig == null) { CountDownLatch latch = new CountDownLatch(1); - transformsConfigManager.getTransformConfiguration(transformId, new LatchedActionListener<>(ActionListener.wrap(config -> { - transformConfig = config; - }, e -> { + transformsConfigManager.getTransformConfiguration(transformId, new LatchedActionListener<>(ActionListener.wrap( + config -> transformConfig = config, + e -> { throw new RuntimeException( DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId), e); }), latch)); @@ -280,21 +355,21 @@ public synchronized boolean maybeTriggerAsyncJob(long now) { } } - // todo: set job into failed state if (transformConfig.isValid() == false) { - throw new RuntimeException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_INVALID, transformId)); + DataFrameConfigurationException exception = new DataFrameConfigurationException(transformId); + handleFailure(exception); + throw exception; } if (fieldMappings == null) { CountDownLatch latch = new CountDownLatch(1); - SchemaUtil.getDestinationFieldMappings(client, transformConfig.getDestination(), new LatchedActionListener<>( + SchemaUtil.getDestinationFieldMappings(client, transformConfig.getDestination().getIndex(), new LatchedActionListener<>( ActionListener.wrap( destinationMappings -> fieldMappings = destinationMappings, e -> { throw new RuntimeException( DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNABLE_TO_GATHER_FIELD_MAPPINGS, - transformConfig.getDestination()), + transformConfig.getDestination().getIndex()), e); }), latch)); try { @@ -302,7 +377,7 @@ public synchronized boolean maybeTriggerAsyncJob(long now) { } catch (InterruptedException e) { throw new RuntimeException( DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNABLE_TO_GATHER_FIELD_MAPPINGS, - 
transformConfig.getDestination()), + transformConfig.getDestination().getIndex()), e); } } @@ -330,34 +405,57 @@ protected void doSaveState(IndexerState indexerState, Map positi return; } - if(indexerState.equals(IndexerState.STARTED)) { - // if the indexer resets the state to started, it means it is done, so increment the generation + if(indexerState.equals(IndexerState.STARTED) && getStats().getNumDocuments() > 0) { + // if the indexer resets the state to started, it means it is done with a run through the data. + // But, if there were no documents, we should allow it to attempt to gather more again, as there is no risk of overwriting + // Some reasons for no documents are (but is not limited to): + // * Could have failed early on search or index + // * Have an empty index + // * Have a query that returns no documents generation.compareAndSet(0L, 1L); } - final DataFrameTransformState state = new DataFrameTransformState(indexerState, getPosition(), generation.get()); + final DataFrameTransformState state = new DataFrameTransformState( + taskState.get(), + indexerState, + getPosition(), + generation.get(), + stateReason.get()); logger.info("Updating persistent state of transform [" + transform.getId() + "] to [" + state.toString() + "]"); - - updatePersistentTaskState(state, ActionListener.wrap(task -> next.run(), exc -> { - logger.error("Updating persistent state of transform [" + transform.getId() + "] failed", exc); - next.run(); - })); + persistStateToClusterState(state, ActionListener.wrap(t -> next.run(), e -> next.run())); } @Override protected void onFailure(Exception exc) { - logger.warn("Data frame transform [" + transform.getId() + "] failed with an exception: ", exc); + // Since our schedule fires again very quickly after failures it is possible to run into the same failure numerous + // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one + if (exc.getMessage().equals(lastAuditedExceptionMessage) == false) { + auditor.warning(transform.getId(), "Data frame transform encountered an exception: " + exc.getMessage()); + lastAuditedExceptionMessage = exc.getMessage(); + } + logger.warn("Data frame transform [" + transform.getId() + "] encountered an exception: ", exc); + handleFailure(exc); } @Override protected void onFinish() { + auditor.info(transform.getId(), "Finished indexing for data frame transform"); logger.info("Finished indexing for data frame transform [" + transform.getId() + "]"); } @Override protected void onAbort() { + auditor.info(transform.getId(), "Received abort request, stopping indexer"); logger.info("Data frame transform [" + transform.getId() + "] received abort request, stopping indexer"); shutdown(); } } + + class DataFrameConfigurationException extends RuntimeException { + + DataFrameConfigurationException(String transformId) { + super(DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_INVALID, transformId)); + } + + } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java new file mode 100644 index 000000000000..d9223fe90dd4 --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.Numbers; +import org.elasticsearch.common.hash.MurmurHash3; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.TreeMap; + +/** + * ID Generator for creating unique but deterministic document ids. + * + * uses MurmurHash with 128 bits + */ +public final class IDGenerator { + private static final byte[] NULL_VALUE = "__NULL_VALUE__".getBytes(StandardCharsets.UTF_8); + private static final byte DELIM = '$'; + private static final long SEED = 19; + private static final int MAX_FIRST_BYTES = 5; + + private final TreeMap objectsForIDGeneration = new TreeMap<>(); + + public IDGenerator() { + } + + /** + * Add a value to the generator + * @param key object identifier, to be used for consistent sorting + * @param value the value + */ + public void add(String key, Object value) { + if (objectsForIDGeneration.containsKey(key)) { + throw new IllegalArgumentException("Keys must be unique"); + } + objectsForIDGeneration.put(key, value); + } + + /** + * Create a document id based on the input objects + * + * @return a document id as string + */ + public String getID() { + if (objectsForIDGeneration.size() == 0) { + throw new RuntimeException("Add at least 1 object before generating the ID"); + } + + BytesRefBuilder buffer = new BytesRefBuilder(); + BytesRefBuilder hashedBytes = new BytesRefBuilder(); + + for (Object value : objectsForIDGeneration.values()) { + byte[] v = getBytes(value); + + buffer.append(v, 0, v.length); + buffer.append(DELIM); + + // keep the 1st byte of every object + if (hashedBytes.length() <= MAX_FIRST_BYTES) { + hashedBytes.append(v[0]); + } + } + MurmurHash3.Hash128 hasher = MurmurHash3.hash128(buffer.bytes(), 0, buffer.length(), SEED, new MurmurHash3.Hash128()); + hashedBytes.append(Numbers.longToBytes(hasher.h1), 0, 8); + hashedBytes.append(Numbers.longToBytes(hasher.h2), 0, 8); + return Base64.getUrlEncoder().withoutPadding().encodeToString(hashedBytes.bytes()); + } + + /** + * Turns objects into byte arrays, only supporting types returned groupBy + * + * @param value the value as object + * @return a byte representation of the input object + */ + private static byte[] getBytes(Object value) { + if (value == null) { + return NULL_VALUE; + } else if (value instanceof String) { + return ((String) value).getBytes(StandardCharsets.UTF_8); + } else if (value instanceof Long) { + return Numbers.longToBytes((Long) value); + } else if (value instanceof Double) { + return Numbers.doubleToBytes((Double) value); + } else if (value instanceof Integer) { + return Numbers.intToBytes((Integer) value); + } + + throw new IllegalArgumentException("Value of type [" + value.getClass() + "] is not supported"); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index fa7536497c4f..5d77f82e610a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -13,8 +13,10 @@ import 
org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; +import org.elasticsearch.xpack.dataframe.transforms.IDGenerator; import java.util.Collection; import java.util.HashMap; @@ -43,10 +45,17 @@ public static Stream> extractCompositeAggregationResults(Com DataFrameIndexerTransformStats stats) { return agg.getBuckets().stream().map(bucket -> { stats.incrementNumDocuments(bucket.getDocCount()); - Map document = new HashMap<>(); - groups.getGroups().keySet().forEach(destinationFieldName -> - document.put(destinationFieldName, bucket.getKey().get(destinationFieldName))); + // generator to create unique but deterministic document ids, so we + // - do not create duplicates if we re-run after failure + // - update documents + IDGenerator idGen = new IDGenerator(); + + groups.getGroups().keySet().forEach(destinationFieldName -> { + Object value = bucket.getKey().get(destinationFieldName); + idGen.add(destinationFieldName, value); + document.put(destinationFieldName, value); + }); for (AggregationBuilder aggregationBuilder : aggregationBuilders) { String aggName = aggregationBuilder.getName(); @@ -71,6 +80,9 @@ public static Stream> extractCompositeAggregationResults(Com assert false; } } + + document.put(DataFrameField.DOCUMENT_ID_FIELD, idGen.getID()); + return document; }); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java index 26ac7d93bf3c..0cf3edec1628 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java @@ -38,13 +38,13 @@ public class Pivot { private static final String COMPOSITE_AGGREGATION_NAME = "_data_frame"; private final PivotConfig config; - private final String source; + private final String[] source; // objects for re-using private final CompositeAggregationBuilder cachedCompositeAggregation; private final SearchRequest cachedSearchRequest; - public Pivot(String source, QueryBuilder query, PivotConfig config) { + public Pivot(String[] source, QueryBuilder query, PivotConfig config) { this.source = source; this.config = config; this.cachedCompositeAggregation = createCompositeAggregation(config); @@ -108,7 +108,7 @@ private void runTestQuery(Client client, final ActionListener listener) })); } - private static SearchRequest createSearchRequest(String index, QueryBuilder query, CompositeAggregationBuilder compositeAggregation) { + private static SearchRequest createSearchRequest(String[] index, QueryBuilder query, CompositeAggregationBuilder compositeAggregation) { SearchRequest searchRequest = new SearchRequest(index); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.aggregation(compositeAggregation); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java index 175be3ea30e1..ff967213e814 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java @@ -57,7 +57,7 @@ public static boolean isNumericType(String type) { */ public static void deduceMappings(final Client client, final PivotConfig config, - final String source, + final String[] source, final ActionListener> listener) { // collects the fieldnames used as source for aggregations Map aggregationSourceFieldNames = new HashMap<>(); @@ -156,7 +156,7 @@ private static Map resolveMappings(Map aggregati /* * Very "magic" helper method to extract the source mappings */ - private static void getSourceFieldMappings(Client client, String index, String[] fields, + private static void getSourceFieldMappings(Client client, String[] index, String[] fields, ActionListener> listener) { GetFieldMappingsRequest fieldMappingRequest = new GetFieldMappingsRequest(); fieldMappingRequest.indices(index); @@ -182,7 +182,7 @@ private static Map extractFieldMappings(Map map = (Map) typeMap; if (map.containsKey("type")) { String type = map.get("type").toString(); - logger.debug("Extracted type for [" + fieldName + "] : [" + type + "]"); + logger.debug("Extracted type for [" + fieldName + "] : [" + type + "] from index [" + indexName +"]"); // TODO: overwrites types, requires resolve if // types are mixed extractedTypes.put(fieldName, type); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java index ea5362d184b3..0868315165cd 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java @@ -82,6 +82,7 @@ public void testExtractIndexCheckpointsLostPrimaries() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40368") public void testExtractIndexCheckpointsInconsistentGlobalCheckpoints() { Map expectedCheckpoints = new HashMap<>(); Set indices = randomUserIndices(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java new file mode 100644 index 000000000000..fd378a2c4c17 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
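With deduceMappings and getSourceFieldMappings above now taking a String[] of source indices, the field-mapping lookup fans out over several indices in one request. A hedged sketch of that request construction follows; the index and field names are made up, only the GetFieldMappingsRequest calls themselves mirror the code above.

import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;

// Build one field-mappings request spanning several source indices; the
// response is then reduced per field name, as resolveMappings() does above.
GetFieldMappingsRequest fieldMappingRequest = new GetFieldMappingsRequest();
fieldMappingRequest.indices("transactions-2019.01", "transactions-2019.02"); // hypothetical index names
fieldMappingRequest.fields("price", "customer_id");                          // hypothetical field names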
+ */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.elasticsearch.test.ESTestCase; + +public class IDGeneratorTests extends ESTestCase { + + public void testSupportedTypes() { + IDGenerator idGen = new IDGenerator(); + idGen.add("key1", "value1"); + String id = idGen.getID(); + idGen.add("key2", null); + assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key3", "value3"); + assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key4", 12L); + assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key5", 44.444); + assertNotEquals(id, idGen.getID()); + idGen.add("key6", 13); + assertNotEquals(id, idGen.getID()); + } + + public void testOrderIndependence() { + IDGenerator idGen = new IDGenerator(); + idGen.add("key1", "value1"); + idGen.add("key2", "value2"); + String id1 = idGen.getID(); + + idGen = new IDGenerator(); + idGen.add("key2", "value2"); + idGen.add("key1", "value1"); + String id2 = idGen.getID(); + + assertEquals(id1, id2); + } + + public void testEmptyThrows() { + IDGenerator idGen = new IDGenerator(); + + RuntimeException e = expectThrows(RuntimeException.class, () -> idGen.getID()); + + assertEquals("Add at least 1 object before generating the ID", e.getMessage()); + } + + public void testDuplicatedKeyThrows() { + IDGenerator idGen = new IDGenerator(); + idGen.add("key1", "value1"); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> idGen.add("key1", "some_other_value")); + + assertEquals("Keys must be unique", e.getMessage()); + } + +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java index 287f327d0f66..eedf6264f348 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; @@ -51,8 +52,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -147,7 +150,7 @@ aggTypedName, asMap( executeTest(groupBy, aggregationBuilders, input, fieldTypeMap, expected, 20); } - public void testExtractCompositeAggregationResultsMultiSources() throws IOException { + public void testExtractCompositeAggregationResultsMultipleGroups() throws IOException { String targetField = randomAlphaOfLengthBetween(5, 10); String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2"; @@ -406,19 +409,159 @@ aggTypedName2, asMap( executeTest(groupBy, aggregationBuilders, input, fieldTypeMap, expected, 10); } + public void testExtractCompositeAggregationResultsDocIDs() throws IOException { + String targetField = randomAlphaOfLengthBetween(5, 10); 
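The point of carrying DataFrameField.DOCUMENT_ID_FIELD through the extracted documents is that the indexing side can use it as the destination _id, so a re-run over the same buckets updates documents instead of duplicating them. The plugin's own indexing code is not part of this diff; the snippet below is only one plausible way a consumer could use the field, with a hypothetical destination index name.

import java.util.Map;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.xpack.core.dataframe.DataFrameField;

// 'document' is one of the maps produced by extractCompositeAggregationResults above;
// 'destIndex' is the transform's destination index.
static IndexRequest toIndexRequest(String destIndex, Map<String, Object> document) {
    // pull the deterministic id out of the document and use it as the _id
    String docId = (String) document.remove(DataFrameField.DOCUMENT_ID_FIELD);
    return new IndexRequest(destIndex)
            .id(docId)
            .source(document);
}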
+ String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2"; + + GroupConfig groupBy = parseGroupConfig("{" + + "\"" + targetField + "\" : {" + + " \"terms\" : {" + + " \"field\" : \"doesn't_matter_for_this_test\"" + + " } }," + + "\"" + targetField2 + "\" : {" + + " \"terms\" : {" + + " \"field\" : \"doesn't_matter_for_this_test\"" + + " } }" + + "}"); + + String aggName = randomAlphaOfLengthBetween(5, 10); + String aggTypedName = "avg#" + aggName; + Collection aggregationBuilders = Collections.singletonList(AggregationBuilders.avg(aggName)); + + Map inputFirstRun = asMap( + "buckets", + asList( + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 42.33), + DOC_COUNT, 1), + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 8.4), + DOC_COUNT, 2), + asMap( + KEY, asMap( + targetField, "ID2", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 28.99), + DOC_COUNT, 3), + asMap( + KEY, asMap( + targetField, "ID3", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 12.55), + DOC_COUNT, 4) + )); + + Map inputSecondRun = asMap( + "buckets", + asList( + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 433.33), + DOC_COUNT, 12), + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 83.4), + DOC_COUNT, 32), + asMap( + KEY, asMap( + targetField, "ID2", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 21.99), + DOC_COUNT, 2), + asMap( + KEY, asMap( + targetField, "ID3", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 122.55), + DOC_COUNT, 44) + )); + DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); + + Map fieldTypeMap = asStringMap( + aggName, "double", + targetField, "keyword", + targetField2, "keyword" + ); + + List> resultFirstRun = runExtraction(groupBy, aggregationBuilders, inputFirstRun, fieldTypeMap, stats); + List> resultSecondRun = runExtraction(groupBy, aggregationBuilders, inputSecondRun, fieldTypeMap, stats); + + assertNotEquals(resultFirstRun, resultSecondRun); + + Set documentIdsFirstRun = new HashSet<>(); + resultFirstRun.forEach(m -> { + documentIdsFirstRun.add((String) m.get(DataFrameField.DOCUMENT_ID_FIELD)); + }); + + assertEquals(4, documentIdsFirstRun.size()); + + Set documentIdsSecondRun = new HashSet<>(); + resultSecondRun.forEach(m -> { + documentIdsSecondRun.add((String) m.get(DataFrameField.DOCUMENT_ID_FIELD)); + }); + + assertEquals(4, documentIdsSecondRun.size()); + assertEquals(documentIdsFirstRun, documentIdsSecondRun); + } + + + private void executeTest(GroupConfig groups, Collection aggregationBuilders, Map input, Map fieldTypeMap, List> expected, long expectedDocCounts) throws IOException { DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.map(input); + List> result = runExtraction(groups, aggregationBuilders, input, fieldTypeMap, stats); + + // remove the document ids and test uniqueness + Set documentIds = new HashSet<>(); + result.forEach(m -> { + documentIds.add((String) m.remove(DataFrameField.DOCUMENT_ID_FIELD)); + }); + + assertEquals(result.size(), documentIds.size()); + assertEquals(expected, result); + assertEquals(expectedDocCounts, stats.getNumDocuments()); + + } + + private List> runExtraction(GroupConfig groups, Collection 
aggregationBuilders, + Map input, Map fieldTypeMap, DataFrameIndexerTransformStats stats) throws IOException { + + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.map(input); + try (XContentParser parser = createParser(builder)) { CompositeAggregation agg = ParsedComposite.fromXContent(parser, "my_feature"); - List> result = AggregationResultUtils - .extractCompositeAggregationResults(agg, groups, aggregationBuilders, fieldTypeMap, stats).collect(Collectors.toList()); - - assertEquals(expected, result); - assertEquals(expectedDocCounts, stats.getNumDocuments()); + return AggregationResultUtils.extractCompositeAggregationResults(agg, groups, aggregationBuilders, fieldTypeMap, stats) + .collect(Collectors.toList()); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index f253efad0e94..c39e9a2589fc 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -85,13 +85,13 @@ protected NamedXContentRegistry xContentRegistry() { } public void testValidateExistingIndex() throws Exception { - Pivot pivot = new Pivot("existing_source_index", new MatchAllQueryBuilder(), getValidPivotConfig()); + Pivot pivot = new Pivot(new String[]{"existing_source_index"}, new MatchAllQueryBuilder(), getValidPivotConfig()); assertValidTransform(client, pivot); } public void testValidateNonExistingIndex() throws Exception { - Pivot pivot = new Pivot("non_existing_source_index", new MatchAllQueryBuilder(), getValidPivotConfig()); + Pivot pivot = new Pivot(new String[]{"non_existing_source_index"}, new MatchAllQueryBuilder(), getValidPivotConfig()); assertInvalidTransform(client, pivot); } @@ -99,7 +99,7 @@ public void testValidateNonExistingIndex() throws Exception { public void testSearchFailure() throws Exception { // test a failure during the search operation, transform creation fails if // search has failures although they might just be temporary - Pivot pivot = new Pivot("existing_source_index_with_failing_shards", + Pivot pivot = new Pivot(new String[]{"existing_source_index_with_failing_shards"}, new MatchAllQueryBuilder(), getValidPivotConfig()); @@ -110,10 +110,7 @@ public void testValidateAllSupportedAggregations() throws Exception { for (String agg : supportedAggregations) { AggregationConfig aggregationConfig = getAggregationConfig(agg); - Pivot pivot = new Pivot("existing_source", - new MatchAllQueryBuilder(), - getValidPivotConfig(aggregationConfig)); - + Pivot pivot = new Pivot(new String[]{"existing_source"}, new MatchAllQueryBuilder(), getValidPivotConfig(aggregationConfig)); assertValidTransform(client, pivot); } } @@ -122,10 +119,7 @@ public void testValidateAllUnsupportedAggregations() throws Exception { for (String agg : unsupportedAggregations) { AggregationConfig aggregationConfig = getAggregationConfig(agg); - Pivot pivot = new Pivot("existing_source", - new MatchAllQueryBuilder(), - getValidPivotConfig(aggregationConfig)); - + Pivot pivot = new Pivot(new String[]{"existing_source"}, new MatchAllQueryBuilder(), getValidPivotConfig(aggregationConfig)); assertInvalidTransform(client, pivot); } } diff --git 
a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java index 2a9e99de5590..4ca60acac37e 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java @@ -146,7 +146,7 @@ public void testQueryReturnsThreeBatches() throws Exception { testIterator.next(future); batch = future.get(); assertEquals(1, batch.size()); - assertTrue(batch.containsAll(Collections.singletonList(createJsonDoc("f")))); + assertTrue(batch.contains(createJsonDoc("f"))); assertFalse(testIterator.hasNext()); assertTrue(wasScrollCleared); @@ -183,7 +183,6 @@ private void assertSearchRequest() { SearchRequest searchRequest = searchRequests.get(0); assertThat(searchRequest.indices(), equalTo(new String[] {INDEX_NAME})); assertThat(searchRequest.scroll().keepAlive(), equalTo(TimeValue.timeValueMinutes(5))); - assertThat(searchRequest.types().length, equalTo(0)); assertThat(searchRequest.source().query(), equalTo(QueryBuilders.matchAllQuery())); assertThat(searchRequest.source().trackTotalHitsUpTo(), is(SearchContext.TRACK_TOTAL_HITS_ACCURATE)); } @@ -315,15 +314,6 @@ public SearchHitBuilder(int docId) { fields = new HashMap<>(); } - public SearchHitBuilder addField(String name, Object value) { - return addField(name, Arrays.asList(value)); - } - - public SearchHitBuilder addField(String name, List values) { - fields.put(name, new DocumentField(name, values)); - return this; - } - public SearchHitBuilder setSource(String sourceJson) { hit.sourceRef(new BytesArray(sourceJson)); return this; diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 470260a7efac..130d6deed567 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -41,6 +41,8 @@ public class RestGraphAction extends XPackRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGraphAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in graph requests is deprecated."; public static final ParseField TIMEOUT_FIELD = new ParseField("timeout"); public static final ParseField SIGNIFICANCE_FIELD = new ParseField("use_significance"); @@ -111,7 +113,10 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa parseHop(parser, currentHop, graphRequest); } - graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + if (request.hasParam("type")) { + deprecationLogger.deprecatedAndMaybeLog("graph_with_types", TYPES_DEPRECATION_MESSAGE); + graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + } return channel -> client.es().execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java new file mode 100644 index 
000000000000..486ac4e70e34 --- /dev/null +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.graph.rest.action; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; + +public class RestGraphActionTests extends RestActionTestCase { + + @Before + public void setUpAction() { + new RestGraphAction(Settings.EMPTY, controller()); + } + + public void testTypeInPath() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/some_type/_graph/explore") + .withContent(new BytesArray("{}"), XContentType.JSON) + .build(); + + dispatchRequest(request); + assertWarnings(RestGraphAction.TYPES_DEPRECATION_MESSAGE); + } + +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java index 790dd5de632e..2444cbf99fd5 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java @@ -16,18 +16,18 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.FreezeAction; -import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.test.AbstractDiffableSerializationTestCase; import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.FreezeAction; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata.IndexLifecycleMetadataDiff; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; @@ -137,7 +137,7 @@ protected Reader> diffReader() { } public void testMinimumSupportedVersion() { - assertEquals(Version.V_7_0_0, createTestInstance().getMinimalSupportedVersion()); + assertEquals(Version.V_6_6_0, createTestInstance().getMinimalSupportedVersion()); } public void testcontext() { diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java index 5d76e34772d8..a68fa2fe02a8 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/PersistJobIT.java @@ -51,7 +51,6 @@ public void testPersistJob() throws Exception { } // check that state is persisted after time has been advanced even if no new data is seen in the interim - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40347") public void testPersistJobOnGracefulShutdown_givenTimeAdvancedAfterNoNewData() throws Exception { String jobId = "time-advanced-after-no-new-data-test"; @@ -60,6 +59,7 @@ public void testPersistJobOnGracefulShutdown_givenTimeAdvancedAfterNoNewData() t FlushJobAction.Response flushResponse = flushJob(jobId, true); closeJob(jobId); + long job1CloseTime = System.currentTimeMillis() / 1000; // Check that state has been persisted SearchResponse stateDocsResponse1 = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) @@ -71,7 +71,7 @@ public void testPersistJobOnGracefulShutdown_givenTimeAdvancedAfterNoNewData() t int numQuantileRecords = 0; int numStateRecords = 0; for (SearchHit hit : stateDocsResponse1.getHits().getHits()) { - logger.info(hit.getId()); + logger.info("1: " + hit.getId()); if (hit.getId().contains("quantiles")) { ++numQuantileRecords; } else if (hit.getId().contains("model_state")) { @@ -82,6 +82,13 @@ public void testPersistJobOnGracefulShutdown_givenTimeAdvancedAfterNoNewData() t assertThat(numQuantileRecords, equalTo(1)); assertThat(numStateRecords, equalTo(1)); + // To generate unique snapshot IDs ensure that there is at least a 1s delay between the + // time each job was closed + assertBusy(() -> { + long timeNow = System.currentTimeMillis() / 1000; + assertFalse(job1CloseTime >= timeNow); + }); + // re-open the job openJob(jobId); @@ -104,7 +111,7 @@ public void testPersistJobOnGracefulShutdown_givenTimeAdvancedAfterNoNewData() t numQuantileRecords = 0; numStateRecords = 0; for (SearchHit hit : stateDocsResponse2.getHits().getHits()) { - logger.info(hit.getId()); + logger.info("2: " + hit.getId()); if (hit.getId().contains("quantiles")) { ++numQuantileRecords; } else if (hit.getId().contains("model_state")) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index 16a8e946e7ab..953f75801c74 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -21,21 +21,21 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import 
org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.ml.job.JobManagerHolder; -import org.elasticsearch.xpack.ml.process.NativeController; -import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.xpack.core.ml.stats.StatsAccumulator; +import org.elasticsearch.xpack.ml.job.JobManagerHolder; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import java.io.IOException; import java.util.Arrays; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index 58ff31a6bc84..44fc5f48a594 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.ml.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ElasticsearchTimeoutException; @@ -50,6 +52,7 @@ public class TransportSetUpgradeModeAction extends TransportMasterNodeAction { + private static final Logger logger = LogManager.getLogger(TransportSetUpgradeModeAction.class); private final AtomicBoolean isRunning = new AtomicBoolean(false); private final PersistentTasksClusterService persistentTasksClusterService; private final PersistentTasksService persistentTasksService; @@ -88,6 +91,7 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat String msg = "Attempted to set [upgrade_mode] to [" + request.isEnabled() + "] from [" + MlMetadata.getMlMetadata(state).isUpgradeMode() + "] while previous request was processing."; + logger.info(msg); Exception detail = new IllegalStateException(msg); listener.onFailure(new ElasticsearchStatusException( "Cannot change [upgrade_mode]. 
Previous request is still being processed.", @@ -98,17 +102,23 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat // Noop, nothing for us to do, simply return fast to the caller if (request.isEnabled() == MlMetadata.getMlMetadata(state).isUpgradeMode()) { + logger.info("Upgrade mode noop"); isRunning.set(false); listener.onResponse(new AcknowledgedResponse(true)); return; } + logger.info("Starting to set [upgrade_mode] to [" + request.isEnabled() + + "] from [" + MlMetadata.getMlMetadata(state).isUpgradeMode() + "]"); + ActionListener wrappedListener = ActionListener.wrap( r -> { + logger.info("Completed upgrade mode request"); isRunning.set(false); listener.onResponse(r); }, e -> { + logger.info("Completed upgrade mode request but with failure", e); isRunning.set(false); listener.onFailure(e); } @@ -131,9 +141,14 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat try { // Handle potential node timeouts, // these should be considered failures as tasks as still potentially executing + logger.info("Waited for tasks to be unassigned"); + if (r.getNodeFailures().isEmpty() == false) { + logger.info("There were node failures waiting for tasks", r.getNodeFailures().get(0)); + } rethrowAndSuppress(r.getNodeFailures()); wrappedListener.onResponse(new AcknowledgedResponse(true)); } catch (ElasticsearchException ex) { + logger.info("Caught node failures waiting for tasks to be unassigned", ex); wrappedListener.onFailure(ex); } }, @@ -144,7 +159,10 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat // <3> After isolating the datafeeds, unassign the tasks ActionListener> isolateDatafeedListener = ActionListener.wrap( - isolatedDatafeeds -> unassignPersistentTasks(tasksCustomMetaData, unassignPersistentTasksListener), + isolatedDatafeeds -> { + logger.info("Isolated the datafeeds"); + unassignPersistentTasks(tasksCustomMetaData, unassignPersistentTasksListener); + }, wrappedListener::onFailure ); @@ -176,20 +194,24 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat // State change was not acknowledged, we either timed out or ran into some exception // We should not continue and alert failure to the end user if (acknowledgedResponse.isAcknowledged() == false) { + logger.info("Cluster state update is NOT acknowledged"); wrappedListener.onFailure(new ElasticsearchTimeoutException("Unknown error occurred while updating cluster state")); return; } // There are no tasks to worry about starting/stopping if (tasksCustomMetaData == null || tasksCustomMetaData.tasks().isEmpty()) { + logger.info("No tasks to worry about after state update"); wrappedListener.onResponse(new AcknowledgedResponse(true)); return; } // Did we change from disabled -> enabled? 
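The wrappedListener above exists so that the isRunning flag is cleared on every exit path, success or failure, before the caller's listener fires. Stripped of the Elasticsearch listener types, the shape is roughly the following; the class and method names are illustrative, not the action's real structure.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

final class SingleFlightGuard {
    private final AtomicBoolean isRunning = new AtomicBoolean(false);

    // Only one request may be in flight at a time.
    boolean tryStart() {
        return isRunning.compareAndSet(false, true);
    }

    // Wrap the success callback so the flag is always cleared before delegating.
    <T> Consumer<T> wrapResponse(Consumer<T> delegate) {
        return response -> {
            isRunning.set(false);
            delegate.accept(response);
        };
    }

    // Wrap the failure callback the same way, so neither path can leak the flag.
    Consumer<Exception> wrapFailure(Consumer<Exception> delegate) {
        return e -> {
            isRunning.set(false);
            delegate.accept(e);
        };
    }
}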
if (request.isEnabled()) { + logger.info("Enabling upgrade mode, must isolate datafeeds"); isolateDatafeeds(tasksCustomMetaData, isolateDatafeedListener); } else { + logger.info("Disabling upgrade mode, must wait for tasks to not have AWAITING_UPGRADE assignment"); persistentTasksService.waitForPersistentTasksCondition( (persistentTasksCustomMetaData) -> // Wait for jobs to not be "Awaiting upgrade" @@ -202,7 +224,10 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat (t) -> t.getAssignment().equals(AWAITING_UPGRADE)) .isEmpty(), request.timeout(), - ActionListener.wrap(r -> wrappedListener.onResponse(new AcknowledgedResponse(true)), wrappedListener::onFailure) + ActionListener.wrap(r -> { + logger.info("Done waiting for tasks to be out of AWAITING_UPGRADE"); + wrappedListener.onResponse(new AcknowledgedResponse(true)); + }, wrappedListener::onFailure) ); } }, @@ -215,11 +240,13 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat @Override protected AcknowledgedResponse newResponse(boolean acknowledged) { + logger.info("Cluster update response built: " + acknowledged); return new AcknowledgedResponse(acknowledged); } @Override public ClusterState execute(ClusterState currentState) throws Exception { + logger.info("Executing cluster state update"); MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metaData().custom(MlMetadata.TYPE)); builder.isUpgradeMode(request.isEnabled()); ClusterState.Builder newState = ClusterState.builder(currentState); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java index c72e645dfd04..470004037704 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDoc.java @@ -162,6 +162,7 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO "index_stats.primaries.segments.fixed_bit_set_memory_in_bytes", "index_stats.primaries.store.size_in_bytes", "index_stats.primaries.refresh.total_time_in_millis", + "index_stats.primaries.refresh.external_total_time_in_millis", "index_stats.total.docs.count", "index_stats.total.fielddata.memory_size_in_bytes", "index_stats.total.fielddata.evictions", @@ -191,5 +192,6 @@ protected void innerToXContent(XContentBuilder builder, Params params) throws IO "index_stats.total.segments.version_map_memory_in_bytes", "index_stats.total.segments.fixed_bit_set_memory_in_bytes", "index_stats.total.store.size_in_bytes", - "index_stats.total.refresh.total_time_in_millis"); + "index_stats.total.refresh.total_time_in_millis", + "index_stats.total.refresh.external_total_time_in_millis"); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java index da9063507daa..d3bcf13d9969 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/indices/IndexStatsMonitoringDocTests.java @@ 
-42,6 +42,7 @@ import java.util.Locale; import java.util.Set; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -125,7 +126,8 @@ public void testToXContent() throws IOException { new IndexStatsMonitoringDoc("_cluster", 1502266739402L, 1506593717631L, node, indexStats, metaData, routingTable); final BytesReference xContent = XContentHelper.toXContent(document, XContentType.JSON, false); - assertEquals("{" + assertThat(xContent.utf8ToString(), equalTo( + "{" + "\"cluster_uuid\":\"_cluster\"," + "\"timestamp\":\"2017-08-09T08:18:59.402Z\"," + "\"interval_ms\":1506593717631," @@ -148,19 +150,20 @@ public void testToXContent() throws IOException { + "\"size_in_bytes\":13" + "}," + "\"indexing\":{" - + "\"index_total\":15," - + "\"index_time_in_millis\":16," - + "\"throttle_time_in_millis\":17" + + "\"index_total\":16," + + "\"index_time_in_millis\":17," + + "\"throttle_time_in_millis\":18" + "}," + "\"search\":{" - + "\"query_total\":18," - + "\"query_time_in_millis\":19" + + "\"query_total\":19," + + "\"query_time_in_millis\":20" + "}," + "\"merges\":{" + "\"total_size_in_bytes\":4" + "}," + "\"refresh\":{" - + "\"total_time_in_millis\":14" + + "\"total_time_in_millis\":14," + + "\"external_total_time_in_millis\":15" + "}," + "\"query_cache\":{" + "\"memory_size_in_bytes\":5," @@ -173,17 +176,17 @@ public void testToXContent() throws IOException { + "\"evictions\":3" + "}," + "\"segments\":{" - + "\"count\":20," - + "\"memory_in_bytes\":21," - + "\"terms_memory_in_bytes\":22," - + "\"stored_fields_memory_in_bytes\":23," - + "\"term_vectors_memory_in_bytes\":24," - + "\"norms_memory_in_bytes\":25," - + "\"points_memory_in_bytes\":26," - + "\"doc_values_memory_in_bytes\":27," - + "\"index_writer_memory_in_bytes\":28," - + "\"version_map_memory_in_bytes\":29," - + "\"fixed_bit_set_memory_in_bytes\":30" + + "\"count\":21," + + "\"memory_in_bytes\":22," + + "\"terms_memory_in_bytes\":23," + + "\"stored_fields_memory_in_bytes\":24," + + "\"term_vectors_memory_in_bytes\":25," + + "\"norms_memory_in_bytes\":26," + + "\"points_memory_in_bytes\":27," + + "\"doc_values_memory_in_bytes\":28," + + "\"index_writer_memory_in_bytes\":29," + + "\"version_map_memory_in_bytes\":30," + + "\"fixed_bit_set_memory_in_bytes\":31" + "}," + "\"request_cache\":{" + "\"memory_size_in_bytes\":9," @@ -200,19 +203,20 @@ public void testToXContent() throws IOException { + "\"size_in_bytes\":13" + "}," + "\"indexing\":{" - + "\"index_total\":15," - + "\"index_time_in_millis\":16," - + "\"throttle_time_in_millis\":17" + + "\"index_total\":16," + + "\"index_time_in_millis\":17," + + "\"throttle_time_in_millis\":18" + "}," + "\"search\":{" - + "\"query_total\":18," - + "\"query_time_in_millis\":19" + + "\"query_total\":19," + + "\"query_time_in_millis\":20" + "}," + "\"merges\":{" + "\"total_size_in_bytes\":4" + "}," + "\"refresh\":{" - + "\"total_time_in_millis\":14" + + "\"total_time_in_millis\":14," + + "\"external_total_time_in_millis\":15" + "}," + "\"query_cache\":{" + "\"memory_size_in_bytes\":5," @@ -225,17 +229,17 @@ public void testToXContent() throws IOException { + "\"evictions\":3" + "}," + "\"segments\":{" - + "\"count\":20," - + "\"memory_in_bytes\":21," - + "\"terms_memory_in_bytes\":22," - + "\"stored_fields_memory_in_bytes\":23," - + "\"term_vectors_memory_in_bytes\":24," - + "\"norms_memory_in_bytes\":25," - + "\"points_memory_in_bytes\":26," - + 
"\"doc_values_memory_in_bytes\":27," - + "\"index_writer_memory_in_bytes\":28," - + "\"version_map_memory_in_bytes\":29," - + "\"fixed_bit_set_memory_in_bytes\":30" + + "\"count\":21," + + "\"memory_in_bytes\":22," + + "\"terms_memory_in_bytes\":23," + + "\"stored_fields_memory_in_bytes\":24," + + "\"term_vectors_memory_in_bytes\":25," + + "\"norms_memory_in_bytes\":26," + + "\"points_memory_in_bytes\":27," + + "\"doc_values_memory_in_bytes\":28," + + "\"index_writer_memory_in_bytes\":29," + + "\"version_map_memory_in_bytes\":30," + + "\"fixed_bit_set_memory_in_bytes\":31" + "}," + "\"request_cache\":{" + "\"memory_size_in_bytes\":9," @@ -245,7 +249,7 @@ public void testToXContent() throws IOException { + "}" + "}" + "}" - + "}", xContent.utf8ToString()); + + "}")); } public void testToXContentWithNullStats() throws IOException { @@ -322,7 +326,7 @@ private static CommonStats mockCommonStats() { commonStats.getQueryCache().add(new QueryCacheStats(++iota, ++iota, ++iota, ++iota, no)); commonStats.getRequestCache().add(new RequestCacheStats(++iota, ++iota, ++iota, ++iota)); commonStats.getStore().add(new StoreStats(++iota)); - commonStats.getRefresh().add(new RefreshStats(no, ++iota, (int) no)); + commonStats.getRefresh().add(new RefreshStats(no, ++iota, no, ++iota, (int) no)); final IndexingStats.Stats indexingStats = new IndexingStats.Stats(++iota, ++iota, no, no, no, no, no, no, false, ++iota); commonStats.getIndexing().add(new IndexingStats(indexingStats, null)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index fdfc6c9a5949..c6f269b1edd4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -370,7 +370,7 @@ public Collection createComponents(Client client, ClusterService cluster NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { try { - return createComponents(client, threadPool, clusterService, resourceWatcherService); + return createComponents(client, threadPool, clusterService, resourceWatcherService, scriptService); } catch (final Exception e) { throw new IllegalStateException("security initialization failed", e); } @@ -378,7 +378,7 @@ public Collection createComponents(Client client, ClusterService cluster // pkg private for testing - tests want to pass in their set of extensions hence we are not using the extension service directly Collection createComponents(Client client, ThreadPool threadPool, ClusterService clusterService, - ResourceWatcherService resourceWatcherService) throws Exception { + ResourceWatcherService resourceWatcherService, ScriptService scriptService) throws Exception { if (enabled == false) { return Collections.emptyList(); } @@ -404,7 +404,8 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste // realms construction final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, securityIndex.get()); - final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityIndex.get()); + final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityIndex.get(), + scriptService); final AnonymousUser anonymousUser = new AnonymousUser(settings); final ReservedRealm reservedRealm = 
new ReservedRealm(env, settings, nativeUsersStore, anonymousUser, securityIndex.get(), threadPool); @@ -617,7 +618,6 @@ public static List> getSettings(boolean transportClientMode, List ACCEPT_DEFAULT_PASSWORD_SETTING = Setting.boolSetting( - SecurityField.setting("authc.accept_default_password"), true, Setting.Property.NodeScope, Setting.Property.Filtered, - Setting.Property.Deprecated); public static final Setting BOOTSTRAP_ELASTIC_PASSWORD = SecureSetting.secureString("bootstrap.password", KeyStoreWrapper.SEED_SETTING); @@ -250,7 +246,6 @@ private Version getDefinedVersion(String username) { } public static void addSettings(List> settingsList) { - settingsList.add(ACCEPT_DEFAULT_PASSWORD_SETTING); settingsList.add(BOOTSTRAP_ELASTIC_PASSWORD); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index cbb352e67ab3..e8d874bc9d48 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.xpack.core.security.ScrollHelper; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; @@ -51,7 +52,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Supplier; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.DELETED; @@ -99,12 +99,14 @@ public void onFailure(Exception e) { private final Settings settings; private final Client client; private final SecurityIndexManager securityIndex; + private final ScriptService scriptService; private final List realmsToRefresh = new CopyOnWriteArrayList<>(); - public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex) { + public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex, ScriptService scriptService) { this.settings = settings; this.client = client; this.securityIndex = securityIndex; + this.scriptService = scriptService; } private String getNameFromId(String id) { @@ -120,7 +122,7 @@ private String getIdForName(String name) { * Loads all mappings from the index. 
* package private for unit testing */ - void loadMappings(ActionListener> listener) { + protected void loadMappings(ActionListener> listener) { if (securityIndex.isIndexUpToDate() == false) { listener.onFailure(new IllegalStateException( "Security index is not on the current version - the native realm will not be operational until " + @@ -149,7 +151,7 @@ void loadMappings(ActionListener> listener) { } } - private ExpressionRoleMapping buildMapping(String id, BytesReference source) { + protected ExpressionRoleMapping buildMapping(String id, BytesReference source) { try (InputStream stream = source.streamInput(); XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { @@ -349,17 +351,16 @@ public void resolveRoles(UserData user, ActionListener> listener) { getRoleMappings(null, ActionListener.wrap( mappings -> { final ExpressionModel model = user.asModel(); - Stream stream = mappings.stream() - .filter(ExpressionRoleMapping::isEnabled) - .filter(m -> m.getExpression().match(model)); - if (logger.isTraceEnabled()) { - stream = stream.map(m -> { - logger.trace("User [{}] matches role-mapping [{}] with roles [{}]", user.getUsername(), m.getName(), - m.getRoles()); - return m; - }); - } - final Set roles = stream.flatMap(m -> m.getRoles().stream()).collect(Collectors.toSet()); + final Set roles = mappings.stream() + .filter(ExpressionRoleMapping::isEnabled) + .filter(m -> m.getExpression().match(model)) + .flatMap(m -> { + final Set roleNames = m.getRoleNames(scriptService, model); + logger.trace("Applying role-mapping [{}] to user-model [{}] produced role-names [{}]", + m.getName(), model, roleNames); + return roleNames.stream(); + }) + .collect(Collectors.toSet()); logger.debug("Mapping user [{}] to roles [{}]", user, roles); listener.onResponse(roles); }, listener::onFailure diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 2d1d4a98b4ba..48659b896866 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -402,8 +402,8 @@ public static void buildRoleFromDescriptors(Collection roleDescr .flatMap(Collection::stream) .collect(Collectors.toSet()); privilegeStore.getPrivileges(applicationNames, applicationPrivilegeNames, ActionListener.wrap(appPrivileges -> { - applicationPrivilegesMap.forEach((key, names) -> - builder.addApplicationPrivilege(ApplicationPrivilege.get(key.v1(), names, appPrivileges), key.v2())); + applicationPrivilegesMap.forEach((key, names) -> ApplicationPrivilege.get(key.v1(), names, appPrivileges) + .forEach(priv -> builder.addApplicationPrivilege(priv, key.v2()))); listener.onResponse(builder.build()); }, listener::onFailure)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 09c89752f831..19694bb00331 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -34,9 
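The reworked resolveRoles above collapses the old trace-only stream into a single pipeline: keep enabled mappings, keep those whose expression matches the user model, then expand each into role names, which can now be rendered from templates via the injected ScriptService. A dependency-free sketch of that shape, using stand-in types for the mapping and the user model:

import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;

final class RoleResolutionSketch {

    // Stand-in for ExpressionRoleMapping: an enabled flag, a match predicate, a role-name expansion.
    static final class Mapping {
        final boolean enabled;
        final Predicate<Object> expression;
        final Set<String> roleNames;

        Mapping(boolean enabled, Predicate<Object> expression, Set<String> roleNames) {
            this.enabled = enabled;
            this.expression = expression;
            this.roleNames = roleNames;
        }
    }

    static Set<String> resolveRoles(List<Mapping> mappings, Object userModel) {
        return mappings.stream()
                .filter(m -> m.enabled)
                .filter(m -> m.expression.test(userModel))
                // the real store calls m.getRoleNames(scriptService, model) here, which may
                // render templated role names; this sketch just expands a fixed set
                .flatMap(m -> m.roleNames.stream())
                .collect(Collectors.toSet());
    }
}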
+34,11 @@ import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.security.ScrollHelper; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; @@ -46,6 +48,7 @@ import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -62,6 +65,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.DOC_TYPE_VALUE; +import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.Fields.APPLICATION; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; /** @@ -97,7 +101,7 @@ public void getPrivileges(Collection applications, Collection na listener.onResponse(Collections.emptyList()); } else if (frozenSecurityIndex.isAvailable() == false) { listener.onFailure(frozenSecurityIndex.getUnavailableReason()); - } else if (applications != null && applications.size() == 1 && names != null && names.size() == 1) { + } else if (isSinglePrivilegeMatch(applications, names)) { getPrivilege(Objects.requireNonNull(Iterables.get(applications, 0)), Objects.requireNonNull(Iterables.get(names, 0)), ActionListener.wrap(privilege -> listener.onResponse(privilege == null ? 
Collections.emptyList() : Collections.singletonList(privilege)), @@ -110,11 +114,14 @@ public void getPrivileges(Collection applications, Collection na if (isEmpty(applications) && isEmpty(names)) { query = typeQuery; } else if (isEmpty(names)) { - query = QueryBuilders.boolQuery().filter(typeQuery).filter( - QueryBuilders.termsQuery(ApplicationPrivilegeDescriptor.Fields.APPLICATION.getPreferredName(), applications)); + query = QueryBuilders.boolQuery().filter(typeQuery).filter(getApplicationNameQuery(applications)); } else if (isEmpty(applications)) { query = QueryBuilders.boolQuery().filter(typeQuery) - .filter(QueryBuilders.termsQuery(ApplicationPrivilegeDescriptor.Fields.NAME.getPreferredName(), names)); + .filter(getPrivilegeNameQuery(names)); + } else if (hasWildcard(applications)) { + query = QueryBuilders.boolQuery().filter(typeQuery) + .filter(getApplicationNameQuery(applications)) + .filter(getPrivilegeNameQuery(names)); } else { final String[] docIds = applications.stream() .flatMap(a -> names.stream().map(n -> toDocId(a, n))) @@ -139,6 +146,49 @@ public void getPrivileges(Collection applications, Collection na } } + private boolean isSinglePrivilegeMatch(Collection applications, Collection names) { + return applications != null && applications.size() == 1 && hasWildcard(applications) == false && names != null && names.size() == 1; + } + + private boolean hasWildcard(Collection applications) { + return applications.stream().anyMatch(n -> n.endsWith("*")); + } + + private QueryBuilder getPrivilegeNameQuery(Collection names) { + return QueryBuilders.termsQuery(ApplicationPrivilegeDescriptor.Fields.NAME.getPreferredName(), names); + } + + private QueryBuilder getApplicationNameQuery(Collection applications) { + if (applications.contains("*")) { + return QueryBuilders.existsQuery(APPLICATION.getPreferredName()); + } + final List rawNames = new ArrayList<>(applications.size()); + final List wildcardNames = new ArrayList<>(applications.size()); + for (String name : applications) { + if (name.endsWith("*")) { + wildcardNames.add(name); + } else { + rawNames.add(name); + } + } + + assert rawNames.isEmpty() == false || wildcardNames.isEmpty() == false; + + TermsQueryBuilder termsQuery = rawNames.isEmpty() ? 
null : QueryBuilders.termsQuery(APPLICATION.getPreferredName(), rawNames); + if (wildcardNames.isEmpty()) { + return termsQuery; + } + final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery(); + if (termsQuery != null) { + boolQuery.filter(termsQuery); + } + for (String wildcard : wildcardNames) { + final String prefix = wildcard.substring(0, wildcard.length() - 1); + boolQuery.filter(QueryBuilders.prefixQuery(APPLICATION.getPreferredName(), prefix)); + } + return boolQuery; + } + private static boolean isEmpty(Collection collection) { return collection == null || collection.isEmpty(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index b82bf7f3c7fc..c0ec72277d87 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -26,10 +26,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -69,7 +66,6 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.core.security.SecurityField.setting; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; @@ -83,11 +79,6 @@ */ public class NativeRolesStore implements BiConsumer, ActionListener> { - // these are no longer used, but leave them around for users upgrading - private static final Setting CACHE_SIZE_SETTING = - Setting.intSetting(setting("authz.store.roles.index.cache.max_size"), 10000, Property.NodeScope, Property.Deprecated); - private static final Setting CACHE_TTL_SETTING = Setting.timeSetting(setting("authz.store.roles.index.cache.ttl"), - TimeValue.timeValueMinutes(20), Property.NodeScope, Property.Deprecated); private static final Logger logger = LogManager.getLogger(NativeRolesStore.class); private final Settings settings; @@ -413,11 +404,6 @@ static RoleDescriptor transformRole(String id, BytesReference sourceBytes, Logge } } - public static void addSettings(List> settings) { - settings.add(CACHE_SIZE_SETTING); - settings.add(CACHE_TTL_SETTING); - } - /** * Gets the document's id field for the given role name. 
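getApplicationNameQuery above splits the requested application names into exact names (collected into a terms filter) and trailing-wildcard names (one prefix filter per pattern), with a bare "*" short-circuiting to an exists query. The same classification can be sketched without the query DSL; the matcher below reads the buckets with or-semantics, which is the intended meaning of the lookup, and all names are illustrative.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

final class ApplicationNameClassifier {

    final List<String> exactNames = new ArrayList<>();       // would become a single terms query
    final List<String> wildcardPrefixes = new ArrayList<>();  // each would become a prefix query
    boolean matchAll;                                          // "*" maps to an exists query

    ApplicationNameClassifier(Collection<String> applications) {
        for (String name : applications) {
            if (name.equals("*")) {
                matchAll = true;
            } else if (name.endsWith("*")) {
                wildcardPrefixes.add(name.substring(0, name.length() - 1));
            } else {
                exactNames.add(name);
            }
        }
    }

    boolean matches(String application) {
        if (matchAll) {
            return true;
        }
        return exactNames.contains(application)
                || wildcardPrefixes.stream().anyMatch(application::startsWith);
    }
}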
*/ diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 35180ab8f31d..cc573fd9247f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.license.TestUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -129,7 +130,7 @@ protected SSLService getSslService() { Client client = mock(Client.class); when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); - return security.createComponents(client, threadPool, clusterService, mock(ResourceWatcherService.class)); + return security.createComponents(client, threadPool, clusterService, mock(ResourceWatcherService.class), mock(ScriptService.class)); } private static T findComponent(Class type, Collection components) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java index 91222a5af584..ee5f935fcc56 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -25,9 +25,10 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -72,7 +73,8 @@ public void testPutValidMapping() throws Exception { assertThat(mapping.getExpression(), is(expression)); assertThat(mapping.isEnabled(), equalTo(true)); assertThat(mapping.getName(), equalTo("anarchy")); - assertThat(mapping.getRoles(), containsInAnyOrder("superuser")); + assertThat(mapping.getRoles(), iterableWithSize(1)); + assertThat(mapping.getRoles(), contains("superuser")); assertThat(mapping.getMetadata().size(), equalTo(1)); assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java index 43e5fb216399..fe8220dad4e6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.XPackLicenseState; +import 
org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -146,7 +147,8 @@ protected NativeRoleMappingStore roleMappingStore(final List userNames) when(mockClient.threadPool()).thenReturn(threadPool); when(mockClient.settings()).thenReturn(settings); - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, mockClient, mock(SecurityIndexManager.class)); + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, mockClient, mock(SecurityIndexManager.class), + mock(ScriptService.class)); final NativeRoleMappingStore roleMapper = spy(store); doAnswer(invocation -> { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index c0a93d36ab89..70e8719c0f79 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -8,6 +8,8 @@ import com.unboundid.ldap.sdk.LDAPURL; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; @@ -17,6 +19,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -29,11 +34,14 @@ import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -42,6 +50,8 @@ import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; +import 
org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.Before; @@ -54,6 +64,7 @@ import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING; import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -394,6 +405,75 @@ public void testLdapRealmMapsUserDNToRole() throws Exception { assertThat(user.roles(), arrayContaining("avenger")); } + /** + * This tests template role mappings (see + * {@link TemplateRoleName}) with an LDAP realm, using an additional + * metadata field (see {@link LdapMetaDataResolverSettings#ADDITIONAL_META_DATA_SETTING}). + */ + public void testLdapRealmWithTemplatedRoleMapping() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(defaultGlobalSettings) + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), "uid") + .build(); + RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings); + + SecurityIndexManager mockSecurityIndex = mock(SecurityIndexManager.class); + when(mockSecurityIndex.isAvailable()).thenReturn(true); + when(mockSecurityIndex.isIndexUpToDate()).thenReturn(true); + when(mockSecurityIndex.isMappingUpToDate()).thenReturn(true); + + Client mockClient = mock(Client.class); + when(mockClient.threadPool()).thenReturn(threadPool); + + final ScriptService scriptService = new ScriptService(defaultGlobalSettings, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), ScriptModule.CORE_CONTEXTS); + NativeRoleMappingStore roleMapper = new NativeRoleMappingStore(defaultGlobalSettings, mockClient, mockSecurityIndex, + scriptService) { + @Override + protected void loadMappings(ActionListener> listener) { + listener.onResponse( + Arrays.asList( + this.buildMapping("m1", new BytesArray("{" + + "\"role_templates\":[{\"template\":{\"source\":\"_user_{{metadata.uid}}\"}}]," + + "\"enabled\":true," + + "\"rules\":{ \"any\":[" + + " { \"field\":{\"realm.name\":\"ldap1\"}}," + + " { \"field\":{\"realm.name\":\"ldap2\"}}" + + "]}}")), + this.buildMapping("m2", new BytesArray("{" + + "\"roles\":[\"should_not_happen\"]," + + "\"enabled\":true," + + "\"rules\":{ \"all\":[" + + " { \"field\":{\"realm.name\":\"ldap1\"}}," + + " { \"field\":{\"realm.name\":\"ldap2\"}}" + + "]}}")), + this.buildMapping("m3", new BytesArray("{" + + "\"roles\":[\"sales_admin\"]," + + "\"enabled\":true," + + "\"rules\":" + + " { \"field\":{\"dn\":\"*,ou=people,o=sevenSeas\"}}" + + "}")) + ) + ); + } + }; + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm ldap = new LdapRealm(config, ldapFactory, + roleMapper, threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken("Horatio Hornblower", new SecureString(PASSWORD)), future); + final AuthenticationResult result = future.actionGet();
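// For illustration only (not code from the change): the shape of the templated mapping "m1"
// loaded above, written out as a plain Java string. The mustache variable {{metadata.uid}} is
// resolved from the user's realm metadata, which this test populates from the LDAP "uid"
// attribute, so "Horatio Hornblower" (uid hhornblo) ends up with the role "_user_hhornblo"
// on top of "sales_admin" from the non-templated mapping "m3".
String templatedMappingJson = "{"
    + "\"enabled\": true,"
    + "\"role_templates\": ["
    + "  { \"template\": { \"source\": \"_user_{{metadata.uid}}\" }, \"format\": \"string\" }"
    + "],"
    + "\"rules\": { \"any\": ["
    + "  { \"field\": { \"realm.name\": \"ldap1\" } },"
    + "  { \"field\": { \"realm.name\": \"ldap2\" } }"
    + "] }"
    + "}";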
+ assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat(user.roles(), arrayContainingInAnyOrder("_user_hhornblo", "sales_admin")); + } + /** * The contract for {@link Realm} implementations is that they should log-and-return-null (and * not call {@link ActionListener#onFailure(Exception)}) if there is an internal exception that prevented them from performing an diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 729bd08d7faf..276d8a333f79 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -5,31 +5,48 @@ */ package org.elasticsearch.xpack.security.authc.support.mapper; +import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.Mockito; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Locale; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; public class ExpressionRoleMappingTests extends ESTestCase { @@ -39,44 +56,44 @@ public class ExpressionRoleMappingTests extends ESTestCase { @Before public void setupMapping() throws Exception { realm = new 
RealmConfig(new RealmConfig.RealmIdentifier("ldap", "ldap1"), - Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY)); + Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY)); } - public void testParseValidJson() throws Exception { + public void testParseValidJsonWithFixedRoleNames() throws Exception { String json = "{" - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"enabled\": true, " - + "\"rules\": { " - + " \"all\": [ " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " - + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" - + " ]}" - + "}"; + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"all\": [ " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" + + " ]}" + + "}"; final ExpressionRoleMapping mapping = parse(json, "ldap_sales"); assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales")); assertThat(mapping.getExpression(), instanceOf(AllExpression.class)); final UserRoleMapper.UserData user1a = new UserRoleMapper.UserData( - "john.smith", "cn=john.smith,ou=sales,dc=example,dc=com", - Collections.emptyList(), Collections.singletonMap("active", true), realm + "john.smith", "cn=john.smith,ou=sales,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", true), realm ); final UserRoleMapper.UserData user1b = new UserRoleMapper.UserData( - user1a.getUsername(), user1a.getDn().toUpperCase(Locale.US), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + user1a.getUsername(), user1a.getDn().toUpperCase(Locale.US), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() ); final UserRoleMapper.UserData user1c = new UserRoleMapper.UserData( - user1a.getUsername(), user1a.getDn().replaceAll(",", ", "), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + user1a.getUsername(), user1a.getDn().replaceAll(",", ", "), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() ); final UserRoleMapper.UserData user1d = new UserRoleMapper.UserData( - user1a.getUsername(), user1a.getDn().replaceAll("dc=", "DC="), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + user1a.getUsername(), user1a.getDn().replaceAll("dc=", "DC="), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() ); final UserRoleMapper.UserData user2 = new UserRoleMapper.UserData( - "jamie.perez", "cn=jamie.perez,ou=sales,dc=example,dc=com", - Collections.emptyList(), Collections.singletonMap("active", false), realm + "jamie.perez", "cn=jamie.perez,ou=sales,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", false), realm ); final UserRoleMapper.UserData user3 = new UserRoleMapper.UserData( - "simone.ng", "cn=simone.ng,ou=finance,dc=example,dc=com", - Collections.emptyList(), Collections.singletonMap("active", true), realm + "simone.ng", "cn=simone.ng,ou=finance,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", true), realm ); assertThat(mapping.getExpression().match(user1a.asModel()), equalTo(true)); @@ -87,58 +104,218 @@ public void testParseValidJson() throws Exception { assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false)); } + public void testParseValidJsonWithTemplatedRoleNames() throws Exception { + String json = "{" + + "\"role_templates\": [ " + + " { \"template\" : { \"source\":\"kibana_user\"} }," + + " { 
\"template\" : { \"source\":\"sales\"} }," + + " { \"template\" : { \"source\":\"_user_{{username}}\" }, \"format\":\"string\" }" + + " ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"all\": [ " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" + + " ]}" + + "}"; + final ExpressionRoleMapping mapping = parse(json, "ldap_sales"); + assertThat(mapping.getRoleTemplates(), iterableWithSize(3)); + assertThat(mapping.getRoleTemplates().get(0).getTemplate().utf8ToString(), equalTo("{\"source\":\"kibana_user\"}")); + assertThat(mapping.getRoleTemplates().get(0).getFormat(), equalTo(TemplateRoleName.Format.STRING)); + assertThat(mapping.getRoleTemplates().get(1).getTemplate().utf8ToString(), equalTo("{\"source\":\"sales\"}")); + assertThat(mapping.getRoleTemplates().get(1).getFormat(), equalTo(TemplateRoleName.Format.STRING)); + assertThat(mapping.getRoleTemplates().get(2).getTemplate().utf8ToString(), equalTo("{\"source\":\"_user_{{username}}\"}")); + assertThat(mapping.getRoleTemplates().get(2).getFormat(), equalTo(TemplateRoleName.Format.STRING)); + } + public void testParsingFailsIfRulesAreMissing() throws Exception { String json = "{" - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"enabled\": true " - + "}"; + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true " + + "}"; ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); assertThat(ex.getMessage(), containsString("rules")); } public void testParsingFailsIfRolesMissing() throws Exception { String json = "{" - + "\"enabled\": true, " - + "\"rules\": " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " - + "}"; + + "\"enabled\": true, " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " + + "}"; ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); assertThat(ex.getMessage(), containsString("role")); } public void testParsingFailsIfThereAreUnrecognisedFields() throws Exception { String json = "{" - + "\"disabled\": false, " - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"rules\": " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " - + "}"; + + "\"disabled\": false, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " + + "}"; ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); assertThat(ex.getMessage(), containsString("disabled")); } public void testParsingIgnoresTypeFields() throws Exception { String json = "{" - + "\"enabled\": true, " - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"rules\": " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " - + "\"doc_type\": \"role-mapping\", " - + "\"type\": \"doc\"" - + "}"; - final ExpressionRoleMapping mapping = parse(json, "from_index"); + + "\"enabled\": true, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + "\"doc_type\": \"role-mapping\", " + + "\"type\": \"doc\"" + + "}"; + final ExpressionRoleMapping mapping = parse(json, "from_index", true); assertThat(mapping.isEnabled(), equalTo(true)); - assertThat(mapping.getRoles(), containsInAnyOrder("kibana_user", "sales")); + assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales")); + } + + public 
void testParsingOfBothRoleNamesAndTemplates() throws Exception { + String json = "{" + + "\"enabled\": true, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"role_templates\": [" + + " { \"template\" : \"{ \\\"source\\\":\\\"_user_{{username}}\\\" }\", \"format\":\"string\" }" + + "]," + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }" + + "}"; + + // This is rejected when validating a request, but is valid when parsing the mapping + final ExpressionRoleMapping mapping = parse(json, "from_api", false); + assertThat(mapping.getRoles(), iterableWithSize(2)); + assertThat(mapping.getRoleTemplates(), iterableWithSize(1)); + } + + public void testToXContentWithRoleNames() throws Exception { + String source = "{" + + "\"roles\": [ " + + " \"kibana_user\"," + + " \"sales\"" + + " ], " + + "\"enabled\": true, " + + "\"rules\": { \"field\": { \"realm.name\" : \"saml1\" } }" + + "}"; + final ExpressionRoleMapping mapping = parse(source, getTestName()); + assertThat(mapping.getRoles(), iterableWithSize(2)); + + final String xcontent = Strings.toString(mapping); + assertThat(xcontent, equalTo( + "{" + + "\"enabled\":true," + + "\"roles\":[" + + "\"kibana_user\"," + + "\"sales\"" + + "]," + + "\"rules\":{\"field\":{\"realm.name\":\"saml1\"}}," + + "\"metadata\":{}" + + "}" + )); + } + + public void testToXContentWithTemplates() throws Exception { + String source = "{" + + "\"metadata\" : { \"answer\":42 }," + + "\"role_templates\": [ " + + " { \"template\" : { \"source\":\"_user_{{username}}\" }, \"format\":\"string\" }," + + " { \"template\" : { \"source\":\"{{#tojson}}groups{{/tojson}}\" }, \"format\":\"json\" }" + + " ], " + + "\"enabled\": false, " + + "\"rules\": { \"field\": { \"realm.name\" : \"saml1\" } }" + + "}"; + final ExpressionRoleMapping mapping = parse(source, getTestName()); + assertThat(mapping.getRoleTemplates(), iterableWithSize(2)); + + final String xcontent = Strings.toString(mapping.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS, true)); + assertThat(xcontent, equalTo( + "{" + + "\"enabled\":false," + + "\"role_templates\":[" + + "{\"template\":\"{\\\"source\\\":\\\"_user_{{username}}\\\"}\",\"format\":\"string\"}," + + "{\"template\":\"{\\\"source\\\":\\\"{{#tojson}}groups{{/tojson}}\\\"}\",\"format\":\"json\"}" + + "]," + + "\"rules\":{\"field\":{\"realm.name\":\"saml1\"}}," + + "\"metadata\":{\"answer\":42}," + + "\"doc_type\":\"role-mapping\"" + + "}" + )); + + final ExpressionRoleMapping parsed = parse(xcontent, getTestName(), true); + assertThat(parsed.getRoles(), iterableWithSize(0)); + assertThat(parsed.getRoleTemplates(), iterableWithSize(2)); + assertThat(parsed.getMetadata(), Matchers.hasKey("answer")); + } + + public void testSerialization() throws Exception { + final ExpressionRoleMapping original = randomRoleMapping(true); + + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); + original.writeTo(output); + + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); + StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), + registry); + streamInput.setVersion(version); + final ExpressionRoleMapping serialized = new ExpressionRoleMapping(streamInput); + assertEquals(original, serialized); + } + + public void testSerializationPreV71() throws 
Exception { + final ExpressionRoleMapping original = randomRoleMapping(false); + + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); + original.writeTo(output); + + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); + StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), + registry); + streamInput.setVersion(version); + final ExpressionRoleMapping serialized = new ExpressionRoleMapping(streamInput); + assertEquals(original, serialized); } private ExpressionRoleMapping parse(String json, String name) throws IOException { + return parse(json, name, false); + } + + private ExpressionRoleMapping parse(String json, String name, boolean fromIndex) throws IOException { final NamedXContentRegistry registry = NamedXContentRegistry.EMPTY; final XContentParser parser = XContentType.JSON.xContent() - .createParser(registry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + .createParser(registry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); final ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, parser); assertThat(mapping, notNullValue()); assertThat(mapping.getName(), equalTo(name)); return mapping; } + private ExpressionRoleMapping randomRoleMapping(boolean acceptRoleTemplates) { + final boolean useTemplate = acceptRoleTemplates && randomBoolean(); + final List roles; + final List templates; + if (useTemplate) { + roles = Collections.emptyList(); + templates = Arrays.asList(randomArray(1, 5, TemplateRoleName[]::new, () -> + new TemplateRoleName(new BytesArray(randomAlphaOfLengthBetween(10, 25)), randomFrom(TemplateRoleName.Format.values())) + )); + } else { + roles = Arrays.asList(randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(4, 12))); + templates = Collections.emptyList(); + } + return new ExpressionRoleMapping( + randomAlphaOfLengthBetween(3, 8), + new FieldExpression(randomAlphaOfLengthBetween(4, 12), + Collections.singletonList(new FieldExpression.FieldValue(randomInt(99)))), + roles, + templates, + Collections.singletonMap(randomAlphaOfLengthBetween(3, 12), randomIntBetween(30, 90)), + true + ); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 29407a867298..e96284ba1549 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -10,10 +10,14 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; @@ -23,6 +27,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; @@ -54,12 +59,12 @@ public void testResolveRoles() throws Exception { // Does match DN final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping("dept_h", new FieldExpression("dn", Collections.singletonList(new FieldValue("*,ou=dept_h,o=forces,dc=gc,dc=ca"))), - Arrays.asList("dept_h", "defence"), Collections.emptyMap(), true); + Arrays.asList("dept_h", "defence"), Collections.emptyList(), Collections.emptyMap(), true); // Does not match - user is not in this group final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping("admin", - new FieldExpression("groups", Collections.singletonList( - new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), - Arrays.asList("admin"), Collections.emptyMap(), true); + new FieldExpression("groups", Collections.singletonList( + new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), + Arrays.asList("admin"), Collections.emptyList(), Collections.emptyMap(), true); // Does match - user is one of these groups final ExpressionRoleMapping mapping3 = new ExpressionRoleMapping("flight", new FieldExpression("groups", Arrays.asList( @@ -67,18 +72,23 @@ public void testResolveRoles() throws Exception { new FieldValue(randomiseDn("cn=betaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")), new FieldValue(randomiseDn("cn=gammaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")) )), - Arrays.asList("flight"), Collections.emptyMap(), true); + Collections.emptyList(), + Arrays.asList(new TemplateRoleName(new BytesArray("{ \"source\":\"{{metadata.extra_group}}\" }"), + TemplateRoleName.Format.STRING)), + Collections.emptyMap(), true); // Does not match - mapping is not enabled final ExpressionRoleMapping mapping4 = new ExpressionRoleMapping("mutants", new FieldExpression("groups", Collections.singletonList( new FieldValue(randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), - Arrays.asList("mutants"), Collections.emptyMap(), false); + Arrays.asList("mutants"), Collections.emptyList(), Collections.emptyMap(), false); final Client client = mock(Client.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); + ScriptService scriptService = new ScriptService(Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), ScriptModule.CORE_CONTEXTS); when(securityIndex.isAvailable()).thenReturn(true); - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex) { + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex, scriptService) { @Override protected void loadMappings(ActionListener> listener) { final List mappings = Arrays.asList(mapping1, mapping2, mapping3, mapping4); 
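// For illustration only (not code from the change): the resolution flow this test exercises,
// sketched with a hypothetical renderTemplate(...) helper standing in for the mustache rendering
// that NativeRoleMappingStore performs through the ScriptService. Every enabled mapping whose
// expression matches the user contributes its fixed role names plus the names produced by
// rendering its role templates against the user's model.
Set<String> resolvedRoles = new HashSet<>();
for (ExpressionRoleMapping mapping : mappings) {
    if (mapping.isEnabled() && mapping.getExpression().match(user.asModel())) {
        resolvedRoles.addAll(mapping.getRoles());                  // fixed names, e.g. "dept_h", "defence"
        for (TemplateRoleName template : mapping.getRoleTemplates()) {
            // e.g. "{{metadata.extra_group}}" rendered against the user's metadata -> "flight"
            resolvedRoles.addAll(renderTemplate(template, user));  // hypothetical helper, not an ES API
        }
    }
}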
@@ -96,7 +106,7 @@ protected void loadMappings(ActionListener> listener Arrays.asList( randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"), randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca") - ), Collections.emptyMap(), realm); + ), Collections.singletonMap("extra_group", "flight"), realm); logger.info("UserData is [{}]", user); store.resolveRoles(user, future); @@ -213,7 +223,8 @@ protected void doLookupUser(String username, ActionListener listener) { listener.onResponse(null); } }; - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class)); + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class), + mock(ScriptService.class)); store.refreshRealmOnChange(mockRealm); return store; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 8f60b1d30523..a83374885494 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -181,6 +181,45 @@ public void testGetPrivilegesByApplicationName() throws Exception { assertResult(sourcePrivileges, future); } + public void testGetPrivilegesByWildcardApplicationName() throws Exception { + final PlainActionFuture> future = new PlainActionFuture<>(); + store.getPrivileges(Arrays.asList("myapp-*", "yourapp"), null, future); + assertThat(requests, iterableWithSize(1)); + assertThat(requests.get(0), instanceOf(SearchRequest.class)); + SearchRequest request = (SearchRequest) requests.get(0); + assertThat(request.indices(), arrayContaining(SecurityIndexManager.SECURITY_INDEX_NAME)); + + final String query = Strings.toString(request.source().query()); + assertThat(query, containsString("{\"bool\":{\"filter\":[{\"terms\":{\"application\":[\"yourapp\"]")); + assertThat(query, containsString("{\"prefix\":{\"application\":{\"value\":\"myapp-\"")); + assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); + + final SearchHit[] hits = new SearchHit[0]; + listener.get().onResponse(new SearchResponse(new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, null, false, false, null, 1), + "_scrollId1", 1, 1, 0, 1, null, null)); + } + + public void testGetPrivilegesByStarApplicationName() throws Exception { + final PlainActionFuture> future = new PlainActionFuture<>(); + store.getPrivileges(Arrays.asList("*", "anything"), null, future); + assertThat(requests, iterableWithSize(1)); + assertThat(requests.get(0), instanceOf(SearchRequest.class)); + SearchRequest request = (SearchRequest) requests.get(0); + assertThat(request.indices(), arrayContaining(SecurityIndexManager.SECURITY_INDEX_NAME)); + + final String query = Strings.toString(request.source().query()); + assertThat(query, containsString("{\"exists\":{\"field\":\"application\"")); + assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); + + final SearchHit[] hits = new SearchHit[0]; + listener.get().onResponse(new SearchResponse(new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 
0f), + null, null, false, false, null, 1), + "_scrollId1", 1, 1, 0, 1, null, null)); + } + public void testGetAllPrivileges() throws Exception { final List sourcePrivileges = Arrays.asList( new ApplicationPrivilegeDescriptor("app1", "admin", newHashSet("action:admin/*", "action:login", "data:read/*"), emptyMap()), diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index cd2f82ee7b30..31cec8bbee39 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -10,6 +10,16 @@ esplugin { extendedPlugins = ['x-pack-core', 'lang-painless'] } +ext { + // SQL dependency versions + jlineVersion="3.10.0" + antlrVersion="4.5.3" + + // SQL test dependency versions + csvjdbcVersion="1.0.34" + h2Version="1.4.197" +} + configurations { // Bundles the sql-cli.jar into the distribution bin @@ -81,7 +91,7 @@ configurations { } dependencies { - regenerate 'org.antlr:antlr4:4.5.3' + regenerate "org.antlr:antlr4:${antlrVersion}" } String grammarPath = 'src/main/antlr' diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index 569745373045..e69d5b020131 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -257,8 +257,7 @@ public boolean nullPlusNonNullIsNull() throws SQLException { @Override public boolean supportsConvert() throws SQLException { - //TODO: add Convert - return false; + return true; } @Override @@ -774,14 +773,14 @@ public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLExce @Override public ResultSet getCatalogs() throws SQLException { // TABLE_CAT is the first column - Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%'", 1); + Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%' LIKE ''", 1); return memorySet(con.cfg, columnInfo("", "TABLE_CAT"), data); } @Override public ResultSet getTableTypes() throws SQLException { // TABLE_TYPE (4) - Object[][] data = queryColumn(con, "SYS TABLES TYPE '%'", 4); + Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", 4); return memorySet(con.cfg, columnInfo("", "TABLE_TYPE"), data); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java index 8b1433780c72..c0f2e6e46ea0 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java @@ -9,6 +9,7 @@ import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; +import java.time.LocalDate; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -27,10 +28,9 @@ */ final class JdbcDateUtils { - private JdbcDateUtils() { - } + private JdbcDateUtils() {} - private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000L; + private static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); static final DateTimeFormatter ISO_WITH_MILLIS = new DateTimeFormatterBuilder() .parseCaseInsensitive() @@ -58,12 +58,13 @@ static Date asDate(String date) { return new Date(zdt.toLocalDate().atStartOfDay(zdt.getZone()).toInstant().toEpochMilli()); } - /** - 
* In contrast to {@link JdbcDateUtils#asDate(String)} here we just want to eliminate - * the date part and just set it to EPOCH (1970-01-1) - */ static Time asTime(String date) { - return new Time(utcMillisRemoveDate(asMillisSinceEpoch(date))); + ZonedDateTime zdt = asDateTime(date); + return new Time(zdt.toLocalTime().atDate(EPOCH).atZone(zdt.getZone()).toInstant().toEpochMilli()); + } + + static Timestamp asTimestamp(long millisSinceEpoch) { + return new Timestamp(millisSinceEpoch); } static Timestamp asTimestamp(String date) { @@ -81,8 +82,4 @@ static R asDateTimeField(Object value, Function asDateTimeMethod, return ctor.apply(((Number) value).longValue()); } } - - private static long utcMillisRemoveDate(long l) { - return l % DAY_IN_MILLIS; - } } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java index 7d2329254b59..9b1fcb48901a 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java @@ -178,17 +178,17 @@ public byte[] getBytes(int columnIndex) throws SQLException { @Override public Date getDate(int columnIndex) throws SQLException { - return getDate(columnIndex, null); + return asDate(columnIndex); } @Override public Time getTime(int columnIndex) throws SQLException { - return getTime(columnIndex, null); + return asTime(columnIndex); } @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { - return getTimestamp(columnIndex, null); + return asTimeStamp(columnIndex); } @Override @@ -244,7 +244,7 @@ public Date getDate(String columnLabel) throws SQLException { return getDate(column(columnLabel)); } - private Long dateTime(int columnIndex) throws SQLException { + private Long dateTimeAsMillis(int columnIndex) throws SQLException { Object val = column(columnIndex); EsType type = columnType(columnIndex); try { @@ -270,13 +270,68 @@ private Long dateTime(int columnIndex) throws SQLException { } } + private Date asDate(int columnIndex) throws SQLException { + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + try { + return JdbcDateUtils.asDate(val.toString()); + } catch (Exception e) { + EsType type = columnType(columnIndex); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", val, type.getName()), e); + } + } + + private Time asTime(int columnIndex) throws SQLException { + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + EsType type = columnType(columnIndex); + if (type == EsType.DATE) { + return new Time(0L); + } + + try { + return JdbcDateUtils.asTime(val.toString()); + } catch (Exception e) { + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", val, type.getName()), e); + } + } + + private Timestamp asTimeStamp(int columnIndex) throws SQLException { + Object val = column(columnIndex); + + if (val == null) { + return null; + } + + try { + if (val instanceof Number) { + return JdbcDateUtils.asTimestamp(((Number) val).longValue()); + } + return JdbcDateUtils.asTimestamp(val.toString()); + } catch (Exception e) { + EsType type = columnType(columnIndex); + throw new SQLException( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Timestamp", val, type.getName()), e); + } + } + private Calendar 
safeCalendar(Calendar calendar) { return calendar == null ? defaultCalendar : calendar; } @Override public Date getDate(int columnIndex, Calendar cal) throws SQLException { - return TypeConverter.convertDate(dateTime(columnIndex), safeCalendar(cal)); + return TypeConverter.convertDate(dateTimeAsMillis(columnIndex), safeCalendar(cal)); } @Override @@ -290,7 +345,7 @@ public Time getTime(int columnIndex, Calendar cal) throws SQLException { if (type == EsType.DATE) { return new Time(0L); } - return TypeConverter.convertTime(dateTime(columnIndex), safeCalendar(cal)); + return TypeConverter.convertTime(dateTimeAsMillis(columnIndex), safeCalendar(cal)); } @Override @@ -300,7 +355,7 @@ public Time getTime(String columnLabel) throws SQLException { @Override public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - return TypeConverter.convertTimestamp(dateTime(columnIndex), safeCalendar(cal)); + return TypeConverter.convertTimestamp(dateTimeAsMillis(columnIndex), safeCalendar(cal)); } @Override diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java index 907206dc083d..d08496b611e0 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/TypeConverter.java @@ -232,6 +232,8 @@ static Object convert(Object v, EsType columnType, String typeString) throws SQL case INTERVAL_HOUR_TO_SECOND: case INTERVAL_MINUTE_TO_SECOND: return Duration.parse(v.toString()); + case IP: + return v.toString(); default: throw new SQLException("Unexpected column type [" + typeString + "]"); diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index abbbd6e9663f..cf0a0dba8ee6 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -12,11 +12,20 @@ dependencies { compile project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') compile project(path: xpackModule('sql:sql-action')) - compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34" + compile "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" // CLI testing dependencies compile project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') - compile "org.jline:jline:3.8.2" + + // select just the parts of JLine that are needed + compile("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + compile "org.jline:jline-terminal:${jlineVersion}" + compile "org.jline:jline-reader:${jlineVersion}" + compile "org.jline:jline-style:${jlineVersion}" + + testRuntime "org.elasticsearch:jna:${versions.jna}" } /* disable unit tests because these are all integration tests used @@ -31,49 +40,6 @@ forbiddenApisMain { replaceSignatureFiles 'es-all-signatures', 'es-test-signatures' } -thirdPartyAudit.ignoreMissingClasses ( - // jLine's optional dependencies - 'org.apache.sshd.client.SshClient', - 'org.apache.sshd.client.auth.keyboard.UserInteraction', - 'org.apache.sshd.client.channel.ChannelShell', - 'org.apache.sshd.client.channel.ClientChannel', - 'org.apache.sshd.client.channel.ClientChannelEvent', - 'org.apache.sshd.client.future.AuthFuture', - 'org.apache.sshd.client.future.ConnectFuture', - 'org.apache.sshd.client.future.OpenFuture', - 'org.apache.sshd.client.session.ClientSession', - 'org.apache.sshd.common.Factory', - 'org.apache.sshd.common.channel.PtyMode', - 
'org.apache.sshd.common.config.keys.FilePasswordProvider', - 'org.apache.sshd.common.util.io.NoCloseInputStream', - 'org.apache.sshd.common.util.io.NoCloseOutputStream', - 'org.apache.sshd.server.Command', - 'org.apache.sshd.server.Environment', - 'org.apache.sshd.server.ExitCallback', - 'org.apache.sshd.server.SessionAware', - 'org.apache.sshd.server.Signal', - 'org.apache.sshd.server.SshServer', - 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', - 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', - 'org.apache.sshd.server.session.ServerSession', - 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', - 'org.fusesource.jansi.Ansi', - 'org.fusesource.jansi.internal.CLibrary$Termios', - 'org.fusesource.jansi.internal.CLibrary$WinSize', - 'org.fusesource.jansi.internal.CLibrary', - 'org.fusesource.jansi.internal.Kernel32$CHAR_INFO', - 'org.fusesource.jansi.internal.Kernel32$CONSOLE_SCREEN_BUFFER_INFO', - 'org.fusesource.jansi.internal.Kernel32$COORD', - 'org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$INPUT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$KEY_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$SMALL_RECT', - 'org.fusesource.jansi.internal.Kernel32', - 'org.fusesource.jansi.internal.WindowsSupport', - 'org.mozilla.universalchardet.UniversalDetector', -) - subprojects { apply plugin: 'elasticsearch.standalone-rest-test' dependencies { @@ -88,14 +54,14 @@ subprojects { testCompile "org.elasticsearch.test:framework:${version}" // JDBC testing dependencies - testRuntime "net.sourceforge.csvjdbc:csvjdbc:1.0.34" - testRuntime "com.h2database:h2:1.4.197" + testRuntime "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" + testRuntime "com.h2database:h2:${h2Version}" testRuntime project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') testRuntime xpackProject('plugin:sql:sql-client') // TODO check if needed - testRuntime("org.antlr:antlr4-runtime:4.5.3") { + testRuntime("org.antlr:antlr4-runtime:${antlrVersion}") { transitive = false } @@ -104,7 +70,15 @@ subprojects { testRuntime (xpackProject('plugin:sql:sql-action')) { transitive = false } - testRuntime "org.jline:jline:3.8.2" + + testRuntime("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + testRuntime "org.jline:jline-terminal:${jlineVersion}" + testRuntime "org.jline:jline-reader:${jlineVersion}" + testRuntime "org.jline:jline-style:${jlineVersion}" + + testRuntime "org.elasticsearch:jna:${versions.jna}" } if (project.name != 'security') { diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java index d5e7e7cc5084..6a4a2662810e 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java @@ -29,10 +29,10 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; 
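// For illustration only (not code from the change): the sql/qa build above replaces the single
// org.jline:jline artifact with the JLine 3 modules it presumably needs (jline-terminal plus the
// jline-terminal-jna provider, jline-reader, jline-style) and Elasticsearch's repackaged JNA.
// A minimal use of that split API, assuming a console-backed terminal is available:
import org.jline.reader.LineReader;
import org.jline.reader.LineReaderBuilder;
import org.jline.terminal.Terminal;
import org.jline.terminal.TerminalBuilder;

class JLineSketch {
    public static void main(String[] args) throws Exception {
        Terminal terminal = TerminalBuilder.builder().system(true).build();         // jline-terminal(-jna)
        LineReader reader = LineReaderBuilder.builder().terminal(terminal).build(); // jline-reader
        String line = reader.readLine("sql> ");
        terminal.writer().println("read: " + line);
        terminal.flush();
    }
}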
-import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -101,9 +101,9 @@ public void expectDescribe(Map> columns, String user) throw String mode = randomMode(); Map expected = new HashMap<>(3); expected.put("columns", Arrays.asList( - columnInfo(mode, "column", "keyword", JDBCType.VARCHAR, 0), - columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 0), - columnInfo(mode, "mapping", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "column", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "mapping", "keyword", JDBCType.VARCHAR, 32766))); List> rows = new ArrayList<>(columns.size()); for (Map.Entry> column : columns.entrySet()) { List cols = new ArrayList<>(); @@ -120,8 +120,8 @@ public void expectDescribe(Map> columns, String user) throw public void expectShowTables(List tables, String user) throws Exception { String mode = randomMode(); List columns = new ArrayList<>(); - columns.add(columnInfo(mode, "name", "keyword", JDBCType.VARCHAR, 0)); - columns.add(columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 0)); + columns.add(columnInfo(mode, "name", "keyword", JDBCType.VARCHAR, 32766)); + columns.add(columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 32766)); Map expected = new HashMap<>(); expected.put("columns", columns); List> rows = new ArrayList<>(); diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java index 2d93597efc10..65eb991280ff 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java @@ -34,10 +34,10 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; -import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; public class UserFunctionIT extends ESRestTestCase { @@ -81,7 +81,7 @@ public void testSingleRandomUser() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomUserName))); Map actual = runSql(randomUserName, mode, SQL); @@ -97,7 +97,7 @@ public void testSingleRandomUserWithWhereEvaluatingTrue() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomUserName), Arrays.asList(randomUserName), Arrays.asList(randomUserName))); @@ -114,7 +114,7 @@ public void testSingleRandomUserWithWhereEvaluatingFalse() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - 
columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Collections.>emptyList()); String anotherRandomUserName = randomValueOtherThan(randomUserName, () -> randomAlphaOfLengthBetween(1, 15)); Map actual = runSql(randomUserName, mode, SQL + " FROM test WHERE USER()='" + anotherRandomUserName + "' LIMIT 3"); @@ -129,7 +129,7 @@ public void testMultipleRandomUsersAccess() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomlyPickedUsername))); Map actual = runSql(randomlyPickedUsername, mode, SQL); @@ -147,7 +147,7 @@ public void testSingleUserSelectFromIndex() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomUserName), Arrays.asList(randomUserName), Arrays.asList(randomUserName))); diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java index 6cd53d22a173..839da92483ed 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/JdbcDocCsvSpecIT.java @@ -53,7 +53,7 @@ protected void loadDataset(RestClient client) throws Exception { @ParametersFactory(shuffle = false, argumentFormatting = SqlSpecTestCase.PARAM_FORMATTING) public static List readScriptSpec() throws Exception { Parser parser = specParser(); - return readScriptSpec("/docs.csv-spec", parser); + return readScriptSpec("/docs/docs.csv-spec", parser); } public JdbcDocCsvSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java index 233c4b6a4202..1a47bb0add85 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java @@ -28,10 +28,10 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLIENT_IDS; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; -import static org.elasticsearch.xpack.sql.proto.Mode.CLI; public abstract class SqlProtocolTestCase extends ESRestTestCase { @@ -62,7 +62,7 @@ public void testNumericTypes() throws IOException { } public void testTextualType() throws IOException { - assertQuery("SELECT 'abc123'", "'abc123'", "keyword", "abc123", 0); + assertQuery("SELECT 'abc123'", "'abc123'", "keyword", "abc123", 32766); } public void testDateTimes() throws IOException { @@ -141,7 +141,7 @@ private void assertQuery(String sql, String columnName, String 
columnType, Objec List row = (ArrayList) rows.get(0); assertEquals(1, row.size()); - // from xcontent we can get float or double, depending on the conversion + // from xcontent we can get float or double, depending on the conversion // method of the specific xcontent format implementation if (columnValue instanceof Float && row.get(0) instanceof Double) { assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); @@ -209,7 +209,7 @@ private Map runSql(String mode, String sql, boolean columnar) th return XContentHelper.convertToMap(SmileXContent.smileXContent, content, false); } default: - return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); } } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java index 7029c469d2f9..7db6faefb57c 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/CsvSpecTestCase.java @@ -10,10 +10,10 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.CsvTestCase; +import java.net.URL; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; -import java.util.ArrayList; import java.util.List; import static org.elasticsearch.xpack.sql.qa.jdbc.CsvTestUtils.csvConnection; @@ -29,22 +29,9 @@ public abstract class CsvSpecTestCase extends SpecBaseIntegrationTestCase { @ParametersFactory(argumentFormatting = PARAM_FORMATTING) public static List readScriptSpec() throws Exception { - Parser parser = specParser(); - List tests = new ArrayList<>(); - tests.addAll(readScriptSpec("/select.csv-spec", parser)); - tests.addAll(readScriptSpec("/command.csv-spec", parser)); - tests.addAll(readScriptSpec("/fulltext.csv-spec", parser)); - tests.addAll(readScriptSpec("/agg.csv-spec", parser)); - tests.addAll(readScriptSpec("/columns.csv-spec", parser)); - tests.addAll(readScriptSpec("/date.csv-spec", parser)); - tests.addAll(readScriptSpec("/datetime.csv-spec", parser)); - tests.addAll(readScriptSpec("/alias.csv-spec", parser)); - tests.addAll(readScriptSpec("/null.csv-spec", parser)); - tests.addAll(readScriptSpec("/nested.csv-spec", parser)); - tests.addAll(readScriptSpec("/functions.csv-spec", parser)); - tests.addAll(readScriptSpec("/math.csv-spec", parser)); - tests.addAll(readScriptSpec("/field-alias.csv-spec", parser)); - return tests; + List urls = JdbcTestUtils.classpathResources("/*.csv-spec"); + assertTrue("Not enough specs found " + urls.toString(), urls.size() > 15); + return readScriptSpec(urls, specParser()); } public CsvSpecTestCase(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index 4697f34d2ad7..19c30b55e92b 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -6,30 +6,51 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.SuppressForbidden; +import 
org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.StringUtils; +import java.io.IOException; +import java.net.URL; +import java.net.URLConnection; +import java.nio.file.FileVisitOption; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.sql.Date; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.Time; import java.time.Instant; +import java.time.LocalDate; import java.time.ZoneId; import java.time.ZonedDateTime; import java.util.ArrayList; +import java.util.EnumSet; import java.util.List; +import java.util.jar.JarInputStream; +import java.util.zip.ZipEntry; import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; -public abstract class JdbcTestUtils { +final class JdbcTestUtils { - public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; + private JdbcTestUtils() {} - public static final String JDBC_TIMEZONE = "timezone"; - - public static ZoneId UTC = ZoneId.of("Z"); + private static final int MAX_WIDTH = 20; + + static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; + static final String JDBC_TIMEZONE = "timezone"; + static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); - public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { + static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); // header StringBuilder sb = new StringBuilder(); @@ -59,35 +80,24 @@ public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLE logger.info(sb.toString()); } - private static final int MAX_WIDTH = 20; - - public static void logResultSetData(ResultSet rs, Logger log) throws SQLException { + static void logResultSetData(ResultSet rs, Logger log) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); - StringBuilder sb = new StringBuilder(); - StringBuilder column = new StringBuilder(); int columns = metaData.getColumnCount(); while (rs.next()) { - sb.setLength(0); - for (int i = 1; i <= columns; i++) { - column.setLength(0); - if (i > 1) { - sb.append(" | "); - } - sb.append(trimOrPad(column.append(rs.getString(i)))); - } - log.info(sb); + log.info(rowAsString(rs, columns)); } } - public static String resultSetCurrentData(ResultSet rs) throws SQLException { + static String resultSetCurrentData(ResultSet rs) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); - StringBuilder column = new StringBuilder(); - - int columns = metaData.getColumnCount(); + return rowAsString(rs, metaData.getColumnCount()); + } + private static String rowAsString(ResultSet rs, int columns) throws SQLException { StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); for (int i = 1; i <= columns; i++) { column.setLength(0); if (i > 1) { @@ -137,7 +147,96 @@ public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { logger.info("\n" + formatter.formatWithHeader(cols, data)); } - public static String of(long millis, String zoneId) { + static String of(long millis, String zoneId) { return 
StringUtils.toString(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(zoneId))); } + + /** + * Returns the classpath resources matching a simple pattern ("*.csv"). + * It supports folders separated by "/" (e.g. "/some/folder/*.txt"). + * + * Currently able to resolve resources inside the classpath either from: + * folders in the file-system (typically IDEs) or + * inside jars (gradle). + */ + static List classpathResources(String pattern) throws Exception { + while (pattern.startsWith("/")) { + pattern = pattern.substring(1); + } + + Tuple split = pathAndName(pattern); + + // the root folder searched inside the classpath - default is the root classpath + // default file match + final String root = split.v1(); + final String filePattern = split.v2(); + + String[] resources = System.getProperty("java.class.path").split(System.getProperty("path.separator")); + + List matches = new ArrayList<>(); + + for (String resource : resources) { + Path path = PathUtils.get(resource); + + // check whether we're dealing with a jar + // Java 7 java.nio.fileFileSystem can be used on top of ZIPs/JARs but consumes more memory + // hence the use of the JAR API + if (path.toString().endsWith(".jar")) { + try (JarInputStream jar = getJarStream(path.toUri().toURL())) { + ZipEntry entry = null; + while ((entry = jar.getNextEntry()) != null) { + String name = entry.getName(); + Tuple entrySplit = pathAndName(name); + if (root.equals(entrySplit.v1()) && Regex.simpleMatch(filePattern, entrySplit.v2())) { + matches.add(new URL("jar:" + path.toUri() + "!/" + name)); + } + } + } + } + // normal file access + else { + Files.walkFileTree(path, EnumSet.allOf(FileVisitOption.class), 1, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Regex.simpleMatch(filePattern, file.toString())) { + matches.add(file.toUri().toURL()); + } + return FileVisitResult.CONTINUE; + } + }); + } + } + return matches; + } + + @SuppressForbidden(reason = "need to open jar") + private static JarInputStream getJarStream(URL resource) throws IOException { + URLConnection con = resource.openConnection(); + con.setDefaultUseCaches(false); + return new JarInputStream(con.getInputStream()); + } + + static Tuple pathAndName(String string) { + String folder = StringUtils.EMPTY; + String file = string; + int lastIndexOf = string.lastIndexOf("/"); + if (lastIndexOf > 0) { + folder = string.substring(0, lastIndexOf - 1); + if (lastIndexOf + 1 < string.length()) { + file = string.substring(lastIndexOf + 1); + } + } + return new Tuple<>(folder, file); + } + + static Date asDate(long millis, ZoneId zoneId) { + return new java.sql.Date( + ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) + .toLocalDate().atStartOfDay(zoneId).toInstant().toEpochMilli()); + } + + static Time asTime(long millis, ZoneId zoneId) { + return new Time(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) + .toLocalTime().atDate(JdbcTestUtils.EPOCH).atZone(zoneId).toInstant().toEpochMilli()); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index e0a64fa0ca1a..b8cd81e39f54 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -32,6 +32,8 @@ import 
java.sql.SQLType; import java.sql.Timestamp; import java.sql.Types; +import java.time.Instant; +import java.time.ZoneId; import java.util.Arrays; import java.util.Calendar; import java.util.Date; @@ -58,6 +60,8 @@ import static java.util.Calendar.SECOND; import static java.util.Calendar.YEAR; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asDate; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asTime; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.of; public class ResultSetTestCase extends JdbcIntegrationTestCase { @@ -874,17 +878,10 @@ public void testGettingDateWithoutCalendar() throws Exception { Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); - Calendar connCalendar = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); - doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); - connCalendar.setTimeInMillis(randomLongDate); - connCalendar.set(HOUR_OF_DAY, 0); - connCalendar.set(MINUTE, 0); - connCalendar.set(SECOND, 0); - connCalendar.set(MILLISECOND, 0); - java.sql.Date expectedDate = new java.sql.Date(connCalendar.getTimeInMillis()); + java.sql.Date expectedDate = asDate(randomLongDate, getZoneFromOffset(randomLongDate)); assertEquals(expectedDate, results.getDate("test_date")); assertEquals(expectedDate, results.getDate(9)); @@ -892,7 +889,7 @@ public void testGettingDateWithoutCalendar() throws Exception { assertEquals(expectedDate, results.getObject(9, java.sql.Date.class)); // bulk validation for all fields which are not of type date - validateErrorsForDateTimeTestsWithoutCalendar(results::getDate); + validateErrorsForDateTestsWithoutCalendar(results::getDate); }); } @@ -940,28 +937,21 @@ public void testGettingTimeWithoutCalendar() throws Exception { }); Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); - - Calendar c = Calendar.getInstance(TimeZone.getTimeZone(timeZoneId), Locale.ROOT); - + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); - c.setTimeInMillis(randomLongDate); - c.set(ERA, GregorianCalendar.AD); - c.set(YEAR, 1970); - c.set(MONTH, 0); - c.set(DAY_OF_MONTH, 1); - - assertEquals(results.getTime("test_date"), new java.sql.Time(c.getTimeInMillis())); - assertEquals(results.getTime(9), new java.sql.Time(c.getTimeInMillis())); - assertEquals(results.getObject("test_date", java.sql.Time.class), - new java.sql.Time(randomLongDate % 86400000L)); - assertEquals(results.getObject(9, java.sql.Time.class), - new java.sql.Time(randomLongDate % 86400000L)); - - validateErrorsForDateTimeTestsWithoutCalendar(results::getTime); + + java.sql.Time expectedTime = asTime(randomLongDate, getZoneFromOffset(randomLongDate)); + + assertEquals(expectedTime, results.getTime("test_date")); + assertEquals(expectedTime, results.getTime(9)); + assertEquals(expectedTime, results.getObject("test_date", java.sql.Time.class)); + assertEquals(expectedTime, results.getObject(9, java.sql.Time.class)); + + validateErrorsForTimeTestsWithoutCalendar(results::getTime); }); } - + public void testGettingTimeWithCalendar() throws Exception { createIndex("test"); updateMappingForNumericValuesTests("test"); @@ -1689,15 +1679,25 @@ private void assertThrowsWritesUnsupportedForUpdate(ThrowingRunnable r) { assertThrowsUnsupportedAndExpectErrorMessage(r, "Writes not supported"); } - private void 
validateErrorsForDateTimeTestsWithoutCalendar(CheckedFunction method) { + private void validateErrorsForDateTestsWithoutCalendar(CheckedFunction method) { SQLException sqle; for (Entry, SQLType> field : dateTimeTestingFields.entrySet()) { sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); assertEquals( - format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Long", + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Date", field.getKey().v2(), field.getValue()), sqle.getMessage()); } } + + private void validateErrorsForTimeTestsWithoutCalendar(CheckedFunction method) { + SQLException sqle; + for (Entry, SQLType> field : dateTimeTestingFields.entrySet()) { + sqle = expectThrows(SQLException.class, () -> method.apply(field.getKey().v1())); + assertEquals( + format(Locale.ROOT, "Unable to convert value [%.128s] of type [%s] to a Time", + field.getKey().v2(), field.getValue()), sqle.getMessage()); + } + } private void validateErrorsForDateTimeTestsWithCalendar(Calendar c, CheckedBiFunction method) { SQLException sqle; @@ -1746,4 +1746,8 @@ private Connection esWithLeniency(boolean multiValueLeniency) throws SQLExceptio private String asDateString(long millis) { return of(millis, timeZoneId); } + + private ZoneId getZoneFromOffset(Long randomLongDate) { + return ZoneId.of(ZoneId.of(timeZoneId).getRules().getOffset(Instant.ofEpochMilli(randomLongDate)).toString()); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java index 3ebf3a539691..eaca892ea43b 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -31,6 +31,8 @@ import java.util.Objects; import java.util.Properties; +import static java.util.Collections.emptyList; + /** * Tests that compare the Elasticsearch JDBC client to some other JDBC client * after loading a specific set of test data. 
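Note on the ResultSetTestCase changes above: the Calendar-based expectation math is replaced by the new asDate/asTime helpers plus getZoneFromOffset. The snippet below is a minimal, self-contained sketch (not part of the patch) of the conversion those helpers perform, using an illustrative epoch-millis value and a fixed-offset zone of the kind getZoneFromOffset produces.

import java.sql.Date;
import java.sql.Time;
import java.time.Instant;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public class DateTimeConversionSketch {
    public static void main(String[] args) {
        long millis = 1_553_990_400_123L;   // illustrative epoch millis
        ZoneId zone = ZoneId.of("+02:00");  // fixed-offset zone, as getZoneFromOffset would return

        ZonedDateTime zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zone);

        // getDate(): midnight of the local date in the given zone (what asDate computes)
        Date expectedDate = new Date(zdt.toLocalDate().atStartOfDay(zone).toInstant().toEpochMilli());

        // getTime(): the local time of day anchored on 1970-01-01 in the same zone (what asTime computes)
        Time expectedTime = new Time(zdt.toLocalTime().atDate(LocalDate.of(1970, 1, 1))
                .atZone(zone).toInstant().toEpochMilli());

        System.out.println(expectedDate + " / " + expectedTime);
    }
}

In other words, the tests now expect getDate() to yield midnight of the local date and getTime() to yield the time of day on the epoch date, both evaluated in the connection's effective offset.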
@@ -146,8 +148,26 @@ protected static List readScriptSpec(String url, Parser parser) throws URL source = SpecBaseIntegrationTestCase.class.getResource(url); Objects.requireNonNull(source, "Cannot find resource " + url); - String fileName = source.getFile().substring(source.getFile().lastIndexOf("/") + 1); - String groupName = fileName.substring(fileName.lastIndexOf('/') + 1, fileName.lastIndexOf(".")); + return readURLSpec(source, parser); + } + + protected static List readScriptSpec(List urls, Parser parser) throws Exception { + List results = emptyList(); + for (URL url : urls) { + List specs = readURLSpec(url, parser); + if (results.isEmpty()) { + results = specs; + } else { + results.addAll(specs); + } + } + + return results; + } + + private static List readURLSpec(URL source, Parser parser) throws Exception { + String fileName = JdbcTestUtils.pathAndName(source.getFile()).v2(); + String groupName = fileName.substring(0, fileName.lastIndexOf(".")); Map testNames = new LinkedHashMap<>(); List testCases = new ArrayList<>(); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java index e8ba7eb30b04..ef01dc1fca11 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SqlSpecTestCase.java @@ -10,9 +10,9 @@ import org.junit.Assume; import org.junit.ClassRule; +import java.net.URL; import java.sql.Connection; import java.sql.ResultSet; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Locale; @@ -31,19 +31,9 @@ public abstract class SqlSpecTestCase extends SpecBaseIntegrationTestCase { @ParametersFactory(argumentFormatting = PARAM_FORMATTING) public static List readScriptSpec() throws Exception { - Parser parser = specParser(); - List tests = new ArrayList<>(); - tests.addAll(readScriptSpec("/select.sql-spec", parser)); - tests.addAll(readScriptSpec("/filter.sql-spec", parser)); - tests.addAll(readScriptSpec("/datetime.sql-spec", parser)); - tests.addAll(readScriptSpec("/math.sql-spec", parser)); - tests.addAll(readScriptSpec("/agg.sql-spec", parser)); - tests.addAll(readScriptSpec("/agg-ordering.sql-spec", parser)); - tests.addAll(readScriptSpec("/arithmetic.sql-spec", parser)); - tests.addAll(readScriptSpec("/string-functions.sql-spec", parser)); - tests.addAll(readScriptSpec("/case-functions.sql-spec", parser)); - tests.addAll(readScriptSpec("/null.sql-spec", parser)); - return tests; + List urls = JdbcTestUtils.classpathResources("/*.sql-spec"); + assertTrue("Not enough specs found " + urls.toString(), urls.size() > 9); + return readScriptSpec(urls, specParser()); } private static class SqlSpecParser implements Parser { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index afcabeedf59e..c88f31bb2fd7 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -74,7 +74,7 @@ public void testBasicQuery() throws IOException { String mode = randomMode(); boolean columnar = randomBoolean(); - expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0))); + 
expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, Integer.MAX_VALUE))); if (columnar) { expected.put("values", singletonList(Arrays.asList("test", "test"))); } else { @@ -118,7 +118,7 @@ public void testNextPage() throws IOException { Map expected = new HashMap<>(); if (i == 0) { expected.put("columns", Arrays.asList( - columnInfo(mode, "text", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "text", "text", JDBCType.VARCHAR, Integer.MAX_VALUE), columnInfo(mode, "number", "long", JDBCType.BIGINT, 20), columnInfo(mode, "s", "double", JDBCType.DOUBLE, 25), columnInfo(mode, "SCORE()", "float", JDBCType.REAL, 15))); @@ -184,7 +184,7 @@ public void testScoreWithFieldNamedScore() throws IOException { Map expected = new HashMap<>(); boolean columnar = randomBoolean(); expected.put("columns", Arrays.asList( - columnInfo(mode, "name", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "name", "text", JDBCType.VARCHAR, Integer.MAX_VALUE), columnInfo(mode, "score", "long", JDBCType.BIGINT, 20), columnInfo(mode, "SCORE()", "float", JDBCType.REAL, 15))); if (columnar) { @@ -427,7 +427,7 @@ public void testBasicQueryWithFilter() throws IOException { "{\"test\":\"bar\"}"); Map expected = new HashMap<>(); - expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0))); + expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, Integer.MAX_VALUE))); expected.put("rows", singletonList(singletonList("foo"))); assertResponse(expected, runSql(new StringEntity("{\"query\":\"SELECT * FROM test\", " + "\"filter\":{\"match\": {\"test\": \"foo\"}}" + mode(mode) + "}", @@ -442,7 +442,7 @@ public void testBasicQueryWithParameters() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "test", "text", JDBCType.VARCHAR, Integer.MAX_VALUE), columnInfo(mode, "param", "integer", JDBCType.INTEGER, 11) )); if (columnar) { diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec index 97c917ceeb86..806e92b1d88d 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec @@ -178,10 +178,7 @@ aggCountOnColumnAndMultipleHaving SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 ORDER BY gender ; aggCountOnColumnAndMultipleHavingEquals SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c = 63 ORDER BY gender ; -// -// Count(column) = Column(*) which is a bug -// https://github.com/elastic/elasticsearch/issues/34549 -// + aggCountOnColumnAndMultipleHavingWithLimit SELECT gender g, COUNT(gender) c FROM "test_emp" GROUP BY g HAVING c > 10 AND c < 70 ORDER BY gender LIMIT 1; aggCountOnColumnAndHavingBetween-Ignore @@ -279,6 +276,8 @@ aggMinWithAlias SELECT gender g, MIN(emp_no) m FROM "test_emp" GROUP BY g ORDER BY gender; aggMinOnDateTime SELECT gender, MIN(birth_date) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMinOnDateTimeCastAsDate +SELECT gender, YEAR(CAST(MIN(birth_date) AS DATE)) m FROM "test_emp" GROUP BY gender ORDER BY gender; // Conditional MIN aggMinWithHaving @@ -335,6 +334,8 @@ aggMaxWithAlias SELECT gender g, MAX(emp_no) m FROM "test_emp" GROUP BY g ORDER BY gender; aggMaxOnDateTime SELECT gender, MAX(birth_date) m FROM "test_emp" GROUP BY gender ORDER BY gender; +aggMaxOnDateTimeCastAsDate +SELECT 
gender, YEAR(CAST(MAX(birth_date) AS DATE)) m FROM "test_emp" GROUP BY gender ORDER BY gender; aggAvgAndMaxWithLikeFilter SELECT CAST(AVG(salary) AS LONG) AS avg, CAST(SUM(salary) AS LONG) AS s FROM "test_emp" WHERE first_name LIKE 'G%'; diff --git a/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec index 4d8a9fc3fc2c..e117866b5e9d 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/arithmetic.csv-spec @@ -5,9 +5,14 @@ // the standard behavior here is to return the constant for each element // the weird thing is that an actual query needs to be ran arithmeticWithFrom -SELECT 5 - 2 x FROM test_emp; +SELECT 5 - 2 x FROM test_emp LIMIT 5; -x + x:i +--------------- +3 +3 +3 +3 3 ; diff --git a/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec deleted file mode 100644 index 2ed05aadb6c4..000000000000 --- a/x-pack/plugin/sql/qa/src/main/resources/command-sys.csv-spec +++ /dev/null @@ -1,11 +0,0 @@ -// -// Sys Commands -// - -sysTableTypes -SYS TABLE TYPES; - - TABLE_TYPE:s -BASE TABLE -VIEW -; diff --git a/x-pack/plugin/sql/qa/src/main/resources/debug.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/debug/debug.csv-spec similarity index 100% rename from x-pack/plugin/sql/qa/src/main/resources/debug.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/debug/debug.csv-spec diff --git a/x-pack/plugin/sql/qa/src/main/resources/debug.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/debug/debug.sql-spec similarity index 100% rename from x-pack/plugin/sql/qa/src/main/resources/debug.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/debug/debug.sql-spec diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec similarity index 94% rename from x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 41a82ac0f84b..820c358ab2f6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -929,6 +929,84 @@ null |25324 // end::orderByAgg ; +simpleMatch +// tag::simpleMatch +SELECT author, name FROM library WHERE MATCH(author, 'frank'); + + author | name +---------------+------------------- +Frank Herbert |Dune +Frank Herbert |Dune Messiah +Frank Herbert |Children of Dune +Frank Herbert |God Emperor of Dune + +// end::simpleMatch +; + +multiFieldsMatch +// tag::multiFieldsMatch +SELECT author, name, SCORE() FROM library WHERE MATCH('author^2,name^5', 'frank dune'); + + author | name | SCORE() +---------------+-------------------+--------------- +Frank Herbert |Dune |11.443176 +Frank Herbert |Dune Messiah |9.446629 +Frank Herbert |Children of Dune |8.043278 +Frank Herbert |God Emperor of Dune|7.0029488 + +// end::multiFieldsMatch +; + +optionalParamsForMatch +// tag::optionalParamsForMatch +SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=or;cutoff_frequency=0.2'); + + author | name | SCORE() +-----------------+------------------------------------+--------------- +Peter F. 
Hamilton|Pandora's Star |3.0997515 +Douglas Adams |The Hitchhiker's Guide to the Galaxy|3.1756816 + +// end::optionalParamsForMatch +; + +simpleQueryQuery +// tag::simpleQueryQuery +SELECT author, name, SCORE() FROM library WHERE QUERY('name:dune'); + + author | name | SCORE() +---------------+-------------------+--------------- +Frank Herbert |Dune |2.2886353 +Frank Herbert |Dune Messiah |1.8893257 +Frank Herbert |Children of Dune |1.6086556 +Frank Herbert |God Emperor of Dune|1.4005898 +// end::simpleQueryQuery +; + +advancedQueryQuery +// tag::advancedQueryQuery +SELECT author, name, page_count, SCORE() FROM library WHERE QUERY('_exists_:"author" AND page_count:>200 AND (name:/star.*/ OR name:duna~)'); + + author | name | page_count | SCORE() +------------------+-------------------+---------------+--------------- +Frank Herbert |Dune |604 |3.7164764 +Frank Herbert |Dune Messiah |331 |3.4169943 +Frank Herbert |Children of Dune |408 |3.2064917 +Frank Herbert |God Emperor of Dune|454 |3.0504425 +Peter F. Hamilton |Pandora's Star |768 |3.0 +Robert A. Heinlein|Starship Troopers |335 |3.0 +// end::advancedQueryQuery +; + +optionalParameterQuery +// tag::optionalParameterQuery +SELECT author, name, SCORE() FROM library WHERE QUERY('dune god', 'default_operator=and;default_field=name'); + + author | name | SCORE() +---------------+-------------------+--------------- +Frank Herbert |God Emperor of Dune|3.6984892 +// end::optionalParameterQuery +; + orderByScore // tag::orderByScore SELECT SCORE(), * FROM library WHERE MATCH(name, 'dune') ORDER BY SCORE() DESC; diff --git a/x-pack/plugin/sql/qa/src/main/resources/example.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/examples/example.csv-spec similarity index 100% rename from x-pack/plugin/sql/qa/src/main/resources/example.csv-spec rename to x-pack/plugin/sql/qa/src/main/resources/examples/example.csv-spec diff --git a/x-pack/plugin/sql/qa/src/main/resources/example.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/examples/example.sql-spec similarity index 100% rename from x-pack/plugin/sql/qa/src/main/resources/example.sql-spec rename to x-pack/plugin/sql/qa/src/main/resources/examples/example.sql-spec diff --git a/x-pack/plugin/sql/qa/src/main/resources/filter.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/filter.csv-spec new file mode 100644 index 000000000000..e60460a63e56 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/filter.csv-spec @@ -0,0 +1,119 @@ +// +// Filter +// + +whereFieldWithRLikeMatch +SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND last_name RLIKE 'S.*'; + + l:s +--------------- +Simmel +; + +whereFieldWithNotRLikeMatch +SELECT last_name, first_name FROM "test_emp" WHERE emp_no < 10020 AND first_name NOT RLIKE 'Ma.*' ORDER BY first_name LIMIT 5; + + last_name:s | first_name:s +---------------+--------------- +Preusig |Anneke +Genin |Berni +Simmel |Bezalel +Koblick |Chirstian +Bouloucos |Cristinel +; + +whereFieldWithRLikeMatchNot +SELECT last_name AS L, emp_no FROM "test_emp" WHERE NOT (emp_no < 10003 AND L NOT RLIKE 'K.*') ORDER BY emp_no LIMIT 5; + + L:s | emp_no:i +---------------+--------------- +Bamford |10003 +Koblick |10004 +Maliniak |10005 +Preusig |10006 +Zielinski |10007 +; + +whereFieldOnMatchWithAndAndOr +SELECT last_name l, gender g FROM "test_emp" WHERE (last_name RLIKE 'K.*' OR gender = 'F') AND emp_no < 10050 ORDER BY last_name; + + l:s | g:s +---------------+--------------- +Casley |F +Kalloufi |M +Koblick |M +Lenart |F +Meriste |F +Montemayor |F +Peac |F +Pettey |F +Preusig 
|F +Reistad |F +Reistad |F +Simmel |F +Stamatiou |F +Tramer |F +Zielinski |F +; + +whereFieldWithRLikeAndGroupByOrderBy +SELECT last_name l, gender g, COUNT(*) c, MAX(salary) AS sal FROM "test_emp" WHERE emp_no < 10050 AND (last_name RLIKE 'B.*' OR gender = 'F') GROUP BY g, l ORDER BY sal; + + l:s | g:s | c:l | sal:i +---------------+---------------+---------------+--------------- +Berztiss |M |1 |28336 +Stamatiou |F |1 |30404 +Brender |M |1 |36051 +Meriste |F |1 |37112 +Tramer |F |1 |37853 +Casley |F |1 |39728 +Montemayor |F |1 |47896 +Bridgland |null |1 |48942 +Simmel |F |1 |56371 +Lenart |F |1 |56415 +Bouloucos |null |1 |58715 +Preusig |F |1 |60335 +Bamford |M |1 |61805 +Pettey |F |1 |64675 +Peac |F |1 |66174 +Reistad |F |2 |73851 +Zielinski |F |1 |74572 +; + +whereFieldWithRLikeAndNotRLike +SELECT COUNT(*), last_name AS f FROM test_emp WHERE last_name RLIKE '.*o.*' AND last_name NOT RLIKE '.*f.*' GROUP BY f HAVING COUNT(*) > 1; + + COUNT(*):l | f:s +---------------+--------------- +2 |Lortz +; + +whereInlineRLike +SELECT emp_no FROM test_emp WHERE 'aaabbb' RLIKE 'aa+b+' AND 'aaabbb' NOT RLIKE 'a++c+' AND emp_no < 10080 ORDER BY emp_no DESC LIMIT 5; + + emp_no:i +--------------- +10079 +10078 +10077 +10076 +10075 +; + +whereInlineRLikeAndCount_1 +SELECT COUNT(*), TRUNCATE(emp_no, -2) t FROM test_emp WHERE 'aaabbb' RLIKE '.....?.?' AND 'aaabbb' NOT RLIKE 'aa?bb?' GROUP BY TRUNCATE(emp_no, -2) ORDER BY t ASC; + + COUNT(*):l | t:i +---------------+--------------- +99 |10000 +1 |10100 +; + +whereInlineRLikeAndCount_2 +SELECT COUNT(*), TRUNCATE(emp_no, -2) t FROM test_emp WHERE 'aaabbb' RLIKE 'a{2,}b{2,}' AND 'aaabbb' NOT RLIKE 'a{4,6}b{4,6}' GROUP BY TRUNCATE(emp_no, -2) ORDER BY t ASC; + + COUNT(*):l | t:i +---------------+--------------- +99 |10000 +1 |10100 +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec index eafc09c053ee..d15647269799 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/filter.sql-spec @@ -51,6 +51,8 @@ whereFieldWithLikeMatch SELECT last_name l FROM "test_emp" WHERE emp_no < 10003 AND last_name LIKE 'K%'; whereFieldWithNotLikeMatch SELECT last_name l FROM "test_emp" WHERE emp_no < 10020 AND first_name NOT LIKE 'Ma%'; +whereFieldWithInlineLikeMatch +SELECT emp_no FROM "test_emp" WHERE 'aaabbb' LIKE 'aa%b%' AND 'aaabbb' NOT LIKE 'a%%c%' AND emp_no < 10080 ORDER BY emp_no DESC LIMIT 5; whereFieldWithOrderNot SELECT last_name l FROM "test_emp" WHERE NOT emp_no < 10003 ORDER BY emp_no LIMIT 5; diff --git a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec index 07df14d99e36..99aa07ec91f4 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec @@ -30,6 +30,60 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', ' 10096 |Jayson |M |Mandell ; +matchWithFuzziness +SELECT first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'geo', 'fuzziness=6'); + + first_name:s | SCORE():f +----------------+--------------- +Gino |1.3684646 +Gao |2.7369292 +; + +matchWithFuzzinessAuto +SELECT first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'geo', 'fuzziness=AUTO:1,7;fuzzy_rewrite=scoring_boolean'); + + first_name:s | SCORE():f +----------------+--------------- +Gao |2.7369292 +; + +multiMatchWithFuzzinessAuto +SELECT first_name, last_name, SCORE() FROM test_emp 
WHERE MATCH('first_name^3,last_name^5', 'geo hir', 'fuzziness=AUTO:1,5;operator=or') ORDER BY first_name; + + first_name:s | last_name:s | SCORE():f +----------------+-----------------+--------------- +Gao |Dolinsky |8.210788 +Shir |McClurg |8.210788 +; + +multiMatchWithFuzziness +SELECT first_name, last_name, SCORE() FROM test_emp WHERE MATCH('first_name^3,last_name^5', 'geo hir', 'fuzziness=5;operator=or') ORDER BY first_name; + + first_name:s | last_name:s | SCORE():f +----------------+-----------------+--------------- +Gao |Dolinsky |8.210788 +Gino |Leonhardt |4.105394 +Shir |McClurg |8.210788 +Uri |Lenart |4.105394 +; + +queryWithFuzziness +SELECT first_name, SCORE() FROM test_emp WHERE QUERY('geo~', 'fuzziness=5;default_field=first_name'); + + first_name:s | SCORE():f +----------------+--------------- +Gino |1.3684646 +Gao |2.7369292 +; + +queryWithFuzzinessAuto +SELECT first_name, SCORE() FROM test_emp WHERE QUERY('geo~', 'fuzziness=AUTO:1,5;default_field=first_name'); + + first_name:s | SCORE():f +----------------+--------------- +Gao |2.7369292 +; + matchQuery SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez'); diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql index f61d48af4ff3..6292a6296ff6 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql @@ -23,12 +23,12 @@ CREATE TABLE mock ( IS_AUTOINCREMENT VARCHAR, IS_GENERATEDCOLUMN VARCHAR ) AS -SELECT null, 'test1', 'name', 12, 'TEXT', 0, 2147483647, null, null, +SELECT null, 'test1', 'name', 12, 'TEXT', 2147483647, 2147483647, null, null, 1, -- columnNullable null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' FROM DUAL UNION ALL -SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 0, 2147483647, null, null, +SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 32766, 2147483647, null, null, 1, -- columnNullable null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO' FROM DUAL diff --git a/x-pack/plugin/sql/sql-cli/build.gradle b/x-pack/plugin/sql/sql-cli/build.gradle index be7fe189cf03..e519062e14f5 100644 --- a/x-pack/plugin/sql/sql-cli/build.gradle +++ b/x-pack/plugin/sql/sql-cli/build.gradle @@ -14,14 +14,20 @@ archivesBaseName = 'elasticsearch-sql-cli' description = 'Command line interface to Elasticsearch that speaks SQL' dependencies { - compile "org.jline:jline:3.8.2" + + // select just the parts of JLine that are needed + compile "org.jline:jline-terminal:${jlineVersion}" + compile("org.jline:jline-terminal-jna:${jlineVersion}") { + exclude group: "net.java.dev.jna" + } + compile "org.jline:jline-reader:${jlineVersion}" + compile "org.jline:jline-style:${jlineVersion}" + compile xpackProject('plugin:sql:sql-client') - compile xpackProject('plugin:sql:sql-action') + compile xpackProject('plugin:sql:sql-action') compile "org.elasticsearch:elasticsearch-cli:${version}" - runtime "org.fusesource.jansi:jansi:1.16" runtime "org.elasticsearch:jna:${versions.jna}" - testCompile "org.elasticsearch.test:framework:${version}" } @@ -32,6 +38,7 @@ dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' mapping from: /sql-action.*/, to: 'elasticsearch' mapping from: /sql-client.*/, to: 'elasticsearch' + mapping from: /jline-.*/, to: 'jline' ignoreSha 'elasticsearch-cli' ignoreSha 'elasticsearch-core' ignoreSha 
'elasticsearch' @@ -41,7 +48,7 @@ dependencyLicenses { /* * Bundle all dependencies into the main jar and mark it as executable it - * can be easilly shipped around and used. + * can be easily shipped around and used. */ jar { from({ @@ -78,37 +85,6 @@ forbiddenApisMain { signaturesFiles += files('src/forbidden/cli-signatures.txt') } -thirdPartyAudit.ignoreMissingClasses ( - // jLine's optional dependencies - 'org.apache.sshd.client.SshClient', - 'org.apache.sshd.client.auth.keyboard.UserInteraction', - 'org.apache.sshd.client.channel.ChannelShell', - 'org.apache.sshd.client.channel.ClientChannel', - 'org.apache.sshd.client.channel.ClientChannelEvent', - 'org.apache.sshd.client.future.AuthFuture', - 'org.apache.sshd.client.future.ConnectFuture', - 'org.apache.sshd.client.future.OpenFuture', - 'org.apache.sshd.client.session.ClientSession', - 'org.apache.sshd.common.Factory', - 'org.apache.sshd.common.channel.PtyMode', - 'org.apache.sshd.common.config.keys.FilePasswordProvider', - 'org.apache.sshd.common.util.io.NoCloseInputStream', - 'org.apache.sshd.common.util.io.NoCloseOutputStream', - 'org.apache.sshd.server.Command', - 'org.apache.sshd.server.Environment', - 'org.apache.sshd.server.ExitCallback', - 'org.apache.sshd.server.SessionAware', - 'org.apache.sshd.server.Signal', - 'org.apache.sshd.server.SshServer', - 'org.apache.sshd.server.keyprovider.SimpleGeneratorHostKeyProvider', - 'org.apache.sshd.server.scp.ScpCommandFactory$Builder', - 'org.apache.sshd.server.session.ServerSession', - 'org.apache.sshd.server.subsystem.sftp.SftpSubsystemFactory$Builder', - 'org.mozilla.universalchardet.UniversalDetector', - 'org.fusesource.jansi.internal.Kernel32$FOCUS_EVENT_RECORD', - 'org.fusesource.jansi.internal.Kernel32$MOUSE_EVENT_RECORD', -) - task runcli { description = 'Run the CLI and connect to elasticsearch running on 9200' dependsOn 'assemble' diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 deleted file mode 100644 index 8adc5c7977cf..000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jansi-1.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1aaf0028852164ab6b4057192ccd0ba7dedd3a5 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt b/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt deleted file mode 100644 index d64569567334..000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jansi-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
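With jansi dropped and JLine pulled in as individual modules (jline-terminal, jline-terminal-jna, jline-reader, jline-style), terminal creation is expected to go through JLine's JNA-backed provider instead of the jansi one. The sketch below shows, under that assumption, how a JLine 3 terminal and line reader can be wired up with jansi disabled, mirroring the Cli.java change that follows; the prompt string and echo logic are purely illustrative and not code from the patch.

import org.jline.reader.LineReader;
import org.jline.reader.LineReaderBuilder;
import org.jline.terminal.Terminal;
import org.jline.terminal.TerminalBuilder;

public class JLineWiringSketch {
    public static void main(String[] args) throws Exception {
        // Build a terminal with the jansi provider disabled; with jline-terminal-jna
        // on the classpath, JLine can fall back to its JNA-based terminal support.
        Terminal terminal = TerminalBuilder.builder()
                .name("Elasticsearch SQL CLI")
                .jansi(false)
                .build();

        LineReader reader = LineReaderBuilder.builder().terminal(terminal).build();
        String line = reader.readLine("sql> ");

        terminal.writer().println("echo: " + line);
        terminal.flush();
        terminal.close();
    }
}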
diff --git a/x-pack/plugin/sql/sql-cli/licenses/jansi-NOTICE.txt b/x-pack/plugin/sql/sql-cli/licenses/jansi-NOTICE.txt deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 deleted file mode 100644 index 29e11fa3a021..000000000000 --- a/x-pack/plugin/sql/sql-cli/licenses/jline-3.8.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b81efadcb78388b662ede7965b272be56a86ec1 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.10.0.jar.sha1 new file mode 100644 index 000000000000..8f97d149b399 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-reader-3.10.0.jar.sha1 @@ -0,0 +1 @@ +c93b837f49fe8eb3f68cc3daee5dfb83141ca538 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.10.0.jar.sha1 new file mode 100644 index 000000000000..f308f7a728a1 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-style-3.10.0.jar.sha1 @@ -0,0 +1 @@ +495cfd226e13abf2b8a5be4f270d5b9897588733 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.10.0.jar.sha1 new file mode 100644 index 000000000000..ae6bccfede9d --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-3.10.0.jar.sha1 @@ -0,0 +1 @@ +ac853ad4dd46252319cbb9c012d9ab1bcc501162 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.10.0.jar.sha1 b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.10.0.jar.sha1 new file mode 100644 index 000000000000..4d28d9789805 --- /dev/null +++ b/x-pack/plugin/sql/sql-cli/licenses/jline-terminal-jna-3.10.0.jar.sha1 @@ -0,0 +1 @@ +797f8cadcb4a969881e8dbd07a623d1b13214984 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index 6431f10a4921..9a1d26e63570 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -7,6 +7,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; + import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; @@ -20,11 +21,12 @@ import org.elasticsearch.xpack.sql.cli.command.PrintLogoCommand; import org.elasticsearch.xpack.sql.cli.command.ServerInfoCliCommand; import org.elasticsearch.xpack.sql.cli.command.ServerQueryCliCommand; -import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.ClientException; import org.elasticsearch.xpack.sql.client.ConnectionConfiguration; +import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.Version; import org.jline.terminal.TerminalBuilder; + import java.io.IOException; import java.net.ConnectException; import java.sql.SQLInvalidAuthorizationSpecException; @@ -46,8 +48,13 @@ public class Cli extends LoggingAwareCommand { * -Dorg.jline.terminal.dumb=true */ public static void main(String[] args) throws Exception { - final Cli cli = new Cli(new JLineTerminal(TerminalBuilder.builder().build(), true)); configureJLineLogging(); + final 
Cli cli = new Cli(new JLineTerminal(TerminalBuilder.builder() + .name("Elasticsearch SQL CLI") + // remove jansi since it has issues on Windows in closing terminals + // the CLI uses JNA anyway + .jansi(false) + .build(), true)); int status = cli.main(args, Terminal.DEFAULT); if (status != ExitCodes.OK) { exit(status); @@ -142,7 +149,7 @@ private void checkConnection(CliSession cliSession, CliTerminal cliTerminal, Con "Cannot connect to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); } else if (ex.getCause() != null && ex.getCause() instanceof SQLInvalidAuthorizationSpecException) { throw new UserException(ExitCodes.NOPERM, - "Cannot establish a secure connection to the server " + + "Cannot establish a secure connection to the server " + con.connectionString() + " - " + ex.getCause().getMessage()); } else { // Most likely we connected to something other than Elasticsearch diff --git a/x-pack/plugin/sql/sql-client/build.gradle b/x-pack/plugin/sql/sql-client/build.gradle index c4ee030d4568..613ca73a4dba 100644 --- a/x-pack/plugin/sql/sql-client/build.gradle +++ b/x-pack/plugin/sql/sql-client/build.gradle @@ -18,9 +18,6 @@ dependencyLicenses { mapping from: /sql-proto.*/, to: 'elasticsearch' mapping from: /elasticsearch-cli.*/, to: 'elasticsearch' mapping from: /elasticsearch-core.*/, to: 'elasticsearch' - mapping from: /lucene-.*/, to: 'lucene' - ignoreSha 'sql-action' - ignoreSha 'elasticsearch' ignoreSha 'elasticsearch-core' } diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index a10e3ff8c4ce..b1c055a0dfcb 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -14,7 +14,6 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } - compile "joda-time:joda-time:${versions.joda}" runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.10.1.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ece..000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt b/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt deleted file mode 100644 index 75b52484ea47..000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/joda-time-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt b/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt deleted file mode 100644 index dffbcf31cacf..000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/joda-time-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -============================================================================= -= NOTICE file corresponding to section 4d of the Apache License Version 2.0 = -============================================================================= -This product includes software developed by -Joda.org (http://www.joda.org/). diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java index ef4e603564ef..7cf31781d9e8 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java @@ -119,7 +119,7 @@ public boolean equals(Object o) { return false; } ColumnInfo that = (ColumnInfo) o; - return displaySize == that.displaySize && + return Objects.equals(displaySize, that.displaySize) && Objects.equals(table, that.table) && Objects.equals(name, that.name) && Objects.equals(esType, that.esType); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 8935910df2b4..65a9410941b1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -31,7 +31,7 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.Count; import org.elasticsearch.xpack.sql.expression.function.scalar.Cast; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.ArithmeticOperation; -import org.elasticsearch.xpack.sql.expression.predicate.regex.Like; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.sql.plan.TableIdentifier; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.EsRelation; @@ -852,8 +852,8 @@ private Expression collectResolvedAndReplace(Expression e, Map) v).get(innerKey) : v; + return handleDateTime(innerKey != null && v instanceof Map ? ((Map) v).get(innerKey) : v); + } + + private Object handleDateTime(Object object) { + if (isDateTimeBased) { + if (object == null) { + return object; + } else if (object instanceof Number) { + return DateUtils.asDateTime(((Number) object).longValue(), zoneId); + } else { + throw new SqlIllegalArgumentException("Invalid date key returned: {}", object); + } + } + return object; } /** * Check if the given aggregate has been executed and has computed values * or not (the bucket is null). - * + * * Waiting on https://github.com/elastic/elasticsearch/issues/34903 */ private static boolean containsValues(InternalAggregation agg) { @@ -130,11 +157,11 @@ public boolean equals(Object obj) { if (this == obj) { return true; } - + if (obj == null || getClass() != obj.getClass()) { return false; } - + MetricAggExtractor other = (MetricAggExtractor) obj; return Objects.equals(name, other.name) && Objects.equals(property, other.property) @@ -146,4 +173,4 @@ public String toString() { String i = innerKey != null ? 
"[" + innerKey + "]" : ""; return Aggs.ROOT_GROUP_NAME + ">" + name + "." + property + i; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java index 75f436622330..e67f4943445a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/Like.java @@ -6,41 +6,35 @@ package org.elasticsearch.xpack.sql.expression.predicate.regex; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.tree.Source; -import java.util.Objects; - -public class Like extends RegexMatch { - - private final LikePattern pattern; +public class Like extends RegexMatch { public Like(Source source, Expression left, LikePattern pattern) { - super(source, left, pattern.asJavaRegex()); - this.pattern = pattern; - } - - public LikePattern pattern() { - return pattern; + super(source, left, pattern); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Like::new, field(), pattern); + return NodeInfo.create(this, Like::new, field(), pattern()); } @Override protected Like replaceChild(Expression newLeft) { - return new Like(source(), newLeft, pattern); + return new Like(source(), newLeft, pattern()); } @Override - public boolean equals(Object obj) { - return super.equals(obj) && Objects.equals(((Like) obj).pattern(), pattern()); + public Boolean fold() { + Object val = field().fold(); + return RegexOperation.match(val, pattern().asJavaRegex()); } @Override - public int hashCode() { - return Objects.hash(super.hashCode(), pattern()); + protected Processor makeProcessor() { + return new RegexProcessor(pattern().asJavaRegex()); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java index b925bd769ea4..187eda3fdece 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RLike.java @@ -6,29 +6,35 @@ package org.elasticsearch.xpack.sql.expression.predicate.regex; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; +import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; -public class RLike extends RegexMatch { +public class RLike extends RegexMatch { - private final String pattern; - - public RLike(Source source, Expression left, String pattern) { - super(source, left, pattern); - this.pattern = pattern; - } - - public String pattern() { - return pattern; + public RLike(Source source, Expression value, String pattern) { + super(source, value, pattern); } @Override protected NodeInfo info() { - return NodeInfo.create(this, RLike::new, field(), pattern); + return NodeInfo.create(this, RLike::new, field(), pattern()); } @Override 
protected RLike replaceChild(Expression newChild) { - return new RLike(source(), newChild, pattern); + return new RLike(source(), newChild, pattern()); + } + + @Override + public Boolean fold() { + Object val = field().fold(); + return RegexOperation.match(val, pattern()); + } + + @Override + protected Processor makeProcessor() { + return new RegexProcessor(pattern()); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java index b3c09c67fc6c..82c6d570f39f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/regex/RegexMatch.java @@ -10,21 +10,25 @@ import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Nullability; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; -import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; -import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation; import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; -import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; +import java.util.Objects; -public abstract class RegexMatch extends UnaryScalarFunction { +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isStringAndExact; - private final String pattern; +public abstract class RegexMatch extends UnaryScalarFunction { - protected RegexMatch(Source source, Expression value, String pattern) { + private final T pattern; + + protected RegexMatch(Source source, Expression value, T pattern) { super(source, value); this.pattern = pattern; } + + public T pattern() { + return pattern; + } @Override public DataType dataType() { @@ -33,7 +37,7 @@ public DataType dataType() { @Override public Nullability nullable() { - if (pattern == null) { + if (pattern() == null) { return Nullability.TRUE; } return field().nullable(); @@ -49,15 +53,14 @@ public boolean foldable() { // right() is not directly foldable in any context but Like can fold it. 
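// Illustrative sketch, not part of the change above: with this refactoring, Like and
// RLike fold a constant predicate up front via RegexOperation.match(value, javaRegex).
// The snippet below shows what that match amounts to with plain java.util.regex,
// assuming LikePattern.asJavaRegex() translates '%' to ".*" and '_' to "." (the exact
// translation is not shown in this diff, so the regex here is an assumption).
import java.util.regex.Pattern;

class LikeFoldSketch {
    public static void main(String[] args) {
        String javaRegex = "a.*b.";  // assumed translation of the SQL LIKE pattern 'a%b_'
        System.out.println(Pattern.matches(javaRegex, "aXXbc")); // true  -> the LIKE folds to TRUE
        System.out.println(Pattern.matches(javaRegex, "ab"));    // false -> the LIKE folds to FALSE
    }
}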
return field().foldable(); } - + @Override - public Boolean fold() { - Object val = field().fold(); - return RegexOperation.match(val, pattern); + public boolean equals(Object obj) { + return super.equals(obj) && Objects.equals(((RegexMatch) obj).pattern(), pattern()); } @Override - protected Processor makeProcessor() { - return new RegexProcessor(pattern); + public int hashCode() { + return Objects.hash(super.hashCode(), pattern()); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index 87709ac104e0..04935023747c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -149,9 +149,8 @@ public SysTables visitSysTables(SysTablesContext ctx) { if (value != null) { // check special ODBC wildcard case if (value.equals(StringUtils.SQL_WILDCARD) && ctx.string().size() == 1) { - // convert % to enumeration - // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments?view=ssdt-18vs2017 - types.addAll(IndexType.VALID); + // treat % as null + // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments } // special case for legacy apps (like msquery) that always asks for 'TABLE' // which we manually map to all concrete tables supported diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index e829915c35f1..46d8148ed25b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -387,44 +387,7 @@ public Order visitOrderBy(OrderByContext ctx) { @Override public DataType visitPrimitiveDataType(PrimitiveDataTypeContext ctx) { - String type = visitIdentifier(ctx.identifier()).toLowerCase(Locale.ROOT); - - switch (type) { - case "bit": - case "bool": - case "boolean": - return DataType.BOOLEAN; - case "tinyint": - case "byte": - return DataType.BYTE; - case "smallint": - case "short": - return DataType.SHORT; - case "int": - case "integer": - return DataType.INTEGER; - case "long": - case "bigint": - return DataType.LONG; - case "real": - return DataType.FLOAT; - case "float": - case "double": - return DataType.DOUBLE; - case "date": - return DataType.DATE; - case "datetime": - case "timestamp": - return DataType.DATETIME; - case "char": - case "varchar": - case "string": - return DataType.KEYWORD; - case "ip": - return DataType.IP; - default: - throw new ParsingException(source(ctx), "Does not recognize type [{}]", ctx.getText()); - } + return dataType(source(ctx), visitIdentifier(ctx.identifier())); } // @@ -437,24 +400,20 @@ public Cast visitCastExpression(CastExpressionContext ctx) { return new Cast(source(castTc), expression(castTc.expression()), typedParsing(castTc.dataType(), DataType.class)); } else { ConvertTemplateContext convertTc = ctx.convertTemplate(); - String convertDataType = convertTc.dataType().getText().toUpperCase(Locale.ROOT); - DataType dataType; - if (convertDataType.startsWith("SQL_")) { - dataType = DataType.fromOdbcType(convertDataType); - if (dataType == null) { - throw new ParsingException(source(convertTc.dataType()), "Invalid data type [{}] provided", 
convertDataType); - } - } else { - try { - dataType = DataType.valueOf(convertDataType); - } catch (IllegalArgumentException e) { - throw new ParsingException(source(convertTc.dataType()), "Invalid data type [{}] provided", convertDataType); - } - } + DataType dataType = dataType(source(convertTc.dataType()), convertTc.dataType().getText()); return new Cast(source(convertTc), expression(convertTc.expression()), dataType); } } + private static DataType dataType(Source ctx, String string) { + String type = string.toUpperCase(Locale.ROOT); + DataType dataType = type.startsWith("SQL_") ? DataType.fromOdbcType(type) : DataType.fromSqlOrEsType(type); + if (dataType == null) { + throw new ParsingException(ctx, "Does not recognize type [{}]", string); + } + return dataType; + } + @Override public Object visitCastOperatorExpression(SqlBaseParser.CastOperatorExpressionContext ctx) { return new Cast(source(ctx), expression(ctx.valueExpression()), typedParsing(ctx.dataType(), DataType.class)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java index 5ce1e6dcc8a7..53f1e1019b75 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -14,9 +14,8 @@ import org.elasticsearch.xpack.sql.session.Rows; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.elasticsearch.xpack.sql.util.CollectionUtils; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.ArrayList; import java.util.Comparator; @@ -77,8 +76,11 @@ public final void execute(SqlSession session, ActionListener liste // namely one param specified with '%', everything else empty string // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=ssdt-18vs2017#comments - if (clusterPattern != null && clusterPattern.pattern().equals(SQL_WILDCARD)) { - if ((pattern == null || pattern.pattern().isEmpty()) && CollectionUtils.isEmpty(types)) { + // catalog enumeration + if (clusterPattern == null || clusterPattern.pattern().equals(SQL_WILDCARD)) { + // enumerate only if pattern is "" and no types are specified (types is null) + if (pattern != null && pattern.pattern().isEmpty() && index == null + && types == null) { Object[] enumeration = new Object[10]; // send only the cluster, everything else null enumeration[0] = cluster; @@ -87,12 +89,15 @@ public final void execute(SqlSession session, ActionListener liste } } - // if no types were specified (the parser takes care of the % case) - if (IndexType.VALID.equals(types)) { - if ((clusterPattern == null || clusterPattern.pattern().isEmpty()) - && (pattern == null || pattern.pattern().isEmpty())) { + // enumerate types + // if no types are specified (the parser takes care of the % case) + if (types == null) { + // empty string for catalog + if (clusterPattern != null && clusterPattern.pattern().isEmpty() + // empty string for table like and no index specified + && pattern != null && pattern.pattern().isEmpty() && index == null) { List> values = new ArrayList<>(); - // send only the types, everything else null + // send only the types, everything else is made of empty strings for 
(IndexType type : IndexType.VALID) { Object[] enumeration = new Object[10]; enumeration[3] = type.toSql(); @@ -105,7 +110,7 @@ public final void execute(SqlSession session, ActionListener liste } } - + // no enumeration pattern found, list actual tables String cRegex = clusterPattern != null ? clusterPattern.asJavaRegex() : null; // if the catalog doesn't match, don't return any results diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java index 0d4ee0760316..2112128b41b0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java @@ -82,8 +82,7 @@ public final void execute(SqlSession session, ActionListener liste .sorted(Comparator.comparing((DataType t) -> t.sqlType.getVendorTypeNumber()).thenComparing(DataType::sqlName)) .map(t -> asList(t.toString(), t.sqlType.getVendorTypeNumber(), - //https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/column-size?view=sql-server-2017 - t.defaultPrecision, + DataTypes.precision(t), "'", "'", null, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index f8b9fdd47b9d..56554185ce84 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -404,7 +404,7 @@ private Tuple addAggFunction(GroupByKey groupingAg // COUNT() } else if (!c.distinct()) { LeafAgg leafAgg = toAgg(functionId, f); - AggPathInput a = new AggPathInput(f, new MetricAggRef(leafAgg.id(), "doc_count", "_count")); + AggPathInput a = new AggPathInput(f, new MetricAggRef(leafAgg.id(), "doc_count", "_count", false)); queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); return new Tuple<>(queryC, a); } @@ -430,14 +430,16 @@ private Tuple addAggFunction(GroupByKey groupingAg // FIXME: concern leak - hack around MatrixAgg which is not // generalized (afaik) aggInput = new AggPathInput(f, - new MetricAggRef(cAggPath, ia.innerName(), ia.innerKey() != null ? QueryTranslator.nameOf(ia.innerKey()) : null)); + new MetricAggRef(cAggPath, ia.innerName(), + ia.innerKey() != null ? 
QueryTranslator.nameOf(ia.innerKey()) : null, + ia.dataType().isDateBased())); } else { LeafAgg leafAgg = toAgg(functionId, f); if (f instanceof TopHits) { aggInput = new AggPathInput(f, new TopHitsAggRef(leafAgg.id(), f.dataType())); } else { - aggInput = new AggPathInput(f, new MetricAggRef(leafAgg.id())); + aggInput = new AggPathInput(f, new MetricAggRef(leafAgg.id(), f.dataType().isDateBased())); } queryC = queryC.with(queryC.aggs().addAgg(leafAgg)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index 9d86c572c2b1..1fdd27d9b0b2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -472,6 +472,7 @@ private static String topAggsField(AggregateFunction af, Expression e) { // TODO: need to optimize on ngram // TODO: see whether escaping is needed + @SuppressWarnings("rawtypes") static class Likes extends ExpressionTranslator { @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java index 75ee3d8f4474..f1602df4c0e7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/MetricAggRef.java @@ -17,19 +17,21 @@ public class MetricAggRef extends AggRef { private final String name; private final String property; private final String innerKey; + private final boolean isDateTimeBased; - public MetricAggRef(String name) { - this(name, "value"); + public MetricAggRef(String name, boolean isDateTimeBased) { + this(name, "value", isDateTimeBased); } - public MetricAggRef(String name, String property) { - this(name, property, null); + public MetricAggRef(String name, String property, boolean isDateTimeBased) { + this(name, property, null, isDateTimeBased); } - public MetricAggRef(String name, String property, String innerKey) { + public MetricAggRef(String name, String property, String innerKey, boolean isDateTimeBased) { this.name = name; this.property = property; this.innerKey = innerKey; + this.isDateTimeBased = isDateTimeBased; } public String name() { @@ -44,6 +46,10 @@ public String innerKey() { return innerKey; } + public boolean isDateTimeBased() { + return isDateTimeBased; + } + @Override public String toString() { String i = innerKey != null ? 
"[" + innerKey + "]" : ""; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java index d0fe697268d4..7bddacb86bf7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; @@ -28,16 +29,17 @@ public class MatchQuery extends LeafQuery { // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first // TODO: add zero terms query support, I'm not sure the best way to parse it yet... // appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); - appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java index 4f0bc0720ae8..2c6b47d7bdcc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; @@ -29,18 +30,19 @@ public class MultiMatchQuery extends LeafQuery { appliers.put("slop", (qb, s) -> qb.slop(Integer.valueOf(s))); // TODO: add zero terms query support, I'm not sure the best way to parse it yet... 
// appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); - appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); - appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); appliers.put("type", (qb, s) -> qb.type(s)); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); - appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java index de457ba918e7..a6d8ff2dbf5f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; @@ -28,30 +29,29 @@ public class QueryStringQuery extends LeafQuery { static { HashMap> appliers = new HashMap<>(28); // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first + appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); + appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("default_field", (qb, s) -> qb.defaultField(s)); appliers.put("default_operator", (qb, s) -> qb.defaultOperator(Operator.fromString(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); - appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); - appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); - appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); - 
appliers.put("lowercase_expanded_terms", (qb, s) -> {}); appliers.put("enable_position_increments", (qb, s) -> qb.enablePositionIncrements(Booleans.parseBoolean(s))); appliers.put("escape", (qb, s) -> qb.escape(Booleans.parseBoolean(s))); - appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_max_expansions", (qb, s) -> qb.fuzzyMaxExpansions(Integer.valueOf(s))); + appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); + appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("phrase_slop", (qb, s) -> qb.phraseSlop(Integer.valueOf(s))); - appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); - appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); appliers.put("rewrite", (qb, s) -> qb.rewrite(s)); - appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); + appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); appliers.put("quote_field_suffix", (qb, s) -> qb.quoteFieldSuffix(s)); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); - appliers.put("locale", (qb, s) -> {}); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); appliers.put("time_zone", (qb, s) -> qb.timeZone(s)); appliers.put("type", (qb, s) -> qb.type(MultiMatchQueryBuilder.Type.parse(s, LoggingDeprecationHandler.INSTANCE))); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); - appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index bc2d5cc722cc..deeeed1c1ca1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -13,6 +13,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Map.Entry; /** * Elasticsearch SQL data types. 
@@ -35,19 +36,19 @@ public enum DataType { DOUBLE( "double", JDBCType.DOUBLE, Double.BYTES, 15, 25, false, true, true), // 24 bits defaultPrecision - 24*log10(2) =~ 7 (7.22) FLOAT( "float", JDBCType.REAL, Float.BYTES, 7, 15, false, true, true), - HALF_FLOAT( "half_float", JDBCType.FLOAT, Double.BYTES, 16, 25, false, true, true), + HALF_FLOAT( "half_float", JDBCType.FLOAT, Float.BYTES, 3, 25, false, true, true), // precision is based on long - SCALED_FLOAT( "scaled_float", JDBCType.FLOAT, Double.BYTES, 19, 25, false, true, true), - KEYWORD( "keyword", JDBCType.VARCHAR, Integer.MAX_VALUE, 256, 0, false, false, true), - TEXT( "text", JDBCType.VARCHAR, Integer.MAX_VALUE, Integer.MAX_VALUE, 0, false, false, false), + SCALED_FLOAT( "scaled_float", JDBCType.DOUBLE, Long.BYTES, 15, 25, false, true, true), + KEYWORD( "keyword", JDBCType.VARCHAR, Integer.MAX_VALUE, 32766, 32766, false, false, true), + TEXT( "text", JDBCType.VARCHAR, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, false, false, false), OBJECT( "object", JDBCType.STRUCT, -1, 0, 0, false, false, false), NESTED( "nested", JDBCType.STRUCT, -1, 0, 0, false, false, false), - BINARY( "binary", JDBCType.VARBINARY, -1, Integer.MAX_VALUE, 0, false, false, false), + BINARY( "binary", JDBCType.VARBINARY, -1, Integer.MAX_VALUE, Integer.MAX_VALUE, false, false, false), DATE( JDBCType.DATE, Long.BYTES, 24, 24, false, false, true), // since ODBC and JDBC interpret precision for Date as display size // the precision is 23 (number of chars in ISO8601 with millis) + Z (the UTC timezone) // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288 - DATETIME( "date", JDBCType.TIMESTAMP, Long.BYTES, 24, 24, false, false, true), + DATETIME( "date", JDBCType.TIMESTAMP, Long.BYTES, 3, 24, false, false, true), // // specialized types // @@ -73,58 +74,78 @@ public enum DataType { INTERVAL_MINUTE_TO_SECOND(ExtTypes.INTERVAL_MINUTE_TO_SECOND,Long.BYTES, 23, 23, false, false, false); // @formatter:on - private static final Map odbcToEs; + private static final Map ODBC_TO_ES = new HashMap<>(36); static { - odbcToEs = new HashMap<>(36); - // Numeric - odbcToEs.put("SQL_BIT", BOOLEAN); - odbcToEs.put("SQL_TINYINT", BYTE); - odbcToEs.put("SQL_SMALLINT", SHORT); - odbcToEs.put("SQL_INTEGER", INTEGER); - odbcToEs.put("SQL_BIGINT", LONG); - odbcToEs.put("SQL_FLOAT", FLOAT); - odbcToEs.put("SQL_REAL", FLOAT); - odbcToEs.put("SQL_DOUBLE", DOUBLE); - odbcToEs.put("SQL_DECIMAL", DOUBLE); - odbcToEs.put("SQL_NUMERIC", DOUBLE); + ODBC_TO_ES.put("SQL_BIT", BOOLEAN); + ODBC_TO_ES.put("SQL_TINYINT", BYTE); + ODBC_TO_ES.put("SQL_SMALLINT", SHORT); + ODBC_TO_ES.put("SQL_INTEGER", INTEGER); + ODBC_TO_ES.put("SQL_BIGINT", LONG); + ODBC_TO_ES.put("SQL_REAL", FLOAT); + ODBC_TO_ES.put("SQL_FLOAT", DOUBLE); + ODBC_TO_ES.put("SQL_DOUBLE", DOUBLE); + ODBC_TO_ES.put("SQL_DECIMAL", DOUBLE); + ODBC_TO_ES.put("SQL_NUMERIC", DOUBLE); // String - odbcToEs.put("SQL_GUID", KEYWORD); - odbcToEs.put("SQL_CHAR", KEYWORD); - odbcToEs.put("SQL_WCHAR", KEYWORD); - odbcToEs.put("SQL_VARCHAR", TEXT); - odbcToEs.put("SQL_WVARCHAR", TEXT); - odbcToEs.put("SQL_LONGVARCHAR", TEXT); - odbcToEs.put("SQL_WLONGVARCHAR", TEXT); + ODBC_TO_ES.put("SQL_GUID", KEYWORD); + ODBC_TO_ES.put("SQL_CHAR", KEYWORD); + ODBC_TO_ES.put("SQL_WCHAR", KEYWORD); + ODBC_TO_ES.put("SQL_VARCHAR", TEXT); + ODBC_TO_ES.put("SQL_WVARCHAR", TEXT); + ODBC_TO_ES.put("SQL_LONGVARCHAR", TEXT); + ODBC_TO_ES.put("SQL_WLONGVARCHAR", TEXT); // Binary - odbcToEs.put("SQL_BINARY", BINARY); - 
odbcToEs.put("SQL_VARBINARY", BINARY); - odbcToEs.put("SQL_LONGVARBINARY", BINARY); + ODBC_TO_ES.put("SQL_BINARY", BINARY); + ODBC_TO_ES.put("SQL_VARBINARY", BINARY); + ODBC_TO_ES.put("SQL_LONGVARBINARY", BINARY); // Date - odbcToEs.put("SQL_DATE", DATE); - odbcToEs.put("SQL_TIME", DATETIME); - odbcToEs.put("SQL_TIMESTAMP", DATETIME); + ODBC_TO_ES.put("SQL_DATE", DATE); + ODBC_TO_ES.put("SQL_TIME", DATETIME); + ODBC_TO_ES.put("SQL_TIMESTAMP", DATETIME); // Intervals - odbcToEs.put("SQL_INTERVAL_HOUR_TO_MINUTE", INTERVAL_HOUR_TO_MINUTE); - odbcToEs.put("SQL_INTERVAL_HOUR_TO_SECOND", INTERVAL_HOUR_TO_SECOND); - odbcToEs.put("SQL_INTERVAL_MINUTE_TO_SECOND", INTERVAL_MINUTE_TO_SECOND); - odbcToEs.put("SQL_INTERVAL_MONTH", INTERVAL_MONTH); - odbcToEs.put("SQL_INTERVAL_YEAR", INTERVAL_YEAR); - odbcToEs.put("SQL_INTERVAL_YEAR_TO_MONTH", INTERVAL_YEAR_TO_MONTH); - odbcToEs.put("SQL_INTERVAL_DAY", INTERVAL_DAY); - odbcToEs.put("SQL_INTERVAL_HOUR", INTERVAL_HOUR); - odbcToEs.put("SQL_INTERVAL_MINUTE", INTERVAL_MINUTE); - odbcToEs.put("SQL_INTERVAL_SECOND", INTERVAL_SECOND); - odbcToEs.put("SQL_INTERVAL_DAY_TO_HOUR", INTERVAL_DAY_TO_HOUR); - odbcToEs.put("SQL_INTERVAL_DAY_TO_MINUTE", INTERVAL_DAY_TO_MINUTE); - odbcToEs.put("SQL_INTERVAL_DAY_TO_SECOND", INTERVAL_DAY_TO_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_HOUR_TO_MINUTE", INTERVAL_HOUR_TO_MINUTE); + ODBC_TO_ES.put("SQL_INTERVAL_HOUR_TO_SECOND", INTERVAL_HOUR_TO_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_MINUTE_TO_SECOND", INTERVAL_MINUTE_TO_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_MONTH", INTERVAL_MONTH); + ODBC_TO_ES.put("SQL_INTERVAL_YEAR", INTERVAL_YEAR); + ODBC_TO_ES.put("SQL_INTERVAL_YEAR_TO_MONTH", INTERVAL_YEAR_TO_MONTH); + ODBC_TO_ES.put("SQL_INTERVAL_DAY", INTERVAL_DAY); + ODBC_TO_ES.put("SQL_INTERVAL_HOUR", INTERVAL_HOUR); + ODBC_TO_ES.put("SQL_INTERVAL_MINUTE", INTERVAL_MINUTE); + ODBC_TO_ES.put("SQL_INTERVAL_SECOND", INTERVAL_SECOND); + ODBC_TO_ES.put("SQL_INTERVAL_DAY_TO_HOUR", INTERVAL_DAY_TO_HOUR); + ODBC_TO_ES.put("SQL_INTERVAL_DAY_TO_MINUTE", INTERVAL_DAY_TO_MINUTE); + ODBC_TO_ES.put("SQL_INTERVAL_DAY_TO_SECOND", INTERVAL_DAY_TO_SECOND); } + private static final Map SQL_TO_ES = new HashMap<>(45); + static { + // first add ES types + for (DataType type : DataType.values()) { + if (type.isPrimitive()) { + SQL_TO_ES.put(type.name(), type); + } + } + + // reuse the ODBC definition (without SQL_) + // note that this will override existing types in particular FLOAT + for (Entry entry : ODBC_TO_ES.entrySet()) { + SQL_TO_ES.put(entry.getKey().substring(4), entry.getValue()); + } + + + // special ones + SQL_TO_ES.put("BOOL", DataType.BOOLEAN); + SQL_TO_ES.put("INT", DataType.INTEGER); + SQL_TO_ES.put("STRING", DataType.KEYWORD); + } + /** * Type's name used for error messages and column info for the clients */ @@ -234,9 +255,13 @@ public boolean isDateBased() { } public static DataType fromOdbcType(String odbcType) { - return odbcToEs.get(odbcType); + return ODBC_TO_ES.get(odbcType); } + public static DataType fromSqlOrEsType(String typeName) { + return SQL_TO_ES.get(typeName.toUpperCase(Locale.ROOT)); + } + /** * Creates returns DataType enum corresponding to the specified es type */ diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index 5a3fa235e9a7..f8d657447923 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -175,7 +175,7 @@ public static Integer metaSqlDataType(DataType t) { } // https://github.com/elastic/elasticsearch/issues/30386 - // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function?view=sql-server-2017 + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function public static Integer metaSqlDateTimeSub(DataType t) { if (t == DATETIME) { // ODBC SQL_CODE_TIMESTAMP @@ -185,37 +185,30 @@ public static Integer metaSqlDateTimeSub(DataType t) { return 0; } - // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/decimal-digits?view=sql-server-2017 public static Short metaSqlMinimumScale(DataType t) { - // TODO: return info for HALF/SCALED_FLOATS (should be based on field not type) - if (t == DATETIME) { - return Short.valueOf((short) 3); - } - if (t.isInteger()) { - return Short.valueOf((short) 0); - } - // minimum scale? - if (t.isRational()) { - return Short.valueOf((short) 0); - } - return null; + return metaSqlSameScale(t); } public static Short metaSqlMaximumScale(DataType t) { - // TODO: return info for HALF/SCALED_FLOATS (should be based on field not type) - if (t == DATETIME) { - return Short.valueOf((short) 3); - } + return metaSqlSameScale(t); + } + + // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/decimal-digits + // https://github.com/elastic/elasticsearch/issues/40357 + // since the scale is fixed, minimum and maximum should return the same value + // hence why this method exists + private static Short metaSqlSameScale(DataType t) { + // TODO: return info for SCALED_FLOATS (should be based on field not type) if (t.isInteger()) { return Short.valueOf((short) 0); } - if (t.isRational()) { + if (t.isDateBased() || t.isRational()) { return Short.valueOf((short) t.defaultPrecision); } return null; } - // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function?view=sql-server-2017 + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function public static Integer metaSqlRadix(DataType t) { // RADIX - Determines how numbers returned by COLUMN_SIZE and DECIMAL_DIGITS should be interpreted. // 10 means they represent the number of decimal digits allowed for the column. @@ -223,4 +216,13 @@ public static Integer metaSqlRadix(DataType t) { // null means radix is not applicable for the given type. return t.isInteger() ? Integer.valueOf(10) : (t.isRational() ? Integer.valueOf(2) : null); } + + //https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function#comments + //https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/column-size + public static Integer precision(DataType t) { + if (t.isNumeric()) { + return t.defaultPrecision; + } + return t.displaySize; + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java index 60047fcdbe79..5f85ff90e344 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -33,7 +33,7 @@ public void testSqlAction() { assertThat(response.columns(), hasSize(2)); int dataIndex = dataBeforeCount ? 0 : 1; int countIndex = dataBeforeCount ? 
1 : 0; - assertEquals(new ColumnInfo("", "data", "text", 0), response.columns().get(dataIndex)); + assertEquals(new ColumnInfo("", "data", "text", 2147483647), response.columns().get(dataIndex)); assertEquals(new ColumnInfo("", "count", "long", 20), response.columns().get(countIndex)); assertThat(response.rows(), hasSize(2)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java index 12a8dd0420f0..673899d98f3a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/MetricAggExtractorTests.java @@ -10,9 +10,12 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; +import org.elasticsearch.xpack.sql.util.DateUtils; import java.io.IOException; +import java.time.ZoneId; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -22,7 +25,8 @@ public class MetricAggExtractorTests extends AbstractWireSerializingTestCase { public static MetricAggExtractor randomMetricAggExtractor() { - return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16)); + return new MetricAggExtractor(randomAlphaOfLength(16), randomAlphaOfLength(16), randomAlphaOfLength(16), + randomZone(), randomBoolean()); } @Override @@ -37,7 +41,12 @@ protected Reader instanceReader() { @Override protected MetricAggExtractor mutateInstance(MetricAggExtractor instance) throws IOException { - return new MetricAggExtractor(instance.name() + "mutated", instance.property(), instance.innerKey()); + return new MetricAggExtractor( + instance.name() + "mutated", + instance.property() + "mutated", + instance.innerKey() + "mutated", + randomValueOtherThan(instance.zoneId(), + ESTestCase::randomZone), randomBoolean()); } public void testNoAggs() { @@ -48,7 +57,7 @@ public void testNoAggs() { } public void testSingleValueProperty() { - MetricAggExtractor extractor = randomMetricAggExtractor(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", null, false); double value = randomDouble(); Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), value); @@ -56,8 +65,18 @@ public void testSingleValueProperty() { assertEquals(value, extractor.extract(bucket)); } + public void testSingleValuePropertyDate() { + ZoneId zoneId = randomZone(); + MetricAggExtractor extractor = new MetricAggExtractor("my_date_field", "property", "innerKey", zoneId, true); + + double value = randomDouble(); + Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), value); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(DateUtils.asDateTime((long) value , zoneId), extractor.extract(bucket)); + } + public void testSingleValueInnerKey() { - MetricAggExtractor extractor = randomMetricAggExtractor(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", null, false); double innerValue = 
randomDouble(); Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), singletonMap(extractor.innerKey(), innerValue)); @@ -65,12 +84,33 @@ public void testSingleValueInnerKey() { assertEquals(innerValue, extractor.extract(bucket)); } + public void testSingleValueInnerKeyDate() { + ZoneId zoneId = randomZone(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", zoneId, true); + + double innerValue = randomDouble(); + Aggregation agg = new TestSingleValueAggregation(extractor.name(), singletonList(extractor.property()), + singletonMap(extractor.innerKey(), innerValue)); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(DateUtils.asDateTime((long) innerValue , zoneId), extractor.extract(bucket)); + } + public void testMultiValueProperty() { - MetricAggExtractor extractor = randomMetricAggExtractor(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", null, false); double value = randomDouble(); Aggregation agg = new TestMultiValueAggregation(extractor.name(), singletonMap(extractor.property(), value)); Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); assertEquals(value, extractor.extract(bucket)); } + + public void testMultiValuePropertyDate() { + ZoneId zoneId = randomZone(); + MetricAggExtractor extractor = new MetricAggExtractor("field", "property", "innerKey", zoneId, true); + + double value = randomDouble(); + Aggregation agg = new TestMultiValueAggregation(extractor.name(), singletonMap(extractor.property(), value)); + Bucket bucket = new TestBucket(emptyMap(), 0, new Aggregations(singletonList(agg))); + assertEquals(DateUtils.asDateTime((long) value , zoneId), extractor.extract(bucket)); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java index 66c93c9a3f03..3ffcedae0a6a 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/ExpressionTests.java @@ -361,12 +361,12 @@ public void testConvertWithQuotedODBCDataType() { public void testConvertWithInvalidODBCDataType() { ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("CONVERT(1, SQL_INVALID)")); - assertEquals("line 1:13: Invalid data type [SQL_INVALID] provided", ex.getMessage()); + assertEquals("line 1:13: Does not recognize type [SQL_INVALID]", ex.getMessage()); } public void testConvertWithInvalidESDataType() { ParsingException ex = expectThrows(ParsingException.class, () -> parser.createExpression("CONVERT(1, INVALID)")); - assertEquals("line 1:13: Invalid data type [INVALID] provided", ex.getMessage()); + assertEquals("line 1:13: Does not recognize type [INVALID]", ex.getMessage()); } public void testCurrentDate() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java index 45276b8d15ed..d1e05b6ec532 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java @@ -81,6 +81,11 @@ public void testSelectCastWithSQLOperator() { assertEquals("CONVERT(POWER(languages, 
2), SQL_DOUBLE)", f.sourceText()); } + public void testSelectCastToEsType() { + Cast f = singleProjection(project(parseStatement("SELECT CAST('0.' AS SCALED_FLOAT)")), Cast.class); + assertEquals("CAST('0.' AS SCALED_FLOAT)", f.sourceText()); + } + public void testSelectAddWithParanthesis() { Add f = singleProjection(project(parseStatement("SELECT (1 + 2)")), Add.class); assertEquals("1 + 2", f.sourceText()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java index a1accd28ab4d..c6c993967dd1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java @@ -60,7 +60,7 @@ private Tuple sql(String sql) { public void testSysTypes() throws Exception { Command cmd = sql("SYS TYPES").v1(); - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", + List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "FLOAT", "DOUBLE", "SCALED_FLOAT", "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index 74ecdc80c12d..e2baeb2d8af9 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -51,20 +51,60 @@ public class SysTablesTests extends ESTestCase { private final IndexInfo index = new IndexInfo("test", IndexType.INDEX); private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); - public void testSysTablesEnumerateCatalog() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '%'", r -> { + // + // catalog enumeration + // + public void testSysTablesCatalogEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> { assertEquals(1, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); - }); + // everything else should be null + for (int i = 1; i < 10; i++) { + assertNull(r.column(i)); + } + }, index); + } + + // + // table types enumeration + // + public void testSysTablesTypesEnumerationWoString() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { + assertEquals(2, r.size()); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("VIEW", r.column(3)); + }, new IndexInfo[0]); } public void testSysTablesEnumerateTypes() throws Exception { - executeCommand("SYS TABLES TYPE '%'", r -> { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { assertEquals(2, r.size()); assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); assertEquals("VIEW", r.column(3)); - }); + }, alias, index); + } + + public void testSysTablesTypesEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { + 
assertEquals(2, r.size()); + + Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); + + for (int t = 0; t < r.size(); t++) { + assertEquals(it.next().toSql(), r.column(3)); + + // everything else should be null + for (int i = 0; i < 10; i++) { + if (i != 3) { + assertNull(r.column(i)); + } + } + + r.advanceRow(); + } + }, new IndexInfo[0]); } public void testSysTablesDifferentCatalog() throws Exception { @@ -77,17 +117,42 @@ public void testSysTablesDifferentCatalog() throws Exception { public void testSysTablesNoTypes() throws Exception { executeCommand("SYS TABLES", r -> { assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); + } + + public void testSysTablesWithLegacyTypes() throws Exception { + executeCommand("SYS TABLES TYPE 'TABLE', 'ALIAS'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); + } + + public void testSysTablesWithProperTypes() throws Exception { + executeCommand("SYS TABLES TYPE 'BASE TABLE', 'ALIAS'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); assertEquals("VIEW", r.column(3)); }, index, alias); } public void testSysTablesPattern() throws Exception { executeCommand("SYS TABLES LIKE '%'", r -> { + assertEquals(2, r.size()); assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); - assertEquals(2, r.size()); assertEquals("alias", r.column(2)); }, index, alias); } @@ -130,7 +195,18 @@ public void testSysTablesOnlyIndicesInLegacyMode() throws Exception { assertEquals("test", r.column(2)); assertEquals("TABLE", r.column(3)); }, index); + } + + public void testSysTablesNoPatternWithTypesSpecifiedInLegacyMode() throws Exception { + executeCommand("SYS TABLES TYPE 'TABLE','VIEW'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); } public void testSysTablesOnlyIndicesLegacyModeParameterized() throws Exception { @@ -192,43 +268,6 @@ public void testSysTablesWithInvalidType() throws Exception { }, new IndexInfo[0]); } - public void testSysTablesCatalogEnumeration() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> { - assertEquals(1, r.size()); - assertEquals(CLUSTER_NAME, r.column(0)); - // everything else should be null - for (int i = 1; i < 10; i++) { - assertNull(r.column(i)); - } - }, new IndexInfo[0]); - } - - public void testSysTablesTypesEnumeration() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { - assertEquals(2, r.size()); - - Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); - - for (int t = 0; t < r.size(); t++) { - assertEquals(it.next().toSql(), r.column(3)); - - // everything else should be null - for (int i = 0; i < 10; i++) { - if (i != 3) { - assertNull(r.column(i)); - } - } - - r.advanceRow(); - } - }, new IndexInfo[0]); - } - - public void 
testSysTablesTypesEnumerationWoString() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { - assertEquals(0, r.size()); - }, new IndexInfo[0]); - } private SqlTypedParamValue param(Object value) { return new SqlTypedParamValue(DataTypes.fromJava(value).typeName, value); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 4e428846dc2f..9c1ef31fcb17 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -43,7 +43,7 @@ private Tuple sql(String sql) { public void testSysTypes() { Command cmd = sql("SYS TYPES").v1(); - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", + List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "FLOAT", "DOUBLE", "SCALED_FLOAT", "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 2c31fff1fb94..a39b5466bc10 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -228,6 +228,41 @@ public void testDifferentLikeAndNotLikePatterns() { assertEquals(1, rqsq.fields().size()); assertEquals("keyword", rqsq.fields().keySet().iterator().next()); } + + public void testRLikePatterns() { + String[] patterns = new String[] {"(...)+", "abab(ab)?", "(ab){1,2}", "(ab){3}", "aabb|bbaa", "a+b+|b+a+", "aa(cc|bb)", + "a{4,6}b{4,6}", ".{3}.{3}", "aaa*bbb*", "a+.+", "a.c.e", "[^abc\\-]"}; + for (int i = 0; i < 5; i++) { + assertDifferentRLikeAndNotRLikePatterns(randomFrom(patterns), randomFrom(patterns)); + } + } + + private void assertDifferentRLikeAndNotRLikePatterns(String firstPattern, String secondPattern) { + LogicalPlan p = plan("SELECT keyword k FROM test WHERE k RLIKE '" + firstPattern + "' AND k NOT RLIKE '" + secondPattern + "'"); + assertTrue(p instanceof Project); + p = ((Project) p).child(); + assertTrue(p instanceof Filter); + + Expression condition = ((Filter) p).condition(); + QueryTranslation qt = QueryTranslator.toQuery(condition, false); + assertEquals(BoolQuery.class, qt.query.getClass()); + BoolQuery bq = ((BoolQuery) qt.query); + assertTrue(bq.isAnd()); + assertTrue(bq.left() instanceof QueryStringQuery); + assertTrue(bq.right() instanceof NotQuery); + + NotQuery nq = (NotQuery) bq.right(); + assertTrue(nq.child() instanceof QueryStringQuery); + QueryStringQuery lqsq = (QueryStringQuery) bq.left(); + QueryStringQuery rqsq = (QueryStringQuery) nq.child(); + + assertEquals("/" + firstPattern + "/", lqsq.query()); + assertEquals(1, lqsq.fields().size()); + assertEquals("keyword", lqsq.fields().keySet().iterator().next()); + assertEquals("/" + secondPattern + "/", rqsq.query()); + assertEquals(1, rqsq.fields().size()); + 
assertEquals("keyword", rqsq.fields().keySet().iterator().next()); + } public void testTranslateNotExpression_WhereClause_Painless() { LogicalPlan p = plan("SELECT * FROM test WHERE NOT(POSITION('x', keyword) = 0)"); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java index 7b38718dad79..a789324e0b47 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java @@ -7,8 +7,13 @@ import org.elasticsearch.test.ESTestCase; +import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; +import java.util.List; +import java.util.stream.Stream; +import static java.util.stream.Collectors.toList; import static org.elasticsearch.xpack.sql.type.DataType.DATETIME; import static org.elasticsearch.xpack.sql.type.DataType.FLOAT; import static org.elasticsearch.xpack.sql.type.DataType.INTERVAL_DAY; @@ -50,7 +55,7 @@ public void testMetaDateTypeSub() { public void testMetaMinimumScale() { assertEquals(Short.valueOf((short) 3), metaSqlMinimumScale(DATETIME)); assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(LONG)); - assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(FLOAT)); + assertEquals(Short.valueOf((short) FLOAT.defaultPrecision), metaSqlMaximumScale(FLOAT)); assertNull(metaSqlMinimumScale(KEYWORD)); } @@ -108,6 +113,34 @@ public void testIncompatibleInterval() throws Exception { assertNull(compatibleInterval(INTERVAL_MINUTE_TO_SECOND, INTERVAL_MONTH)); } + public void testEsToDataType() throws Exception { + List types = new ArrayList<>(Arrays.asList("null", "boolean", "bool", + "byte", "tinyint", + "short", "smallint", + "integer", + "long", "bigint", + "double", "real", + "half_float", "scaled_float", "float", + "decimal", "numeric", + "keyword", "text", "varchar", + "date", "datetime", "timestamp", + "binary", "varbinary", + "ip", + "interval_year", "interval_month", "interval_year_to_month", + "interval_day", "interval_hour", "interval_minute", "interval_second", + "interval_day_to_hour", "interval_day_to_minute", "interval_day_to_second", + "interval_hour_to_minute", "interval_hour_to_second", + "interval_minute_to_second")); + + types.addAll(Stream.of(DataType.values()) + .filter(DataType::isPrimitive) + .map(DataType::name) + .collect(toList())); + String type = randomFrom(types.toArray(new String[0])); + DataType dataType = DataType.fromSqlOrEsType(type); + assertNotNull(dataType); + } + private DataType randomDataTypeNoDateTime() { return randomValueOtherThan(DataType.DATETIME, () -> randomFrom(DataType.values())); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 2a2488dda722..65b491fe71a1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -81,7 +81,7 @@ public void testDateField() { EsField field = mapping.get("date"); assertThat(field.getDataType(), is(DATETIME)); assertThat(field.isAggregatable(), is(true)); - assertThat(field.getPrecision(), is(24)); + assertThat(field.getPrecision(), is(3)); } public void testDateNoFormat() { diff --git a/x-pack/plugin/sql/src/test/resources/mapping-nested.json 
b/x-pack/plugin/sql/src/test/resources/mapping-nested.json index d9b6398458f1..1251d17525a0 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-nested.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-nested.json @@ -10,8 +10,7 @@ "type" : "text", "fields" : { "keyword" : { - "type" : "keyword", - "ignore_above" : 256 + "type" : "keyword" } } }, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json index 626ff0d6da80..d65cf8f83583 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json @@ -23,7 +23,7 @@ } }, "body": { - "description" : "The role to add", + "description" : "The role mapping to add", "required" : true } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 642a69e57e63..dede9e559991 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -71,7 +71,7 @@ setup: data_frame.preview_data_frame_transform: body: > { - "source": "airline-data", + "source": { "index": "airline-data" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}, @@ -96,7 +96,7 @@ setup: data_frame.preview_data_frame_transform: body: > { - "source": "airline-data", + "source": { "index": "airline-data" }, "pivot": { "group_by": {"airline": {"terms": {"not_a_terms_param": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index 013731116f3e..00b091330027 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -44,8 +44,8 @@ setup: transform_id: "missing-source-transform" body: > { - "source": "missing-index", - "dest": "missing-source-dest", + "source": { "index": "missing-index" }, + "dest": { "index": "missing-source-dest" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -58,8 +58,8 @@ setup: transform_id: "airline-transform" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -72,8 +72,8 @@ setup: transform_id: "airline-transform-dos" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline-again", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-again" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -86,9 +86,9 @@ setup: transform_id: "airline-transform" - match: { count: 1 } - match: { transforms.0.id: "airline-transform" } - - match: { transforms.0.source: "airline-data" } - - match: { transforms.0.dest: "airline-data-by-airline" } - - is_true: 
transforms.0.query.match_all + - match: { transforms.0.source.index.0: "airline-data" } + - match: { transforms.0.dest.index: "airline-data-by-airline" } + - is_true: transforms.0.source.query.match_all - match: { transforms.0.pivot.group_by.airline.terms.field: "airline" } - match: { transforms.0.pivot.aggregations.avg_response.avg.field: "responsetime" } @@ -136,6 +136,50 @@ setup: - match: { transforms.0.id: "airline-transform-dos" } --- +"Test transform with query and array of indices in source": + - do: + indices.create: + index: airline-data-other + body: + mappings: + properties: + time: + type: date + airline: + type: keyword + responsetime: + type: float + event_rate: + type: integer + + - do: + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { + "index": ["airline-data", "airline-data-other"], + "query": {"bool":{"filter":{"term":{"airline":"FOO"}}}} + }, + "dest": { "index": "airline-data-by-airline" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - match: { acknowledged: true } + - do: + data_frame.get_data_frame_transform: + transform_id: "airline-transform" + - match: { count: 1 } + - match: { transforms.0.id: "airline-transform" } + - match: { transforms.0.source.index.0: "airline-data" } + - match: { transforms.0.source.index.1: "airline-data-other" } + - match: { transforms.0.source.query.bool.filter.term.airline: "FOO" } + - match: { transforms.0.dest.index: "airline-data-by-airline" } + - match: { transforms.0.pivot.group_by.airline.terms.field: "airline" } + - match: { transforms.0.pivot.aggregations.avg_response.avg.field: "responsetime" } +--- "Test transform with invalid page parameter": - do: catch: /Param \[size\] has a max acceptable value of \[1000\]/ @@ -151,8 +195,8 @@ setup: transform_id: "airline-transform" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}, "time": {"max": {"field": "time"}}} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index fd13e4dc6eae..56f320d4cb40 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -18,8 +18,8 @@ setup: transform_id: "airline-transform-start-stop" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline-start-stop", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-start-stop" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -59,7 +59,7 @@ teardown: - match: { started: true } - do: - catch: /Cannot start task for data frame transform \[airline-transform-start-stop\], because state was \[STARTED\]/ + catch: /Unable to start data frame transform \[airline-transform-start-stop\] as it is in state \[STARTED\]/ data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" @@ -75,7 +75,8 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { 
transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.transform_state: "started" } + - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.task_state: "started" } - do: data_frame.stop_data_frame_transform: @@ -87,7 +88,8 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.transform_state: "stopped" } + - match: { transforms.0.state.indexer_state: "stopped" } + - match: { transforms.0.state.task_state: "stopped" } - do: data_frame.start_data_frame_transform: @@ -99,7 +101,8 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.transform_state: "started" } + - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.task_state: "started" } --- "Test stop missing transform": @@ -114,3 +117,4 @@ teardown: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" - match: { stopped: true } + diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 1b224a692f50..ac6aca4f35d4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -18,8 +18,8 @@ setup: transform_id: "airline-transform-stats" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline-stats", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-stats" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -46,7 +46,8 @@ teardown: transform_id: "airline-transform-stats" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.transform_state: "started" } + - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.task_state: "started" } - match: { transforms.0.state.generation: 0 } - match: { transforms.0.stats.pages_processed: 0 } - match: { transforms.0.stats.documents_processed: 0 } @@ -74,8 +75,8 @@ teardown: transform_id: "airline-transform-stats-dos" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline-stats-dos", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-stats-dos" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -112,8 +113,8 @@ teardown: transform_id: "airline-transform-stats-dos" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline-stats-dos", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-stats-dos" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -124,18 +125,18 @@ teardown: transform_id: "*" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.transform_state: "started" } + - match: { transforms.0.state.indexer_state: "started" } - match: { transforms.1.id: "airline-transform-stats-dos" } - - match: { transforms.1.state.transform_state: "stopped" 
} + - match: { transforms.1.state.indexer_state: "stopped" } - do: data_frame.get_data_frame_transform_stats: transform_id: "_all" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.transform_state: "started" } + - match: { transforms.0.state.indexer_state: "started" } - match: { transforms.1.id: "airline-transform-stats-dos" } - - match: { transforms.1.state.transform_state: "stopped" } + - match: { transforms.1.state.indexer_state: "stopped" } --- "Test get single transform stats when it does not have a task": @@ -145,8 +146,8 @@ teardown: transform_id: "airline-transform-stats-dos" body: > { - "source": "airline-data", - "dest": "airline-data-by-airline-stats-dos", + "source": { "index": "airline-data" }, + "dest": { "index": "airline-data-by-airline-stats-dos" }, "pivot": { "group_by": { "airline": {"terms": {"field": "airline"}}}, "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} @@ -157,7 +158,7 @@ teardown: transform_id: "airline-transform-stats-dos" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-stats-dos" } - - match: { transforms.0.state.transform_state: "stopped" } + - match: { transforms.0.state.indexer_state: "stopped" } - match: { transforms.0.state.generation: 0 } - match: { transforms.0.stats.pages_processed: 0 } - match: { transforms.0.stats.documents_processed: 0 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml index 85ac286c3f02..eb92cc252b56 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml @@ -28,6 +28,16 @@ setup: "name": "write", "actions": [ "data:write/*" ] } + }, + "yourapp-v1" : { + "read": { + "actions": [ "data:read/*" ] + } + }, + "yourapp-v2" : { + "read": { + "actions": [ "data:read/*" ] + } } } @@ -83,6 +93,21 @@ setup: } ] } + - do: + security.put_role: + name: "yourapp_read_config" + body: > + { + "cluster": [], + "indices": [], + "applications": [ + { + "application": "yourapp-*", + "privileges": ["read"], + "resources": ["settings/*"] + } + ] + } # And a user for each role - do: @@ -101,6 +126,14 @@ setup: "password": "p@ssw0rd", "roles" : [ "myapp_engineering_write" ] } + - do: + security.put_user: + username: "your_read" + body: > + { + "password": "p@ssw0rd", + "roles" : [ "yourapp_read_config" ] + } --- teardown: @@ -109,6 +142,16 @@ teardown: application: myapp name: "user,read,write" ignore: 404 + - do: + security.delete_privileges: + application: yourapp-v1 + name: "read" + ignore: 404 + - do: + security.delete_privileges: + application: yourapp-v2 + name: "read" + ignore: 404 - do: security.delete_user: @@ -120,6 +163,11 @@ teardown: username: "eng_write" ignore: 404 + - do: + security.delete_user: + username: "your_read" + ignore: 404 + - do: security.delete_role: name: "myapp_engineering_read" @@ -129,6 +177,11 @@ teardown: security.delete_role: name: "myapp_engineering_write" ignore: 404 + + - do: + security.delete_role: + name: "yourapp_read_config" + ignore: 404 --- "Test has_privileges with application-privileges": - do: @@ -188,3 +241,53 @@ teardown: } } } + - do: + headers: { Authorization: "Basic eW91cl9yZWFkOnBAc3N3MHJk" } # your_read + security.has_privileges: + user: null + body: > + { + "application": [ + { + "application" : "yourapp-v1", + 
"resources" : [ "settings/host", "settings/port", "system/key" ], + "privileges" : [ "data:read/settings", "data:write/settings", "read", "write" ] + }, + { + "application" : "yourapp-v2", + "resources" : [ "settings/host" ], + "privileges" : [ "data:read/settings", "data:write/settings" ] + } + ] + } + + - match: { "username" : "your_read" } + - match: { "has_all_requested" : false } + - match: { "application": { + "yourapp-v1": { + "settings/host": { + "data:read/settings": true, + "data:write/settings": false, + "read": true, + "write": false + }, + "settings/port": { + "data:read/settings": true, + "data:write/settings": false, + "read": true, + "write": false + }, + "system/key": { + "data:read/settings": false, + "data:write/settings": false, + "read": false, + "write": false + } + }, + "yourapp-v2": { + "settings/host": { + "data:read/settings": true, + "data:write/settings": false, + } + } + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java index 02ce97d4ea21..521cc2d49fc3 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.actions.webhook; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.transport.TransportAddress; @@ -43,6 +44,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public class WebhookIntegrationTests extends AbstractWatcherIntegrationTestCase { private MockWebServer webServer = new MockWebServer(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java index f2b839bcabcf..05d8b4ef29de 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.test.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -62,6 +63,7 @@ @TestLogging("org.elasticsearch.xpack.watcher:DEBUG," + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase { public void testIndexWatch() throws Exception { @@ -216,7 +218,6 @@ public void testConditionSearchWithSource() throws Exception { testConditionSearch(templateRequest(searchSourceBuilder, "events")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39306") public void testConditionSearchWithIndexedTemplate() throws Exception { SearchSourceBuilder 
searchSourceBuilder = searchSource().query(matchQuery("level", "a")); assertAcked(client().admin().cluster().preparePutStoredScript() diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java index f8ddc3065f79..3eefa0313714 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -87,6 +87,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return super.nodeSettings(nodeOrdinal); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40587") public void testHttpInput() throws Exception { WatcherClient watcherClient = watcherClient(); watcherClient.preparePutWatch("_id") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index a0ef5e97d853..0e95a15b2a35 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -122,6 +122,7 @@ public void testAckSingleAction() throws Exception { assertThat(throttledCount, greaterThan(0L)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35506") public void testAckAllActions() throws Exception { PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() .setId("_id") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java index eaf952effd3c..f58954658fc1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java @@ -66,7 +66,8 @@ public void add(Watch watch) { @Override public void pauseExecution() { - watches.clear(); + // No action is needed because this engine does not trigger watches on a schedule (instead + // they must be triggered manually). 
} @Override diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 4c2ecd2b7b4c..40dca76abc91 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -3,10 +3,10 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.Version import java.nio.charset.StandardCharsets -import java.util.regex.Matcher // Apply the java plugin to this project so the sources can be edited in an IDE -apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.standalone-test' + unitTest.enabled = false dependencies { @@ -70,8 +70,6 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -Project mainProject = project - String coreFullClusterRestartPath = project(':qa:full-cluster-restart').projectDir.toPath().resolve('src/test/java').toString() sourceSets { test { @@ -89,224 +87,157 @@ forbiddenPatterns { exclude '**/system_key' } -// tests are pushed down to subprojects -testingConventions.enabled = false - -/** - * Subdirectories of this project are test rolling upgrades with various - * configuration options based on their name. - */ -subprojects { - Matcher m = project.name =~ /with(out)?-system-key/ - if (false == m.matches()) { - throw new InvalidUserDataException("Invalid project name [${project.name}]") - } - boolean withSystemKey = m.group(1) == null - - apply plugin: 'elasticsearch.standalone-test' +String outputDir = "${buildDir}/generated-resources/${project.name}" - // Use resources from the rolling-upgrade project in subdirectories - sourceSets { - test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java", coreFullClusterRestartPath] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] - } - } - } - - licenseHeaders { - approvedLicenses << 'Apache' - } - - forbiddenPatterns { - exclude '**/system_key' - } - - String outputDir = "${buildDir}/generated-resources/${project.name}" - - // This is a top level task which we will add dependencies to below. - // It is a single task that can be used to backcompat tests against all versions. - task bwcTest { +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { description = 'Runs backwards compatibility tests.' 
group = 'verification' - } +} - String output = "${buildDir}/generated-resources/${project.name}" - task copyTestNodeKeyMaterial(type: Copy) { +task copyTestNodeKeyMaterial(type: Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') into outputDir - } +} - for (Version version : bwcVersions.indexCompatible) { +for (Version version : bwcVersions.indexCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) + mustRunAfter(precommit) } Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - dependsOn copyTestNodeKeyMaterial - if (version.before('6.3.0')) { - String depVersion = version; - if (project.bwcVersions.unreleased.contains(version)) { - depVersion += "-SNAPSHOT" - } - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" - - } - bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - clusterName = 'full-cluster-restart' - String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' - setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - setting 'xpack.license.self_generated.type', 'trial' - dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') - extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { - // The setting didn't exist until 5.1.0 - setting 'xpack.security.system_key.required', 'true' + dependsOn copyTestNodeKeyMaterial + if (version.before('6.3.0')) { + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" + } - if (version.onOrAfter('6.0.0')) { - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + bwcVersion = version + numBwcNodes = 2 + numNodes = 2 + clusterName = 'full-cluster-restart' + String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' } else { - extraConfigFile 'x-pack/system_key', "${mainProject.projectDir}/src/test/resources/system_key" + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' } + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" setting 'xpack.watcher.encrypt_sensitive_data', 'true' - } } Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { - systemProperty 'tests.is_old_cluster', 'true' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + systemProperty 'tests.is_old_cluster', 'true' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn oldClusterTestRunner, - "${baseName}#oldClusterTestCluster#node0.stop", - "${baseName}#oldClusterTestCluster#node1.stop" - numNodes = 2 - clusterName = 'full-cluster-restart' - dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } - cleanShared = false // We want to keep snapshots made by the old cluster! 
- setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - setting 'xpack.security.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - setting 'xpack.license.self_generated.type', 'trial' - dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') - extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } + dependsOn oldClusterTestRunner, + "${baseName}#oldClusterTestCluster#node0.stop", + "${baseName}#oldClusterTestCluster#node1.stop" + numNodes = 2 + clusterName = 'full-cluster-restart' + dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } + cleanShared = false // We want to keep snapshots made by the old cluster! + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + setting 'xpack.security.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" } Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { - systemProperty 'tests.is_old_cluster', 'false' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + systemProperty 'tests.is_old_cluster', 'false' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 
'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { - dependsOn = [upgradedClusterTest] + dependsOn = [upgradedClusterTest] } if (project.bwc_tests_enabled) { - bwcTest.dependsOn(versionBwcTest) + bwcTest.dependsOn(versionBwcTest) } - } - - unitTest.enabled = false // no unit tests for full cluster restarts, only the rest integration test +} - // basic integ tests includes testing bwc against the most recent version - task bwcTestSnapshots { +// basic integ tests includes testing bwc against the most recent version +task bwcTestSnapshots { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.unreleasedIndexCompatible) { - dependsOn "v${version}#bwcTest" - } + for (final def version : bwcVersions.unreleasedIndexCompatible) { + dependsOn "v${version}#bwcTest" + } } - } - - check.dependsOn(bwcTestSnapshots) +} - dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here - testCompile project(path: xpackModule('core'), configuration: 'default') - testCompile project(path: xpackModule('watcher'), configuration: 'runtime') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') - } +check.dependsOn(bwcTestSnapshots) - // copy x-pack plugin info so it is on the classpath and security manager has the right permissions - task copyXPackRestSpec(type: Copy) { +// copy x-pack plugin info so it is on the classpath and security manager has the right permissions +task copyXPackRestSpec(type: Copy) { dependsOn(project.configurations.restSpec, 'processTestResources') from project(xpackModule('core')).sourceSets.test.resources include 'rest-api-spec/api/**' into project.sourceSets.test.output.resourcesDir - } +} - task copyXPackPluginProps(type: Copy) { +task copyXPackPluginProps(type: Copy) { dependsOn(copyXPackRestSpec) from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties into outputDir - } - project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - - repositories { - maven { - url "https://artifacts.elastic.co/maven" - } - maven { - url "https://snapshots.elastic.co/maven" - } - } } +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) diff --git a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/x-pack/qa/full-cluster-restart/without-system-key/build.gradle b/x-pack/qa/full-cluster-restart/without-system-key/build.gradle deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index a59becbfe6b5..50b709f77dca 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -2,9 +2,11 @@ import java.nio.file.Path import java.nio.file.Paths import java.nio.file.Files -apply plugin: 'elasticsearch.vagrantsupport' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.test.fixtures' + +testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" dependencies { testCompile "org.elasticsearch.plugin:x-pack-core:${version}" 
@@ -12,75 +14,6 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } -// MIT Kerberos Vagrant Testing Fixture -String box = "krb5kdc" -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}" -] - -task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion" -} - -task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcUpdate -} - -// lazily resolve to avoid any slowdowns from DNS lookups prior to when we need this value -Object httpPrincipal = new Object() { - @Override - String toString() { - InetAddress resolvedAddress = InetAddress.getByName('127.0.0.1') - return "HTTP/" + resolvedAddress.getCanonicalHostName() - } -} - -String realm = "BUILD.ELASTIC.CO" - -task 'addPrincipal#peppa'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh peppa " - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task 'addPrincipal#george'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh george dino" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task 'addPrincipal#HTTP'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $httpPrincipal" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task krb5AddPrincipals { dependsOn krb5kdcFixture, 'addPrincipal#peppa', 'addPrincipal#george', 'addPrincipal#HTTP' } - -def generatedResources = "$buildDir/generated-resources/keytabs" -task copyKeytabToGeneratedResources(type: Copy) { - Path peppaKeytab = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("peppa.keytab").toAbsolutePath() - from peppaKeytab; - into generatedResources - dependsOn krb5AddPrincipals -} - integTestCluster { // force localhost IPv4 otherwise it is a chicken and egg problem where we need the keytab for the hostname when starting the cluster // but do not know the exact address that is first in the http ports file @@ -96,12 +29,10 @@ integTestCluster { setting 'xpack.security.authc.realms.kerberos.kerberos.krb.debug', 'true' setting 'xpack.security.authc.realms.kerberos.kerberos.remove_realm_name', 'false' - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - String jvmArgsStr = " -Djava.security.krb5.conf=${krb5conf}" + " -Dsun.security.krb5.debug=true" - jvmArgs jvmArgsStr - Path esKeytab = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs") - .resolve("$httpPrincipal".replace('/', '_') + ".keytab").toAbsolutePath() - extraConfigFile("es.keytab", "${esKeytab}") + jvmArgs += " -Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa")}" + jvmArgs += " -Dsun.security.krb5.debug=true" + + 
extraConfigFile("es.keytab", project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "HTTP_localhost.keytab")) setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" @@ -119,6 +50,7 @@ integTestCluster { } +String realm = "BUILD.ELASTIC.CO" integTestRunner { Path peppaKeytab = Paths.get("${project.buildDir}", "generated-resources", "keytabs", "peppa.keytab") systemProperty 'test.userkt', "peppa@${realm}" @@ -126,16 +58,17 @@ integTestRunner { systemProperty 'test.userpwd', "george@${realm}" systemProperty 'test.userpwd.password', "dino" systemProperty 'tests.security.manager', 'true' - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - List jvmargs = ["-Djava.security.krb5.conf=${krb5conf}","-Dsun.security.krb5.debug=true"] - jvmArgs jvmargs + jvmArgs([ + "-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa")}", + "-Dsun.security.krb5.debug=true" + ]) } -if (project.rootProject.vagrantSupported == false) { - integTest.enabled = false - testingConventions.enabled = false -} else { - project.sourceSets.test.output.dir(generatedResources) - integTestCluster.dependsOn krb5AddPrincipals, krb5kdcFixture, copyKeytabToGeneratedResources - integTest.finalizedBy project(':test:fixtures:krb5kdc-fixture').halt +def generatedResources = "$buildDir/generated-resources/keytabs" +task copyKeytabToGeneratedResources(type: Copy) { + from project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "peppa.keytab") + into generatedResources + dependsOn project(':test:fixtures:krb5kdc-fixture').postProcessFixture } +project.sourceSets.test.output.dir(generatedResources, builtBy:copyKeytabToGeneratedResources) + diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java index 397440391b09..26d7306735dd 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -51,8 +51,8 @@ public class OpenLdapTests extends ESTestCase { - public static final String OPEN_LDAP_DNS_URL = "ldaps://localhost:60636"; - public static final String OPEN_LDAP_IP_URL = "ldaps://127.0.0.1:60636"; + public static final String OPEN_LDAP_DNS_URL = "ldaps://localhost:" + getFromProperty("636"); + public static final String OPEN_LDAP_IP_URL = "ldaps://127.0.0.1:" + getFromProperty("636"); public static final String PASSWORD = "NickFuryHeartsES"; private static final String HAWKEYE_DN = "uid=hawkeye,ou=people,dc=oldap,dc=test,dc=elasticsearch,dc=com"; @@ -286,4 +286,11 @@ private Map resolve(LDAPConnection connection, LdapMetaDataResol resolver.resolve(connection, HAWKEYE_DN, TimeValue.timeValueSeconds(1), logger, null, future); return future.get(); } + + private static String getFromProperty(String port) { + String key = "test.fixtures.openldap.tcp." 
+ port; + final String value = System.getProperty(key); + assertNotNull("Expected the actual value for port " + port + " to be in system property " + key, value); + return value; + } } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index f8222669b218..f689573a6143 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -3,10 +3,10 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.Version import java.nio.charset.StandardCharsets -import java.util.regex.Matcher // Apply the java plugin to this project so the sources can be edited in an IDE -apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.standalone-test' + unitTest.enabled = false dependencies { @@ -68,161 +68,50 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -Project mainProject = project - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" forbiddenPatterns { exclude '**/system_key' } -// Tests are pushed down to subprojects -testingConventions.enabled = false - -/** - * Subdirectories of this project are test rolling upgrades with various - * configuration options based on their name. - */ -subprojects { - Matcher m = project.name =~ /with(out)?-system-key/ - if (false == m.matches()) { - throw new InvalidUserDataException("Invalid project name [${project.name}]") - } - boolean withSystemKey = m.group(1) == null - - apply plugin: 'elasticsearch.standalone-test' - - // Use resources from the rolling-upgrade project in subdirectories - sourceSets { - test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java"] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] - } - } - } - - forbiddenPatterns { - exclude '**/system_key' - } +String outputDir = "${buildDir}/generated-resources/${project.name}" - String outputDir = "${buildDir}/generated-resources/${project.name}" - - // This is a top level task which we will add dependencies to below. - // It is a single task that can be used to backcompat tests against all versions. - task bwcTest { +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { description = 'Runs backwards compatibility tests.' 
group = 'verification' - } +} - String output = "${buildDir}/generated-resources/${project.name}" - task copyTestNodeKeyMaterial(type: Copy) { +task copyTestNodeKeyMaterial(type: Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') into outputDir - } +} - for (Version version : bwcVersions.wireCompatible) { +for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) + mustRunAfter(precommit) } configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - dependsOn copyTestNodeKeyMaterial - if (version.before('6.3.0')) { - String depVersion = version; - if (project.bwcVersions.unreleased.contains(version)) { - depVersion += "-SNAPSHOT" - } - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" - } - String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' - setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - bwcVersion = version - numBwcNodes = 3 - numNodes = 3 - clusterName = 'rolling-upgrade' - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') - extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - if (version.onOrAfter('7.0.0')) { - setting 'xpack.security.authc.realms.file.file1.order', '0' - setting 'xpack.security.authc.realms.native.native1.order', '1' - } else { - setting 'xpack.security.authc.realms.file1.type', 'file' - setting 'xpack.security.authc.realms.file1.order', '0' - setting 'xpack.security.authc.realms.native1.type', 'native' - setting 'xpack.security.authc.realms.native1.order', '1' - } - - if (withSystemKey) { - if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { - // The setting didn't exist until 5.1.0 - setting 'xpack.security.system_key.required', 'true' - } - if (version.onOrAfter('6.0.0')) { - keystoreFile 
'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } else { - String systemKeyFile = version.before('6.3.0') ? 'x-pack/system_key' : 'system_key' - extraConfigFile systemKeyFile, "${mainProject.projectDir}/src/test/resources/system_key" - keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + dependsOn copyTestNodeKeyMaterial + if (version.before('6.3.0')) { + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" } - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - } - - if (version.onOrAfter('6.6.0')) { - setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' - } - - // Old versions of the code contain an invalid assertion that trips - // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing - // the assertion, but this is impossible for released versions. - // However, released versions run without assertions, so end users won't - // be suffering the effects. This argument effectively removes the - // incorrect assertion from the older versions used in the BWC tests. - if (version.before('5.6.9') || (version.onOrAfter('6.0.0') && version.before('6.2.4'))) { - jvmArgs '-da:org.elasticsearch.xpack.monitoring.exporter.http.HttpExportBulk' - } - } - - Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") - oldClusterTestRunner.configure { - systemProperty 'tests.rest.suite', 'old_cluster' - } - - Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> - configure(extensions.findByName("${baseName}#${name}")) { - dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + bwcVersion = version + numBwcNodes = 3 + numNodes = 3 clusterName = 'rolling-upgrade' - otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. 
*/ - dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } waitCondition = waitWithAuth setting 'xpack.monitoring.exporters._http.type', 'http' setting 'xpack.monitoring.exporters._http.enabled', 'false' @@ -231,154 +120,195 @@ subprojects { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' } - setting 'node.attr.upgraded', 'true' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'node.name', "upgraded-node-${stopNode}" dependsOn copyTestNodeKeyMaterial extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') if (version.onOrAfter('7.0.0')) { - setting 'xpack.security.authc.realms.file.file1.order', '0' - setting 'xpack.security.authc.realms.native.native1.order', '1' + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' } else { - setting 'xpack.security.authc.realms.file1.type', 'file' - setting 'xpack.security.authc.realms.file1.order', '0' - setting 'xpack.security.authc.realms.native1.type', 'native' - setting 'xpack.security.authc.realms.native1.order', '1' + setting 'xpack.security.authc.realms.file1.type', 'file' + setting 'xpack.security.authc.realms.file1.order', '0' + setting 'xpack.security.authc.realms.native1.type', 'native' + setting 'xpack.security.authc.realms.native1.order', '1' + } + + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + + if (version.onOrAfter('6.6.0')) { + setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' } - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + + // Old versions of the code contain an invalid assertion that trips + // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing + // the assertion, but this is impossible for released versions. + // However, released versions run without assertions, so end users won't + // be suffering the effects. This argument effectively removes the + // incorrect assertion from the older versions used in the BWC tests. 
+ if (version.before('5.6.9') || (version.onOrAfter('6.0.0') && version.before('6.2.4'))) { + jvmArgs '-da:org.elasticsearch.xpack.monitoring.exporter.http.HttpExportBulk' } - if (version.before('6.0.0')) { - keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + } + + Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") + oldClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'old_cluster' + } + + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + setting 'node.attr.upgraded', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'node.name', "upgraded-node-${stopNode}" + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + if (version.onOrAfter('7.0.0')) { + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' + } else { + setting 'xpack.security.authc.realms.file1.type', 'file' + setting 'xpack.security.authc.realms.file1.order', '0' + setting 'xpack.security.authc.realms.native1.type', 'native' + setting 'xpack.security.authc.realms.native1.order', '1' + } + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" + if (version.before('6.0.0')) { + keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + } } - } } Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) + // Use all running nodes as seed nodes 
so there is no race between pinging and the tests + { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") oneThirdUpgradedTestRunner.configure { - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.first_round', 'true' - // We only need to run these tests once so we may as well do it when we're two thirds upgraded - systemProperty 'tests.rest.blacklist', [ - 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', - 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', - 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'true' + // We only need to run these tests once so we may as well do it when we're two thirds upgraded + systemProperty 'tests.rest.blacklist', [ + 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', + 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', ].join(',') - finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] }) + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] }) Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") twoThirdsUpgradedTestRunner.configure { - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.first_round', 'false' - finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] }) + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { - systemProperty 'tests.rest.suite', 'upgraded_cluster' - /* - * Force stopping all the upgraded nodes after the test runner - * so they are alive during the test. 
- */ - finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" - finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" - - // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. - // this stinks but we do the check here since our rest tests do not support conditionals - // otherwise we could check the index created version - String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion') - String[] versionParts = versionStr.split('\\.') - if (versionParts[0].equals("5")) { - Integer minor = Integer.parseInt(versionParts[1]) - if (minor >= 2) { - systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' - } - } + systemProperty 'tests.rest.suite', 'upgraded_cluster' + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. + */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" + + // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. + // this stinks but we do the check here since our rest tests do not support conditionals + // otherwise we could check the index created version + String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion') + String[] versionParts = versionStr.split('\\.') + if (versionParts[0].equals("5")) { + Integer minor = Integer.parseInt(versionParts[1]) + if (minor >= 2) { + systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' + } + } } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { - dependsOn = [upgradedClusterTest] + dependsOn = [upgradedClusterTest] } if (project.bwc_tests_enabled) { - bwcTest.dependsOn(versionBwcTest) + bwcTest.dependsOn(versionBwcTest) } - } - - unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test +} - // basic integ tests includes testing bwc against the most recent version - task bwcTestSnapshots { +// basic integ tests includes testing bwc against the most recent version +task bwcTestSnapshots { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.unreleasedWireCompatible) { - dependsOn "v${version}#bwcTest" - } + for (final def version : bwcVersions.unreleasedWireCompatible) { + dependsOn "v${version}#bwcTest" + } } - } - check.dependsOn(bwcTestSnapshots) - - dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here - testCompile project(path: xpackModule('core'), configuration: 'default') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('watcher')) - } - - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" +} +check.dependsOn(bwcTestSnapshots) - // copy x-pack plugin info so it is on the classpath and security manager has the right permissions - task copyXPackRestSpec(type: Copy) { +// copy x-pack plugin info so it is on the classpath and security manager has the right permissions +task copyXPackRestSpec(type: Copy) { dependsOn(project.configurations.restSpec, 'processTestResources') from project(xpackProject('plugin').path).sourceSets.test.resources include 'rest-api-spec/api/**' into project.sourceSets.test.output.resourcesDir - } +} - task 
copyXPackPluginProps(type: Copy) { +task copyXPackPluginProps(type: Copy) { dependsOn(copyXPackRestSpec) from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties into outputDir - } - project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - - repositories { - maven { - url "https://artifacts.elastic.co/maven" - } - maven { - url "https://snapshots.elastic.co/maven" - } - } } +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) diff --git a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle b/x-pack/qa/rolling-upgrade/with-system-key/build.gradle deleted file mode 100644 index 03505e01dedd..000000000000 --- a/x-pack/qa/rolling-upgrade/with-system-key/build.gradle +++ /dev/null @@ -1 +0,0 @@ -group = "${group}.x-pack.qa.rolling-upgrade.with-system-key" diff --git a/x-pack/qa/rolling-upgrade/without-system-key/build.gradle b/x-pack/qa/rolling-upgrade/without-system-key/build.gradle deleted file mode 100644 index aa7ac502eb3e..000000000000 --- a/x-pack/qa/rolling-upgrade/without-system-key/build.gradle +++ /dev/null @@ -1 +0,0 @@ -group = "${group}.x-pack.qa.rolling-upgrade.without-system-key" diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 33aca42914c3..7b76321fe9d4 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -16,12 +16,32 @@ testFixtures.useFixture ":x-pack:test:idp-fixture" String outputDir = "${project.buildDir}/generated-resources/${project.name}" -task copyIdpCertificate(type: Copy) { - from idpFixtureProject.file('idp/shibboleth-idp/credentials/idp-browser.pem'); +task copyIdpFiles(type: Copy) { + from idpFixtureProject.files('idp/shibboleth-idp/credentials/idp-browser.pem', 'idp/shibboleth-idp/metadata/idp-metadata.xml'); into outputDir } -project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpCertificate) -integTestCluster.dependsOn copyIdpCertificate +project.sourceSets.test.output.dir(outputDir, builtBy: copyIdpFiles) + +task setupPorts { + dependsOn copyIdpFiles, idpFixtureProject.postProcessFixture + doLast { + String portString = idpFixtureProject.postProcessFixture.ext."test.fixtures.shibboleth-idp.tcp.4443" + int ephemeralPort = Integer.valueOf(portString) + File idpMetaFile = file(outputDir + '/idp-metadata.xml') + List lines = idpMetaFile.readLines("UTF-8") + StringBuilder content = new StringBuilder() + for (String line : lines) { + content.append(line.replace("localhost:4443", "localhost:" + ephemeralPort)) + } + idpMetaFile.delete() + idpMetaFile.createNewFile() + idpMetaFile.write(content.toString(), "UTF-8") + } +} +// Don't attempt to get ephemeral ports when Docker is not available +setupPorts.onlyIf { idpFixtureProject.postProcessFixture.enabled } + +integTestCluster.dependsOn setupPorts integTestCluster { setting 'xpack.license.self_generated.type', 'trial' @@ -51,8 +71,9 @@ integTestCluster { setting 'xpack.security.authc.realms.native.native.order', '3' setting 'xpack.ml.enabled', 'false' + setting 'logger.org.elasticsearch.xpack.security', 'TRACE' - extraConfigFile 'idp-metadata.xml', idpFixtureProject.file("idp/shibboleth-idp/metadata/idp-metadata.xml") + extraConfigFile 'idp-metadata.xml', file(outputDir + "/idp-metadata.xml") setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" diff --git 
a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java index 96ca68b98a02..23bc287086b9 100644 --- a/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java +++ b/x-pack/qa/saml-idp-tests/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java @@ -639,5 +639,4 @@ private URI getWebServerUri() { throw new ElasticsearchException("Cannot construct URI for httpServer @ {}:{}", e, host, port); } } - } diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle index 7e7ed3ee3a86..e3c7ae96063d 100644 --- a/x-pack/test/feature-aware/build.gradle +++ b/x-pack/test/feature-aware/build.gradle @@ -1,7 +1,7 @@ apply plugin: 'elasticsearch.build' dependencies { - compile 'org.ow2.asm:asm:7.0' + compile 'org.ow2.asm:asm:7.1' compile "org.elasticsearch:elasticsearch:${version}" compile "org.elasticsearch.plugin:x-pack-core:${version}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/test/idp-fixture/docker-compose.yml b/x-pack/test/idp-fixture/docker-compose.yml index 830e04a7bc44..53fb62855164 100644 --- a/x-pack/test/idp-fixture/docker-compose.yml +++ b/x-pack/test/idp-fixture/docker-compose.yml @@ -4,8 +4,8 @@ services: command: --copy-service --loglevel debug image: "osixia/openldap:1.2.3" ports: - - "30389:389" - - "60636:636" + - "389" + - "636" environment: LDAP_ADMIN_PASSWORD: "NickFuryHeartsES" LDAP_DOMAIN: "oldap.test.elasticsearch.com" @@ -31,7 +31,7 @@ services: - JETTY_BROWSER_SSL_KEYSTORE_PASSWORD=secret - JETTY_BACKCHANNEL_SSL_KEYSTORE_PASSWORD=secret ports: - - "4443:4443" + - "4443" links: - openldap:openldap volumes:
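The idp-fixture and saml-idp-tests changes above move the Shibboleth IdP (and the OpenLDAP container) from fixed host ports to Docker-assigned ephemeral ports, so concurrent CI builds no longer collide on 4443, 389, and 636. The trade-off is that the host port is only known after the fixture starts, which is why the build now copies idp-metadata.xml out of the fixture and rewrites its localhost:4443 endpoints using the port that postProcessFixture publishes. A minimal sketch of that flow follows; it reuses the property name, file locations, and task wiring visible in the patch, but the task name rewriteIdpMetadata and the single-pass text replacement are illustrative simplifications, not the code the patch adds:

task rewriteIdpMetadata {
    // Run only once the container is up and the metadata has been copied out of the fixture.
    dependsOn copyIdpFiles, idpFixtureProject.postProcessFixture
    doLast {
        // Host port Docker mapped to the IdP's container port 4443, as published by the fixtures plugin.
        int ephemeralPort = Integer.valueOf(
            idpFixtureProject.postProcessFixture.ext."test.fixtures.shibboleth-idp.tcp.4443")
        File idpMetaFile = file("${outputDir}/idp-metadata.xml")
        // Point every SAML endpoint in the copied metadata at the port that was actually assigned.
        idpMetaFile.text = idpMetaFile.text.replace('localhost:4443', "localhost:${ephemeralPort}")
    }
}

As in the patch itself, anything that consumes the rewritten metadata (the cluster's extraConfigFile entry) has to depend on this task rather than on the raw file inside the fixture, and the task should be skipped when Docker is unavailable, mirroring the onlyIf guard on setupPorts.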