diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 15cdfdd1c52ba..1461dae5ae107 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -25,6 +25,46 @@ run it using Gradle: ./gradlew run ------------------------------------- +==== Launching and debugging from an IDE + +If you want to run Elasticsearch from your IDE, the `./gradlew run` task +supports a remote debugging option: + +--------------------------------------------------------------------------- +./gradlew run --debug-jvm +--------------------------------------------------------------------------- + +==== Distribution + +By default a node is started with the zip distribution. +In order to start with a different distribution use the `-Drun.distribution` argument. + +For example, to start the open source distribution: + +------------------------------------- +./gradlew run -Drun.distribution=oss-zip +------------------------------------- + +==== License type + +By default a node is started with the `basic` license type. +In order to start with a different license type use the `-Drun.license_type` argument. + +In order to start a node with a trial license execute the following command: + +------------------------------------- +./gradlew run -Drun.license_type=trial +------------------------------------- + +This enables security and other paid features and adds a superuser with the username `elastic-admin` and +the password `elastic-password`. + +==== Other useful arguments + +In order to start a node with a different max heap space add: `-Dtests.heap.size=4G` +In order to disable assertions add: `-Dtests.asserts=false` +In order to set an Elasticsearch setting, provide a setting with the following prefix: `-Dtests.es.` + === Test case filtering. - `tests.class` is a class-filtering shell-like glob pattern, @@ -572,15 +612,6 @@ as its build system. Since the switch to Gradle though, this is no longer possib the code currently used to build Elasticsearch does not allow JaCoCo to recognize its tests. For more information on this, see the discussion in https://github.com/elastic/elasticsearch/issues/28867[issue #28867]. -== Launching and debugging from an IDE - -If you want to run Elasticsearch from your IDE, the `./gradlew run` task -supports a remote debugging option: - ---------------------------------------------------------------------------- -./gradlew run --debug-jvm ---------------------------------------------------------------------------- - == Debugging remotely from an IDE If you want to run Elasticsearch and be able to remotely attach the process diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy deleted file mode 100644 index acb8f57d9d72c..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginBuildPlugin.groovy +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle.plugin - -import org.elasticsearch.gradle.BuildPlugin -import org.elasticsearch.gradle.test.RestTestPlugin -import org.elasticsearch.gradle.test.RunTask -import org.elasticsearch.gradle.test.StandaloneRestTestPlugin -import org.gradle.api.Plugin -import org.gradle.api.Project -import org.gradle.api.file.FileCopyDetails -import org.gradle.api.file.RelativePath -import org.gradle.api.tasks.bundling.Zip - -class MetaPluginBuildPlugin implements Plugin { - - @Override - void apply(Project project) { - project.plugins.apply(StandaloneRestTestPlugin) - project.plugins.apply(RestTestPlugin) - - createBundleTask(project) - boolean isModule = project.path.startsWith(':modules:') || project.path.startsWith(':x-pack:plugin') - - project.integTestCluster { - dependsOn(project.bundlePlugin) - distribution = 'integ-test-zip' - } - BuildPlugin.configurePomGeneration(project) - project.afterEvaluate { - PluginBuildPlugin.addZipPomGeneration(project) - if (isModule) { - if (project.integTestCluster.distribution == 'integ-test-zip') { - project.integTestCluster.module(project) - } - } else { - project.integTestCluster.plugin(project.path) - } - } - - RunTask run = project.tasks.create('run', RunTask) - run.dependsOn(project.bundlePlugin) - if (isModule == false) { - run.clusterConfig.plugin(project.path) - } - } - - private static void createBundleTask(Project project) { - - MetaPluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', MetaPluginPropertiesTask.class) - - // create the actual bundle task, which zips up all the files for the plugin - Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [buildProperties]) { - from(buildProperties.descriptorOutput.parentFile) { - // plugin properties file - include(buildProperties.descriptorOutput.name) - } - // due to how the renames work for each bundled plugin, we must exclude empty dirs or every subdir - // within bundled plugin zips will show up at the root as an empty dir - includeEmptyDirs = false - - } - project.assemble.dependsOn(bundle) - - // also make the zip available as a configuration (used when depending on this project) - project.configurations.create('zip') - project.artifacts.add('zip', bundle) - - // a super hacky way to inject code to run at the end of each of the bundled plugin's configuration - // to add itself back to this meta plugin zip - project.afterEvaluate { - buildProperties.extension.plugins.each { String bundledPluginProjectName -> - Project bundledPluginProject = project.project(bundledPluginProjectName) - bundledPluginProject.afterEvaluate { - String bundledPluginName = bundledPluginProject.esplugin.name - bundle.configure { - dependsOn bundledPluginProject.bundlePlugin - from(project.zipTree(bundledPluginProject.bundlePlugin.outputs.files.singleFile)) { - eachFile { FileCopyDetails details -> - // we want each path to have the plugin name interjected - details.relativePath = new RelativePath(true, bundledPluginName, details.relativePath.toString()) - } - } - } - } - } - } - } -} diff --git 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy deleted file mode 100644 index e5d84002e533f..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesExtension.groovy +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle.plugin - -import org.gradle.api.Project -import org.gradle.api.tasks.Input - -/** - * A container for meta plugin properties that will be written to the meta plugin descriptor, for easy - * manipulation in the gradle DSL. - */ -class MetaPluginPropertiesExtension { - @Input - String name - - @Input - String description - - /** - * The plugins this meta plugin wraps. - * Note this is not written to the plugin descriptor, but used to setup the final zip file task. - */ - @Input - List plugins - - MetaPluginPropertiesExtension(Project project) { - name = project.name - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy deleted file mode 100644 index e868cc2cc3128..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/MetaPluginPropertiesTask.groovy +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.gradle.plugin - -import org.gradle.api.InvalidUserDataException -import org.gradle.api.Task -import org.gradle.api.tasks.Copy -import org.gradle.api.tasks.OutputFile - -class MetaPluginPropertiesTask extends Copy { - - MetaPluginPropertiesExtension extension - - @OutputFile - File descriptorOutput = new File(project.buildDir, 'generated-resources/meta-plugin-descriptor.properties') - - MetaPluginPropertiesTask() { - File templateFile = new File(project.buildDir, "templates/${descriptorOutput.name}") - Task copyPluginPropertiesTemplate = project.tasks.create('copyPluginPropertiesTemplate') { - doLast { - InputStream resourceTemplate = PluginPropertiesTask.getResourceAsStream("/${descriptorOutput.name}") - templateFile.parentFile.mkdirs() - templateFile.setText(resourceTemplate.getText('UTF-8'), 'UTF-8') - } - } - - dependsOn(copyPluginPropertiesTemplate) - extension = project.extensions.create('es_meta_plugin', MetaPluginPropertiesExtension, project) - project.afterEvaluate { - // check require properties are set - if (extension.name == null) { - throw new InvalidUserDataException('name is a required setting for es_meta_plugin') - } - if (extension.description == null) { - throw new InvalidUserDataException('description is a required setting for es_meta_plugin') - } - // configure property substitution - from(templateFile.parentFile).include(descriptorOutput.name) - into(descriptorOutput.parentFile) - Map properties = generateSubstitutions() - expand(properties) - inputs.properties(properties) - } - } - - Map generateSubstitutions() { - return ['name': extension.name, - 'description': extension.description - ] - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index b9a3839631855..14aa53e4a1762 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -24,7 +24,7 @@ import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin + import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginPropertiesExtension import org.gradle.api.AntBuilder @@ -842,19 +842,15 @@ class ClusterFormationTasks { } static void verifyProjectHasBuildPlugin(String name, Version version, Project project, Project pluginProject) { - if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false && pluginProject.plugins.hasPlugin(MetaPluginBuildPlugin) == false) { + if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) { throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " + - "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin") + "[${project.path}] dependencies: the plugin is not an esplugin") } } - /** Find the plugin name in the given project, whether a regular plugin or meta plugin. */ + /** Find the plugin name in the given project. 
*/ static String findPluginName(Project pluginProject) { PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') - if (extension != null) { - return extension.name - } else { - return pluginProject.extensions.findByName('es_meta_plugin').name - } + return extension.name } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy index de52d75c6008c..5eec829dfa1ba 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestPlugin.groovy @@ -24,6 +24,7 @@ import org.elasticsearch.gradle.BuildPlugin import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.plugins.JavaBasePlugin +import org.gradle.api.tasks.compile.JavaCompile /** * Configures the build to compile against Elasticsearch's test framework and @@ -49,5 +50,12 @@ public class StandaloneTestPlugin implements Plugin { test.testClassesDir project.sourceSets.test.output.classesDir test.mustRunAfter(project.precommit) project.check.dependsOn(test) + + project.tasks.withType(JavaCompile) { + // This will be the default in Gradle 5.0 + if (options.compilerArgs.contains("-processor") == false) { + options.compilerArgs << '-proc:none' + } + } } } diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties deleted file mode 100644 index 50240e95416c7..0000000000000 --- a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.es-meta-plugin.properties +++ /dev/null @@ -1,20 +0,0 @@ -# -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -implementation-class=org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin diff --git a/buildSrc/src/main/resources/meta-plugin-descriptor.properties b/buildSrc/src/main/resources/meta-plugin-descriptor.properties deleted file mode 100644 index 950cb03240083..0000000000000 --- a/buildSrc/src/main/resources/meta-plugin-descriptor.properties +++ /dev/null @@ -1,20 +0,0 @@ -# Elasticsearch meta plugin descriptor file -# This file must exist as 'meta-plugin-descriptor.properties' inside a plugin. 
-# -### example meta plugin for "meta-foo" -# -# meta-foo.zip <-- zip file for the meta plugin, with this structure: -# |____ <-- The plugin files for bundled_plugin_1 -# |____ <-- The plugin files for bundled_plugin_2 -# |____ meta-plugin-descriptor.properties <-- example contents below: -# -# description=My meta plugin -# name=meta-foo -# -### mandatory elements for all meta plugins: -# -# 'description': simple summary of the meta plugin -description=${description} -# -# 'name': the meta plugin name -name=${name} \ No newline at end of file diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 89fe9cf330a35..a547982e3b613 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-6705632810 +lucene = 7.4.0-snapshot-59f2b7aec2 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 931447d85d44a..88e4a2568158f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -642,7 +642,12 @@ public void testShrink() throws IOException { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SHRINK); - Settings targetSettings = Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build(); + Settings targetSettings = + Settings.builder() + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 0) + .putNull("index.routing.allocation.require._name") + .build(); resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); ResizeResponse resizeResponse = highLevelClient().indices().shrink(resizeRequest); assertTrue(resizeResponse.isAcknowledged()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 1dd9834d8f53f..38a963fa33c8c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -1305,7 +1305,8 @@ public void testShrinkIndex() throws Exception { // end::shrink-index-request-waitForActiveShards // tag::shrink-index-request-settings request.getTargetIndexRequest().settings(Settings.builder() - .put("index.number_of_shards", 2)); // <1> + .put("index.number_of_shards", 2) // <1> + .putNull("index.routing.allocation.require._name")); // <2> // end::shrink-index-request-settings // tag::shrink-index-request-aliases request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1> diff --git a/distribution/build.gradle b/distribution/build.gradle index d2e2810bc7eec..940a4152bfd55 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -310,12 +310,14 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { task run(type: RunTask) { distribution = System.getProperty('run.distribution', 'zip') if (distribution == 'zip') { - String licenseType = System.getProperty("license_type", "basic") + String licenseType = 
System.getProperty("run.license_type", "basic") if (licenseType == 'trial') { setting 'xpack.ml.enabled', 'true' setting 'xpack.graph.enabled', 'true' setting 'xpack.watcher.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', 'elastic-admin', '-p', 'elastic-password', '-r', 'superuser' } else if (licenseType != 'basic') { throw new IllegalArgumentException("Unsupported self-generated license type: [" + licenseType + "[basic] or [trial].") } diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli new file mode 100644 index 0000000000000..94f8f763bb1c6 --- /dev/null +++ b/distribution/src/bin/elasticsearch-cli @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e -o pipefail + +source "`dirname "$0"`"/elasticsearch-env + +IFS=';' read -r -a additional_sources <<< "$ES_ADDITIONAL_SOURCES" +for additional_source in "${additional_sources[@]}" +do + source "`dirname "$0"`"/$additional_source +done + +exec \ + "$JAVA" \ + $ES_JAVA_OPTS \ + -Des.path.home="$ES_HOME" \ + -Des.path.conf="$ES_PATH_CONF" \ + -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ + -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ + -cp "$ES_CLASSPATH" \ + $1 \ + "${@:2}" diff --git a/distribution/src/bin/elasticsearch-keystore b/distribution/src/bin/elasticsearch-keystore index aee62dfde50d4..ebe24179a0ea5 100755 --- a/distribution/src/bin/elasticsearch-keystore +++ b/distribution/src/bin/elasticsearch-keystore @@ -1,14 +1,5 @@ #!/bin/bash -source "`dirname "$0"`"/elasticsearch-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +"`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.common.settings.KeyStoreCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-plugin b/distribution/src/bin/elasticsearch-plugin index 500fd710c1aea..67b6ea7e13c37 100755 --- a/distribution/src/bin/elasticsearch-plugin +++ b/distribution/src/bin/elasticsearch-plugin @@ -1,14 +1,5 @@ #!/bin/bash -source "`dirname "$0"`"/elasticsearch-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +"`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.plugins.PluginCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-translog b/distribution/src/bin/elasticsearch-translog index e176231c6f44d..33350aaf0b65f 100755 --- a/distribution/src/bin/elasticsearch-translog +++ b/distribution/src/bin/elasticsearch-translog @@ -1,14 +1,5 @@ #!/bin/bash -source "`dirname "$0"`"/elasticsearch-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +"`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.index.translog.TranslogToolCli \ "$@" diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 71c57f7f10135..d6f6e36b8c48e 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ 
b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -87,8 +87,8 @@ *
 * <li>A URL to a plugin zip</li>
  • * * - * Plugins are packaged as zip files. Each packaged plugin must contain a plugin properties file - * or a meta plugin properties file. See {@link PluginInfo} and {@link MetaPluginInfo}, respectively. + * Plugins are packaged as zip files. Each packaged plugin must contain a plugin properties file. + * See {@link PluginInfo}. *

    * The installation process first extracts the plugin files into a temporary * directory in order to verify the plugin satisfies the following requirements: @@ -106,11 +106,6 @@ * files specific to the plugin. The config files be installed into a subdirectory of the * elasticsearch config directory, using the name of the plugin. If any files to be installed * already exist, they will be skipped. - *

    - * If the plugin is a meta plugin, the installation process installs each plugin separately - * inside the meta plugin directory. The {@code bin} and {@code config} directory are also moved - * inside the meta plugin directory. - *

    */ class InstallPluginCommand extends EnvironmentAwareCommand { @@ -550,7 +545,7 @@ private Path stagingDirectoryWithoutPosixPermissions(Path pluginsDir) throws IOE } // checking for existing version of the plugin - private void verifyPluginName(Path pluginPath, String pluginName, Path candidateDir) throws UserException, IOException { + private void verifyPluginName(Path pluginPath, String pluginName) throws UserException, IOException { // don't let user install plugin conflicting with module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(pluginName)) { @@ -567,28 +562,10 @@ private void verifyPluginName(Path pluginPath, String pluginName, Path candidate pluginName); throw new UserException(PLUGIN_EXISTS, message); } - // checks meta plugins too - try (DirectoryStream stream = Files.newDirectoryStream(pluginPath)) { - for (Path plugin : stream) { - if (candidateDir.equals(plugin.resolve(pluginName))) { - continue; - } - if (MetaPluginInfo.isMetaPlugin(plugin) && Files.exists(plugin.resolve(pluginName))) { - final MetaPluginInfo info = MetaPluginInfo.readFromProperties(plugin); - final String message = String.format( - Locale.ROOT, - "plugin name [%s] already exists in a meta plugin; if you need to update the meta plugin, " + - "uninstall it first using command 'remove %s'", - plugin.resolve(pluginName).toAbsolutePath(), - info.getName()); - throw new UserException(PLUGIN_EXISTS, message); - } - } - } } /** Load information about the plugin, and verify it can be installed with no errors. */ - private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, boolean isBatch, Environment env) throws Exception { + private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, Environment env) throws Exception { final PluginInfo info = PluginInfo.readFromProperties(pluginRoot); if (info.hasNativeController()) { throw new IllegalStateException("plugins can not have native controllers"); @@ -596,7 +573,7 @@ private PluginInfo loadPluginInfo(Terminal terminal, Path pluginRoot, boolean is PluginsService.verifyCompatibility(info); // checking for existing version of the plugin - verifyPluginName(env.pluginsFile(), info.getName(), pluginRoot); + verifyPluginName(env.pluginsFile(), info.getName()); PluginsService.checkForFailedPluginRemovals(env.pluginsFile()); @@ -635,11 +612,7 @@ private void install(Terminal terminal, boolean isBatch, Path tmpRoot, Environme List deleteOnFailure = new ArrayList<>(); deleteOnFailure.add(tmpRoot); try { - if (MetaPluginInfo.isMetaPlugin(tmpRoot)) { - installMetaPlugin(terminal, isBatch, tmpRoot, env, deleteOnFailure); - } else { - installPlugin(terminal, isBatch, tmpRoot, env, deleteOnFailure); - } + installPlugin(terminal, isBatch, tmpRoot, env, deleteOnFailure); } catch (Exception installProblem) { try { IOUtils.rm(deleteOnFailure.toArray(new Path[0])); @@ -650,71 +623,13 @@ private void install(Terminal terminal, boolean isBatch, Path tmpRoot, Environme } } - /** - * Installs the meta plugin and all the bundled plugins from {@code tmpRoot} into the plugins dir. - * If a bundled plugin has a bin dir and/or a config dir, those are copied. 
- */ - private void installMetaPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, - Environment env, List deleteOnFailure) throws Exception { - final MetaPluginInfo metaInfo = MetaPluginInfo.readFromProperties(tmpRoot); - verifyPluginName(env.pluginsFile(), metaInfo.getName(), tmpRoot); - - final Path destination = env.pluginsFile().resolve(metaInfo.getName()); - deleteOnFailure.add(destination); - terminal.println(VERBOSE, metaInfo.toString()); - - final List pluginPaths = new ArrayList<>(); - try (DirectoryStream paths = Files.newDirectoryStream(tmpRoot)) { - // Extract bundled plugins path and validate plugin names - for (Path plugin : paths) { - if (MetaPluginInfo.isPropertiesFile(plugin)) { - continue; - } - final PluginInfo info = PluginInfo.readFromProperties(plugin); - PluginsService.verifyCompatibility(info); - verifyPluginName(env.pluginsFile(), info.getName(), plugin); - pluginPaths.add(plugin); - } - } - - // read optional security policy from each bundled plugin, and confirm all exceptions one time with user - - Set permissions = new HashSet<>(); - final List pluginInfos = new ArrayList<>(); - for (Path plugin : pluginPaths) { - final PluginInfo info = loadPluginInfo(terminal, plugin, isBatch, env); - pluginInfos.add(info); - - Path policy = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY); - if (Files.exists(policy)) { - permissions.addAll(PluginSecurity.parsePermissions(policy, env.tmpFile())); - } - } - PluginSecurity.confirmPolicyExceptions(terminal, permissions, isBatch); - - // move support files and rename as needed to prepare the exploded plugin for its final location - for (int i = 0; i < pluginPaths.size(); ++i) { - Path pluginPath = pluginPaths.get(i); - PluginInfo info = pluginInfos.get(i); - installPluginSupportFiles(info, pluginPath, env.binFile().resolve(metaInfo.getName()), - env.configFile().resolve(metaInfo.getName()), deleteOnFailure); - // ensure the plugin dir within the tmpRoot has the correct name - if (pluginPath.getFileName().toString().equals(info.getName()) == false) { - Files.move(pluginPath, pluginPath.getParent().resolve(info.getName()), StandardCopyOption.ATOMIC_MOVE); - } - } - movePlugin(tmpRoot, destination); - String[] plugins = pluginInfos.stream().map(PluginInfo::getName).toArray(String[]::new); - terminal.println("-> Installed " + metaInfo.getName() + " with: " + Strings.arrayToCommaDelimitedString(plugins)); - } - /** * Installs the plugin from {@code tmpRoot} into the plugins dir. * If the plugin has a bin dir and/or a config dir, those are moved. 
*/ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, Environment env, List deleteOnFailure) throws Exception { - final PluginInfo info = loadPluginInfo(terminal, tmpRoot, isBatch, env); + final PluginInfo info = loadPluginInfo(terminal, tmpRoot, env); // read optional security policy (extra permissions), if it exists, confirm or warn the user Path policy = tmpRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); final Set permissions; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index fb73554c2b19e..6015d9da14307 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -61,25 +61,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } Collections.sort(plugins); for (final Path plugin : plugins) { - if (MetaPluginInfo.isMetaPlugin(plugin)) { - MetaPluginInfo metaInfo = MetaPluginInfo.readFromProperties(plugin); - List subPluginPaths = new ArrayList<>(); - try (DirectoryStream subPaths = Files.newDirectoryStream(plugin)) { - for (Path subPlugin : subPaths) { - if (MetaPluginInfo.isPropertiesFile(subPlugin)) { - continue; - } - subPluginPaths.add(subPlugin); - } - } - Collections.sort(subPluginPaths); - terminal.println(Terminal.Verbosity.SILENT, metaInfo.getName()); - for (Path subPlugin : subPluginPaths) { - printPlugin(env, terminal, subPlugin, "\t"); - } - } else { - printPlugin(env, terminal, plugin, ""); - } + printPlugin(env, terminal, plugin, ""); } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 5931e66cb9a5d..bfeb3c0279b65 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -219,18 +219,6 @@ static String createPluginUrl(String name, Path structure, String... additionalP return createPlugin(name, structure, additionalProps).toUri().toURL().toString(); } - /** creates an meta plugin .zip and returns the url for testing */ - static String createMetaPluginUrl(String name, Path structure) throws IOException { - return createMetaPlugin(name, structure).toUri().toURL().toString(); - } - - static void writeMetaPlugin(String name, Path structure) throws IOException { - PluginTestUtil.writeMetaPluginProperties(structure, - "description", "fake desc", - "name", name - ); - } - static void writePlugin(String name, Path structure, String... additionalProps) throws IOException { String[] properties = Stream.concat(Stream.of( "description", "fake desc", @@ -261,11 +249,6 @@ static Path createPlugin(String name, Path structure, String... 
additionalProps) return writeZip(structure, null); } - static Path createMetaPlugin(String name, Path structure) throws IOException { - writeMetaPlugin(name, structure); - return writeZip(structure, null); - } - void installPlugin(String pluginUrl, Path home) throws Exception { installPlugin(pluginUrl, home, skipJarHellCommand); } @@ -275,11 +258,6 @@ void installPlugin(String pluginUrl, Path home, InstallPluginCommand command) th command.execute(terminal, pluginUrl, false, env); } - void assertMetaPlugin(String metaPlugin, String name, Path original, Environment env) throws IOException { - assertPluginInternal(name, env.pluginsFile().resolve(metaPlugin)); - assertConfigAndBin(metaPlugin, original, env); - } - void assertPlugin(String name, Path original, Environment env) throws IOException { assertPluginInternal(name, env.pluginsFile()); assertConfigAndBin(name, original, env); @@ -388,23 +366,9 @@ public void testSomethingWorks() throws Exception { assertPlugin("fake", pluginDir, env.v2()); } - public void testWithMetaPlugin() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - Files.createDirectory(pluginDir.resolve("fake1")); - writePlugin("fake1", pluginDir.resolve("fake1")); - Files.createDirectory(pluginDir.resolve("fake2")); - writePlugin("fake2", pluginDir.resolve("fake2")); - String pluginZip = createMetaPluginUrl("my_plugins", pluginDir); - installPlugin(pluginZip, env.v1()); - assertMetaPlugin("my_plugins", "fake1", pluginDir, env.v2()); - assertMetaPlugin("my_plugins", "fake2", pluginDir, env.v2()); - } - public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); + Path pluginDir = createPluginDir(temp); String pluginZip = createPluginUrl("fake", pluginDir); final Path removing = env.v2().pluginsFile().resolve(".removing-failed"); Files.createDirectory(removing); @@ -414,11 +378,6 @@ public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { "found file [%s] from a failed attempt to remove the plugin [failed]; execute [elasticsearch-plugin remove failed]", removing); assertThat(e, hasToString(containsString(expected))); - - // test with meta plugin - String metaZip = createMetaPluginUrl("my_plugins", metaDir); - final IllegalStateException e1 = expectThrows(IllegalStateException.class, () -> installPlugin(metaZip, env.v1())); - assertThat(e1, hasToString(containsString(expected))); } public void testSpaceInUrl() throws Exception { @@ -500,23 +459,6 @@ public void testJarHell() throws Exception { assertInstallCleaned(environment.v2()); } - public void testJarHellInMetaPlugin() throws Exception { - // jar hell test needs a real filesystem - assumeTrue("real filesystem", isReal); - Tuple environment = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - Files.createDirectory(pluginDir.resolve("fake1")); - writePlugin("fake1", pluginDir.resolve("fake1")); - Files.createDirectory(pluginDir.resolve("fake2")); - writePlugin("fake2", pluginDir.resolve("fake2")); // adds plugin.jar with Fake2Plugin - writeJar(pluginDir.resolve("fake2").resolve("other.jar"), "Fake2Plugin"); - String pluginZip = createMetaPluginUrl("my_plugins", pluginDir); - IllegalStateException e = expectThrows(IllegalStateException.class, - () -> installPlugin(pluginZip, environment.v1(), defaultCommand)); - assertTrue(e.getMessage(), e.getMessage().contains("jar hell")); - 
assertInstallCleaned(environment.v2()); - } - public void testIsolatedPlugins() throws Exception { Tuple env = createEnv(fs, temp); // these both share the same FakePlugin class @@ -540,23 +482,6 @@ public void testExistingPlugin() throws Exception { assertInstallCleaned(env.v2()); } - public void testExistingMetaPlugin() throws Exception { - Tuple env = createEnv(fs, temp); - Path metaZip = createPluginDir(temp); - Path pluginDir = metaZip.resolve("fake"); - Files.createDirectory(pluginDir); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("already exists")); - assertInstallCleaned(env.v2()); - - String anotherZip = createMetaPluginUrl("another_plugins", metaZip); - e = expectThrows(UserException.class, () -> installPlugin(anotherZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("already exists")); - assertInstallCleaned(env.v2()); - } - public void testBin() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); @@ -568,43 +493,20 @@ public void testBin() throws Exception { assertPlugin("fake", pluginDir, env.v2()); } - public void testMetaBin() throws Exception { - Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); - Files.createDirectory(pluginDir); - writePlugin("fake", pluginDir); - Path binDir = pluginDir.resolve("bin"); - Files.createDirectory(binDir); - Files.createFile(binDir.resolve("somescript")); - String pluginZip = createMetaPluginUrl("my_plugins", metaDir); - installPlugin(pluginZip, env.v1()); - assertMetaPlugin("my_plugins","fake", pluginDir, env.v2()); - } - public void testBinNotDir() throws Exception { Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); - Files.createDirectory(pluginDir); + Path pluginDir = createPluginDir(temp); Path binDir = pluginDir.resolve("bin"); Files.createFile(binDir); String pluginZip = createPluginUrl("fake", pluginDir); UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); assertInstallCleaned(env.v2()); - - String metaZip = createMetaPluginUrl("my_plugins", metaDir); - e = expectThrows(UserException.class, () -> installPlugin(metaZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); - assertInstallCleaned(env.v2()); } public void testBinContainsDir() throws Exception { Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); - Files.createDirectory(pluginDir); + Path pluginDir = createPluginDir(temp); Path dirInBinDir = pluginDir.resolve("bin").resolve("foo"); Files.createDirectories(dirInBinDir); Files.createFile(dirInBinDir.resolve("somescript")); @@ -612,11 +514,6 @@ public void testBinContainsDir() throws Exception { UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in bin dir for plugin")); assertInstallCleaned(env.v2()); - - String metaZip = createMetaPluginUrl("my_plugins", metaDir); - e = expectThrows(UserException.class, () -> installPlugin(metaZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("Directories not 
allowed in bin dir for plugin")); - assertInstallCleaned(env.v2()); } public void testBinConflict() throws Exception { @@ -649,27 +546,6 @@ public void testBinPermissions() throws Exception { } } - public void testMetaBinPermissions() throws Exception { - assumeTrue("posix filesystem", isPosix); - Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); - Files.createDirectory(pluginDir); - writePlugin("fake", pluginDir); - Path binDir = pluginDir.resolve("bin"); - Files.createDirectory(binDir); - Files.createFile(binDir.resolve("somescript")); - String pluginZip = createMetaPluginUrl("my_plugins", metaDir); - try (PosixPermissionsResetter binAttrs = new PosixPermissionsResetter(env.v2().binFile())) { - Set perms = binAttrs.getCopyPermissions(); - // make sure at least one execute perm is missing, so we know we forced it during installation - perms.remove(PosixFilePermission.GROUP_EXECUTE); - binAttrs.setPermissions(perms); - installPlugin(pluginZip, env.v1()); - assertMetaPlugin("my_plugins", "fake", pluginDir, env.v2()); - } - } - public void testPluginPermissions() throws Exception { assumeTrue("posix filesystem", isPosix); @@ -761,32 +637,9 @@ public void testExistingConfig() throws Exception { assertTrue(Files.exists(envConfigDir.resolve("other.yml"))); } - public void testExistingMetaConfig() throws Exception { - Tuple env = createEnv(fs, temp); - Path envConfigDir = env.v2().configFile().resolve("my_plugins"); - Files.createDirectories(envConfigDir); - Files.write(envConfigDir.resolve("custom.yml"), "existing config".getBytes(StandardCharsets.UTF_8)); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); - Files.createDirectory(pluginDir); - writePlugin("fake", pluginDir); - Path configDir = pluginDir.resolve("config"); - Files.createDirectory(configDir); - Files.write(configDir.resolve("custom.yml"), "new config".getBytes(StandardCharsets.UTF_8)); - Files.createFile(configDir.resolve("other.yml")); - String pluginZip = createMetaPluginUrl("my_plugins", metaDir); - installPlugin(pluginZip, env.v1()); - assertMetaPlugin("my_plugins", "fake", pluginDir, env.v2()); - List configLines = Files.readAllLines(envConfigDir.resolve("custom.yml"), StandardCharsets.UTF_8); - assertEquals(1, configLines.size()); - assertEquals("existing config", configLines.get(0)); - assertTrue(Files.exists(envConfigDir.resolve("other.yml"))); - } - public void testConfigNotDir() throws Exception { Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); + Path pluginDir = createPluginDir(temp); Files.createDirectories(pluginDir); Path configDir = pluginDir.resolve("config"); Files.createFile(configDir); @@ -794,11 +647,6 @@ public void testConfigNotDir() throws Exception { UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); assertInstallCleaned(env.v2()); - - String metaZip = createMetaPluginUrl("my_plugins", metaDir); - e = expectThrows(UserException.class, () -> installPlugin(metaZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("not a directory")); - assertInstallCleaned(env.v2()); } public void testConfigContainsDir() throws Exception { @@ -815,19 +663,12 @@ public void testConfigContainsDir() throws Exception { public void testMissingDescriptor() throws Exception { Tuple env = createEnv(fs, temp); - Path metaDir = 
createPluginDir(temp); - Path pluginDir = metaDir.resolve("fake"); - Files.createDirectory(pluginDir); + Path pluginDir = createPluginDir(temp); Files.createFile(pluginDir.resolve("fake.yml")); String pluginZip = writeZip(pluginDir, null).toUri().toURL().toString(); NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("plugin-descriptor.properties")); assertInstallCleaned(env.v2()); - - String metaZip = createMetaPluginUrl("my_plugins", metaDir); - e = expectThrows(NoSuchFileException.class, () -> installPlugin(metaZip, env.v1())); - assertTrue(e.getMessage(), e.getMessage().contains("plugin-descriptor.properties")); - assertInstallCleaned(env.v2()); } public void testContainsIntermediateDirectory() throws Exception { @@ -840,16 +681,6 @@ public void testContainsIntermediateDirectory() throws Exception { assertInstallCleaned(env.v2()); } - public void testContainsIntermediateDirectoryMeta() throws Exception { - Tuple env = createEnv(fs, temp); - Path pluginDir = createPluginDir(temp); - Files.createFile(pluginDir.resolve(MetaPluginInfo.ES_META_PLUGIN_PROPERTIES)); - String pluginZip = writeZip(pluginDir, "elasticsearch").toUri().toURL().toString(); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); - assertThat(e.getMessage(), containsString("This plugin was built with an older plugin structure")); - assertInstallCleaned(env.v2()); - } - public void testZipRelativeOutsideEntryName() throws Exception { Tuple env = createEnv(fs, temp); Path zip = createTempDir().resolve("broken.zip"); @@ -958,29 +789,6 @@ public void testPluginAlreadyInstalled() throws Exception { "if you need to update the plugin, uninstall it first using command 'remove fake'")); } - public void testMetaPluginAlreadyInstalled() throws Exception { - Tuple env = createEnv(fs, temp); - { - // install fake plugin - Path pluginDir = createPluginDir(temp); - String pluginZip = createPluginUrl("fake", pluginDir); - installPlugin(pluginZip, env.v1()); - } - - Path pluginDir = createPluginDir(temp); - Files.createDirectory(pluginDir.resolve("fake")); - writePlugin("fake", pluginDir.resolve("fake")); - Files.createDirectory(pluginDir.resolve("other")); - writePlugin("other", pluginDir.resolve("other")); - String metaZip = createMetaPluginUrl("meta", pluginDir); - final UserException e = expectThrows(UserException.class, - () -> installPlugin(metaZip, env.v1(), randomFrom(skipJarHellCommand, defaultCommand))); - assertThat( - e.getMessage(), - equalTo("plugin directory [" + env.v2().pluginsFile().resolve("fake") + "] already exists; " + - "if you need to update the plugin, uninstall it first using command 'remove fake'")); - } - private void installPlugin(MockTerminal terminal, boolean isBatch) throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); @@ -1224,24 +1032,6 @@ public void testPolicyConfirmation() throws Exception { assertPlugin("fake", pluginDir, env.v2()); } - public void testMetaPluginPolicyConfirmation() throws Exception { - Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path fake1Dir = metaDir.resolve("fake1"); - Files.createDirectory(fake1Dir); - writePluginSecurityPolicy(fake1Dir, "setAccessible", "setFactory"); - writePlugin("fake1", fake1Dir); - Path fake2Dir = metaDir.resolve("fake2"); - Files.createDirectory(fake2Dir); - writePluginSecurityPolicy(fake2Dir, "setAccessible", "accessDeclaredMembers"); - 
writePlugin("fake2", fake2Dir); - String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); - - assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions"); - assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2()); - assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); - } - public void testPluginWithNativeController() throws Exception { Tuple env = createEnv(fs, temp); Path pluginDir = createPluginDir(temp); @@ -1250,21 +1040,4 @@ public void testPluginWithNativeController() throws Exception { final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); assertThat(e, hasToString(containsString("plugins can not have native controllers"))); } - - public void testMetaPluginWithNativeController() throws Exception { - Tuple env = createEnv(fs, temp); - Path metaDir = createPluginDir(temp); - Path fake1Dir = metaDir.resolve("fake1"); - Files.createDirectory(fake1Dir); - writePluginSecurityPolicy(fake1Dir, "setAccessible", "setFactory"); - writePlugin("fake1", fake1Dir); - Path fake2Dir = metaDir.resolve("fake2"); - Files.createDirectory(fake2Dir); - writePlugin("fake2", fake2Dir, "has.native.controller", "true"); - String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); - - final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip, env.v1())); - assertThat(e, hasToString(containsString("plugins can not have native controllers"))); - } - } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index f106d51063f40..8144c5f060073 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -92,16 +92,7 @@ private static void buildFakePlugin( final String description, final String name, final String classname) throws IOException { - buildFakePlugin(env, null, description, name, classname, false); - } - - private static void buildFakePlugin( - final Environment env, - final String metaPlugin, - final String description, - final String name, - final String classname) throws IOException { - buildFakePlugin(env, metaPlugin, description, name, classname, false); + buildFakePlugin(env, description, name, classname, false); } private static void buildFakePlugin( @@ -110,36 +101,15 @@ private static void buildFakePlugin( final String name, final String classname, final boolean hasNativeController) throws IOException { - buildFakePlugin(env, null, description, name, classname, hasNativeController); - } - - private static void buildFakePlugin( - final Environment env, - final String metaPlugin, - final String description, - final String name, - final String classname, - final boolean hasNativeController) throws IOException { - Path dest = metaPlugin != null ? 
env.pluginsFile().resolve(metaPlugin) : env.pluginsFile(); PluginTestUtil.writePluginProperties( - dest.resolve(name), - "description", description, - "name", name, - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", "1.8", - "classname", classname, - "has.native.controller", Boolean.toString(hasNativeController)); - } - - private static void buildFakeMetaPlugin( - final Environment env, - final String description, - final String name) throws IOException { - PluginTestUtil.writeMetaPluginProperties( env.pluginsFile().resolve(name), "description", description, - "name", name); + "name", name, + "version", "1.0", + "elasticsearch.version", Version.CURRENT.toString(), + "java.version", "1.8", + "classname", classname, + "has.native.controller", Boolean.toString(hasNativeController)); } public void testPluginsDirMissing() throws Exception { @@ -166,16 +136,6 @@ public void testTwoPlugins() throws Exception { assertEquals(buildMultiline("fake1", "fake2"), terminal.getOutput()); } - public void testMetaPlugin() throws Exception { - buildFakeMetaPlugin(env, "fake meta desc", "meta_plugin"); - buildFakePlugin(env, "meta_plugin", "fake desc", "fake1", "org.fake1"); - buildFakePlugin(env, "meta_plugin", "fake desc 2", "fake2", "org.fake2"); - buildFakePlugin(env, "fake desc 3", "fake3", "org.fake3"); - buildFakePlugin(env, "fake desc 4", "fake4", "org.fake4"); - MockTerminal terminal = listPlugins(home); - assertEquals(buildMultiline("fake3", "fake4", "meta_plugin", "\tfake1", "\tfake2"), terminal.getOutput()); - } - public void testPluginWithVerbose() throws Exception { buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake"); String[] params = { "-v" }; @@ -247,39 +207,6 @@ public void testPluginWithVerboseMultiplePlugins() throws Exception { terminal.getOutput()); } - public void testPluginWithVerboseMetaPlugins() throws Exception { - buildFakeMetaPlugin(env, "fake meta desc", "meta_plugin"); - buildFakePlugin(env, "meta_plugin", "fake desc 1", "fake_plugin1", "org.fake"); - buildFakePlugin(env, "meta_plugin", "fake desc 2", "fake_plugin2", "org.fake2"); - String[] params = { "-v" }; - MockTerminal terminal = listPlugins(home, params); - assertEquals( - buildMultiline( - "Plugins directory: " + env.pluginsFile(), - "meta_plugin", - "\tfake_plugin1", - "\t- Plugin information:", - "\tName: fake_plugin1", - "\tDescription: fake desc 1", - "\tVersion: 1.0", - "\tElasticsearch Version: " + Version.CURRENT.toString(), - "\tJava Version: 1.8", - "\tNative Controller: false", - "\tExtended Plugins: []", - "\t * Classname: org.fake", - "\tfake_plugin2", - "\t- Plugin information:", - "\tName: fake_plugin2", - "\tDescription: fake desc 2", - "\tVersion: 1.0", - "\tElasticsearch Version: " + Version.CURRENT.toString(), - "\tJava Version: 1.8", - "\tNative Controller: false", - "\tExtended Plugins: []", - "\t * Classname: org.fake2"), - terminal.getOutput()); - } - public void testPluginWithoutVerboseMultiplePlugins() throws Exception { buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); @@ -307,19 +234,6 @@ public void testPluginWithWrongDescriptorFile() throws Exception{ e.getMessage()); } - public void testMetaPluginWithWrongDescriptorFile() throws Exception{ - buildFakeMetaPlugin(env, "fake meta desc", "meta_plugin"); - final Path pluginDir = env.pluginsFile().resolve("meta_plugin").resolve("fake_plugin1"); - PluginTestUtil.writePluginProperties(pluginDir, "description", "fake 
desc"); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> listPlugins(home)); - final Path descriptorPath = pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES); - assertEquals( - "property [name] is missing in [" + descriptorPath.toString() + "]", - e.getMessage()); - } - public void testExistingIncompatiblePlugin() throws Exception { PluginTestUtil.writePluginProperties(env.pluginsFile().resolve("fake_plugin1"), "description", "fake desc 1", @@ -340,27 +254,4 @@ public void testExistingIncompatiblePlugin() throws Exception { terminal = listPlugins(home, params); assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); } - - public void testExistingIncompatibleMetaPlugin() throws Exception { - buildFakeMetaPlugin(env, "fake meta desc", "meta_plugin"); - PluginTestUtil.writePluginProperties(env.pluginsFile().resolve("meta_plugin").resolve("fake_plugin1"), - "description", "fake desc 1", - "name", "fake_plugin1", - "version", "1.0", - "elasticsearch.version", Version.fromString("1.0.0").toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "org.fake1"); - buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); - - MockTerminal terminal = listPlugins(home); - String message = "plugin [fake_plugin1] was built for Elasticsearch version 1.0 but version " + Version.CURRENT + " is required"; - assertEquals( - "fake_plugin2\nmeta_plugin\n\tfake_plugin1\n" + "WARNING: " + message + "\n", - terminal.getOutput()); - - String[] params = {"-s"}; - terminal = listPlugins(home, params); - assertEquals("fake_plugin2\nmeta_plugin\n\tfake_plugin1\n", terminal.getOutput()); - } - } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index 13506cf986a9a..67c55bc348c1a 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -103,16 +103,6 @@ void createPlugin(Path path, String name, Version version) throws IOException { "classname", "SomeClass"); } - void createMetaPlugin(String name, String... 
plugins) throws Exception { - PluginTestUtil.writeMetaPluginProperties( - env.pluginsFile().resolve(name), - "description", "dummy", - "name", name); - for (String plugin : plugins) { - createPlugin(env.pluginsFile().resolve(name), plugin); - } - } - static MockTerminal removePlugin(String name, Path home, boolean purge) throws Exception { Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); MockTerminal terminal = new MockTerminal(); @@ -159,19 +149,6 @@ public void testRemoveOldVersion() throws Exception { assertRemoveCleaned(env); } - public void testBasicMeta() throws Exception { - createMetaPlugin("meta", "fake1"); - createPlugin("other"); - removePlugin("meta", home, randomBoolean()); - assertFalse(Files.exists(env.pluginsFile().resolve("meta"))); - assertTrue(Files.exists(env.pluginsFile().resolve("other"))); - assertRemoveCleaned(env); - - UserException exc = - expectThrows(UserException.class, () -> removePlugin("fake1", home, randomBoolean())); - assertThat(exc.getMessage(), containsString("plugin [fake1] not found")); - } - public void testBin() throws Exception { createPlugin("fake"); Path binDir = env.binFile().resolve("fake"); diff --git a/docs/README.asciidoc b/docs/README.asciidoc index f0826b4161224..2963359d44c28 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -6,7 +6,7 @@ See: https://github.com/elastic/docs Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested by the command `gradle :docs:check`. To test just the docs from a single page, -use e.g. `gradle :docs:check -Dtests.method="\*rollover*"`. +use e.g. `gradle :docs:check -Dtests.method="*rollover*"`. By default each `// CONSOLE` snippet runs as its own isolated test. You can manipulate the test execution in the following ways: diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index 0fd5c3a483f50..e28ec84f08790 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -208,6 +208,9 @@ The following projects appear to be abandoned: * https://github.com/ropensci/elasticdsl[elasticdsl]: A high-level R DSL for Elasticsearch, wrapping the elastic R client. + +* https://github.com/UptakeOpenSource/uptasticsearch[uptasticsearch]: + An R client tailored to data science workflows. The following projects appear to be abandoned: diff --git a/docs/java-api/query-dsl/has-parent-query.asciidoc b/docs/java-api/query-dsl/has-parent-query.asciidoc index 63711c399f71a..6a83fe2b0698f 100644 --- a/docs/java-api/query-dsl/has-parent-query.asciidoc +++ b/docs/java-api/query-dsl/has-parent-query.asciidoc @@ -9,7 +9,7 @@ When using the `has_parent` query it is important to use the `PreBuiltTransportC -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Otherwise the parent-join module doesn't get loaded and the `has_parent` query can't be used from the transport client. 
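For context, the query that this client setup enables looks like the following in the REST query DSL (a minimal sketch; the index name and the `parent_type` value here are illustrative, not taken from the change above):

--------------------------------------------------
GET /my-index/_search
{
    "query": {
        "has_parent": {
            "parent_type": "question",
            "query": {
                "match_all": {}
            }
        }
    }
}
--------------------------------------------------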
diff --git a/docs/java-api/query-dsl/percolate-query.asciidoc b/docs/java-api/query-dsl/percolate-query.asciidoc index a5651392b628e..e1968ae456a5c 100644 --- a/docs/java-api/query-dsl/percolate-query.asciidoc +++ b/docs/java-api/query-dsl/percolate-query.asciidoc @@ -9,7 +9,7 @@ See: -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Before the `percolate` query can be used, a `percolator` mapping should be added and diff --git a/docs/java-rest/high-level/indices/shrink_index.asciidoc b/docs/java-rest/high-level/indices/shrink_index.asciidoc index 318279646319d..5bef7a561a628 100644 --- a/docs/java-rest/high-level/indices/shrink_index.asciidoc +++ b/docs/java-rest/high-level/indices/shrink_index.asciidoc @@ -45,8 +45,8 @@ returns a response, as an `ActiveShardCount` -------------------------------------------------- include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[shrink-index-request-settings] -------------------------------------------------- -<1> The settings to apply to the target index, which include the number of -shards to create for it +<1> The number of shards on the target of the shrink index request +<2> Remove the allocation requirement copied from the source index ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/painless/painless-execute-script.asciidoc b/docs/painless/painless-execute-script.asciidoc index 7997c87e3e45f..3344bd9f75132 100644 --- a/docs/painless/painless-execute-script.asciidoc +++ b/docs/painless/painless-execute-script.asciidoc @@ -1,6 +1,8 @@ [[painless-execute-api]] === Painless execute API +experimental[The Painless execute API is new and the request / response format may change in a breaking way in the future] + The Painless execute API allows an arbitrary script to be executed and a result to be returned. [[painless-execute-api-parameters]] diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index b89ac903592ca..fceeeac892c80 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -13,8 +13,6 @@ The Elasticsearch repository contains examples of: which contains a rescore plugin. * a https://github.com/elastic/elasticsearch/tree/master/plugins/examples/script-expert-scoring[Java plugin] which contains a script plugin. -* a https://github.com/elastic/elasticsearch/tree/master/plugins/examples/meta-plugin[Java plugin] - which contains a meta plugin. These examples provide the bare bones needed to get started. For more information about how to write a plugin, we recommend looking at the plugins @@ -120,19 +118,3 @@ AccessController.doPrivileged( See http://www.oracle.com/technetwork/java/seccodeguide-139067.html[Secure Coding Guidelines for Java SE] for more information. - -[float] -=== Meta Plugin - -It is also possible to bundle multiple plugins into a meta plugin. -A directory for each sub-plugin must be contained in a directory called `elasticsearch`.
-The meta plugin must also contain a file called `meta-plugin-descriptor.properties` in the directory named -`elasticsearch`. -The format for this file is described in detail in this example: - -["source","properties",subs="attributes"] --------------------------------------------------- -include::{plugin-properties-files}/meta-plugin-descriptor.properties[] --------------------------------------------------- - -A meta plugin can be installed/removed like a normal plugin with the `bin/elasticsearch-plugin` command. diff --git a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc index 3ca86d1d7a096..b7e3b1edf10d2 100644 --- a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc @@ -124,7 +124,7 @@ The `other_bucket` parameter can be set to add a bucket to the response which wi not match any of the given filters. The value of this parameter can be as follows: `false`:: Does not compute the `other` bucket -`true`:: Returns the `other` bucket bucket either in a bucket (named `_other_` by default) if named filters are being used, +`true`:: Returns the `other` bucket either in a bucket (named `_other_` by default) if named filters are being used, or as the last bucket if anonymous filters are being used The `other_bucket_key` parameter can be used to set the key for the `other` bucket to a value other than the default `_other_`. Setting diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index 0c19bf172bbf0..dc3222a5f371e 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -28,8 +28,8 @@ The top_hits aggregation returns regular search hits, because of this many per h ==== Example -In the following example we group the questions by tag and per tag we show the last active question. For each question -only the title field is being included in the source. +In the following example we group the sales by type and per type we show the last sale. +For each sale only the date and price fields are being included in the source. 
[source,js] -------------------------------------------------- diff --git a/docs/reference/index-shared1.asciidoc b/docs/reference/index-shared1.asciidoc deleted file mode 100644 index ae208e290112a..0000000000000 --- a/docs/reference/index-shared1.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ - -include::getting-started.asciidoc[] - -include::setup.asciidoc[] - -include::upgrade.asciidoc[] diff --git a/docs/reference/index-shared2.asciidoc b/docs/reference/index-shared2.asciidoc deleted file mode 100644 index e48948079cc9f..0000000000000 --- a/docs/reference/index-shared2.asciidoc +++ /dev/null @@ -1,2 +0,0 @@ - -include::migration/index.asciidoc[] diff --git a/docs/reference/index-shared3.asciidoc b/docs/reference/index-shared3.asciidoc deleted file mode 100644 index 4da338186b0c8..0000000000000 --- a/docs/reference/index-shared3.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ - -include::api-conventions.asciidoc[] - -include::docs.asciidoc[] - -include::search.asciidoc[] - -include::aggregations.asciidoc[] - -include::indices.asciidoc[] - -include::cat.asciidoc[] - -include::cluster.asciidoc[] - -include::query-dsl.asciidoc[] - -include::mapping.asciidoc[] - -include::analysis.asciidoc[] - -include::modules.asciidoc[] - -include::index-modules.asciidoc[] - -include::ingest.asciidoc[] diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc deleted file mode 100644 index 3dc9e4f5e07cf..0000000000000 --- a/docs/reference/index-shared4.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ - -include::how-to.asciidoc[] - -include::testing.asciidoc[] - -include::glossary.asciidoc[] - -include::release-notes/highlights.asciidoc[] - -include::release-notes.asciidoc[] \ No newline at end of file diff --git a/docs/reference/index-shared5.asciidoc b/docs/reference/index-shared5.asciidoc deleted file mode 100644 index 572522f6c8e74..0000000000000 --- a/docs/reference/index-shared5.asciidoc +++ /dev/null @@ -1,2 +0,0 @@ - -include::redirects.asciidoc[] diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 8aa9eef32f8bf..59f8d77a0c086 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,12 +1,79 @@ [[elasticsearch-reference]] = Elasticsearch Reference +:include-xpack: true :es-test-dir: {docdir}/../src/test :plugins-examples-dir: {docdir}/../../plugins/examples +:xes-repo-dir: {docdir}/../../x-pack/docs/{lang} +:es-repo-dir: {docdir} + include::../Versions.asciidoc[] -include::index-shared1.asciidoc[] -include::index-shared2.asciidoc[] -include::index-shared3.asciidoc[] -include::index-shared4.asciidoc[] -include::index-shared5.asciidoc[] + +include::getting-started.asciidoc[] + +include::setup.asciidoc[] + +include::{xes-repo-dir}/setup/setup-xes.asciidoc[] + +include::{xes-repo-dir}/monitoring/configuring-monitoring.asciidoc[] + +include::{xes-repo-dir}/security/configuring-es.asciidoc[] + +include::{xes-repo-dir}/setup/setup-xclient.asciidoc[] + +include::{xes-repo-dir}/settings/configuring-xes.asciidoc[] + +include::{xes-repo-dir}/setup/bootstrap-checks-xes.asciidoc[] + +include::upgrade.asciidoc[] + +include::migration/index.asciidoc[] + +include::api-conventions.asciidoc[] + +include::docs.asciidoc[] + +include::search.asciidoc[] + +include::aggregations.asciidoc[] + +include::indices.asciidoc[] + +include::cat.asciidoc[] + +include::cluster.asciidoc[] + +include::query-dsl.asciidoc[] + +include::mapping.asciidoc[] + +include::analysis.asciidoc[] + +include::modules.asciidoc[] + +include::index-modules.asciidoc[] + 
+include::ingest.asciidoc[] + +include::{xes-repo-dir}/sql/index.asciidoc[] + +include::{xes-repo-dir}/monitoring/index.asciidoc[] + +include::{xes-repo-dir}/rollup/index.asciidoc[] + +include::{xes-repo-dir}/rest-api/index.asciidoc[] + +include::{xes-repo-dir}/commands/index.asciidoc[] + +include::how-to.asciidoc[] + +include::testing.asciidoc[] + +include::glossary.asciidoc[] + +include::release-notes/highlights.asciidoc[] + +include::release-notes.asciidoc[] + +include::redirects.asciidoc[] diff --git a/docs/reference/index.x.asciidoc b/docs/reference/index.x.asciidoc index 5be21cb004331..35204eef5b67e 100644 --- a/docs/reference/index.x.asciidoc +++ b/docs/reference/index.x.asciidoc @@ -1,12 +1 @@ -[[elasticsearch-reference]] -= Elasticsearch Reference - -:include-xpack: true -:es-test-dir: {docdir}/../src/test -:plugins-examples-dir: {docdir}/../../plugins/examples -:xes-repo-dir: {docdir}/../../x-pack/docs/{lang} -:es-repo-dir: {docdir} - - -include::../Versions.asciidoc[] -include::{xes-repo-dir}/index.asciidoc[] +include::index.asciidoc[] diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index f2882e6fb60d4..32a454624990c 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -25,7 +25,7 @@ PUT twitter } -------------------------------------------------- // CONSOLE -<1> Default for `number_of_shards` is 5 +<1> Default for `number_of_shards` is 1 <2> Default for `number_of_replicas` is 1 (ie one replica for each primary shard) The above second curl example shows how an index called `twitter` can be diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 34e90e6799d78..24a67208f7289 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -62,7 +62,7 @@ the following request: [source,js] -------------------------------------------------- -POST my_source_index/_shrink/my_target_index?copy_settings=true +POST my_source_index/_shrink/my_target_index { "settings": { "index.routing.allocation.require._name": null, <1> @@ -106,7 +106,7 @@ and accepts `settings` and `aliases` parameters for the target index: [source,js] -------------------------------------------------- -POST my_source_index/_shrink/my_target_index?copy_settings=true +POST my_source_index/_shrink/my_target_index { "settings": { "index.number_of_replicas": 1, @@ -130,16 +130,6 @@ POST my_source_index/_shrink/my_target_index?copy_settings=true NOTE: Mappings may not be specified in the `_shrink` request. -NOTE: By default, with the exception of `index.analysis`, `index.similarity`, -and `index.sort` settings, index settings on the source index are not copied -during a shrink operation. With the exception of non-copyable settings, settings -from the source index can be copied to the target index by adding the URL -parameter `copy_settings=true` to the request. Note that `copy_settings` can not -be set to `false`. 
The parameter `copy_settings` will be removed in 8.0.0 - -deprecated[6.4.0, not copying settings is deprecated, copying settings will be -the default behavior in 7.x] - [float] === Monitoring the shrink process diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc index aaed23459c34b..ade0a8075d582 100644 --- a/docs/reference/indices/split-index.asciidoc +++ b/docs/reference/indices/split-index.asciidoc @@ -123,7 +123,7 @@ the following request: [source,js] -------------------------------------------------- -POST my_source_index/_split/my_target_index?copy_settings=true +POST my_source_index/_split/my_target_index { "settings": { "index.number_of_shards": 2 @@ -158,7 +158,7 @@ and accepts `settings` and `aliases` parameters for the target index: [source,js] -------------------------------------------------- -POST my_source_index/_split/my_target_index?copy_settings=true +POST my_source_index/_split/my_target_index { "settings": { "index.number_of_shards": 5 <1> @@ -177,16 +177,6 @@ POST my_source_index/_split/my_target_index?copy_settings=true NOTE: Mappings may not be specified in the `_split` request. -NOTE: By default, with the exception of `index.analysis`, `index.similarity`, -and `index.sort` settings, index settings on the source index are not copied -during a split operation. With the exception of non-copyable settings, settings -from the source index can be copied to the target index by adding the URL -parameter `copy_settings=true` to the request. Note that `copy_settings` can not -be set to `false`. The parameter `copy_settings` will be removed in 8.0.0 - -deprecated[6.4.0, not copying settings is deprecated, copying settings will be -the default behavior in 7.x] - [float] === Monitoring the split process diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index aea6d14fac9c8..c68dee287d5a5 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -34,6 +34,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. * <<breaking_70_api_changes>> * <<breaking_70_java_changes>> * <<breaking_70_settings_changes>> +* <<breaking_70_scripting_changes>> include::migrate_7_0/aggregations.asciidoc[] @@ -47,3 +48,4 @@ include::migrate_7_0/plugins.asciidoc[] include::migrate_7_0/api.asciidoc[] include::migrate_7_0/java.asciidoc[] include::migrate_7_0/settings.asciidoc[] +include::migrate_7_0/scripting.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index e140fd577bd19..3d824c600648f 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -65,3 +65,13 @@ deprecated in 6.3.0 and now removed in 7.0.0. In the past, `fields` could be provided either as a parameter, or as part of the request body. Specifying `fields` in the request body as opposed to a parameter was deprecated in 6.4.0, and is now unsupported in 7.0.0. + +==== `copy_settings` is deprecated on shrink and split APIs + +Versions of Elasticsearch prior to 6.4.0 did not copy index settings on shrink +and split operations. Starting with Elasticsearch 7.0.0, the default behavior +will be for such settings to be copied on such operations. To enable users in +6.4.0 to transition to the default behavior in 7.0.0, the +`copy_settings` parameter was added on the REST layer. As this behavior will be +the only behavior in 8.0.0, this parameter is deprecated in 7.0.0 for removal in +8.0.0.
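For example (a sketch reusing the `my_source_index` and `my_target_index` names from the examples above), a shrink request on 6.4 that explicitly opts into the 7.0.0 copying behavior would look like:

[source,js]
--------------------------------------------------
POST my_source_index/_shrink/my_target_index?copy_settings=true
{
  "settings": {
    "index.number_of_replicas": 1
  }
}
--------------------------------------------------

Once copying becomes the only behavior in 8.0.0, the `copy_settings` parameter can simply be dropped from such requests.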
diff --git a/docs/reference/migration/migrate_7_0/scripting.asciidoc b/docs/reference/migration/migrate_7_0/scripting.asciidoc new file mode 100644 index 0000000000000..df43aaa92eadf --- /dev/null +++ b/docs/reference/migration/migrate_7_0/scripting.asciidoc @@ -0,0 +1,13 @@ +[[breaking_70_scripting_changes]] +=== Scripting changes + +==== getDate() and getDates() removed + +Fields of type `long` and `date` had `getDate()` and `getDates()` methods +(for multi-valued fields) to get an object with date-specific helper methods +for the current doc value. In 5.3.0, `date` fields were changed to expose +this same date object directly when calling `doc["myfield"].value`, and +the getter methods for date objects were deprecated. These methods have +now been removed. Instead, use `.value` on `date` fields, or explicitly +parse `long` fields into a date object using +`Instant.ofEpochMilli(doc["myfield"].value)`. diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index c48ec5de22d2c..861daa160e7b5 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -79,13 +79,13 @@ part of the installation, with the option to configure a HTTPS proxy through whi [[msi-installer-selected-plugins]] image::images/msi_installer/msi_installer_selected_plugins.png[] -Upon choosing to install X-Pack plugin, an additional step allows a choice of the type of X-Pack -license to install, in addition to X-Pack Security configuration and built-in user configuration: +Upon choosing to install {xpack} plugin, an additional step allows a choice of the type of {xpack} +license to install, in addition to {security} configuration and built-in user configuration: [[msi-installer-xpack]] image::images/msi_installer/msi_installer_xpack.png[] -NOTE: X-Pack includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the +NOTE: {xpack} includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. @@ -286,43 +286,43 @@ as _properties_ within Windows Installer documentation) that can be passed to ms `XPACKLICENSE`:: - When installing X-Pack plugin, the type of license to install, + When installing {xpack} plugin, the type of license to install, either `Basic` or `Trial`. Defaults to `Basic` `XPACKSECURITYENABLED`:: - When installing X-Pack plugin with a `Trial` license, whether X-Pack Security should be enabled. + When installing {xpack} plugin with a `Trial` license, whether {security} should be enabled. Defaults to `true` `BOOTSTRAPPASSWORD`:: - When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password to + When installing {xpack} plugin with a `Trial` license and {security} enabled, the password used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore. Defaults to a randomized value. `SKIPSETTINGPASSWORDS`:: - When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, whether the + When installing {xpack} plugin with a `Trial` license and {security} enabled, whether the installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`.
Defaults to `false` `ELASTICUSERPASSWORD`:: - When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password + When installing {xpack} plugin with a `Trial` license and {security} enabled, the password to use for the built-in user `elastic`. Defaults to `""` `KIBANAUSERPASSWORD`:: - When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password + When installing {xpack} plugin with a `Trial` license and {security} enabled, the password to use for the built-in user `kibana`. Defaults to `""` `LOGSTASHSYSTEMUSERPASSWORD`:: - When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password + When installing {xpack} plugin with a `Trial` license and {security} enabled, the password to use for the built-in user `logstash_system`. Defaults to `""` To pass a value, simply append the property name and value using the format `<PROPERTYNAME>="<VALUE>"` to -the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[X-Pack]: +the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[{xpack}]: ["source","sh",subs="attributes,callouts"] -------------------------------------------- diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index c9b48f0c8650d..624194092a02e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -237,9 +237,14 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input -> new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE))); - // TODO deprecate edgeNGram - filters.add(PreConfiguredTokenFilter.singleton("edgeNGram", false, input -> - new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE))); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation", + "The [edgeNGram] token filter name is deprecated and will be removed in a future version.
" + + "Please change the filter name to [edge_ngram] instead."); + } + return new EdgeNGramTokenFilter(reader, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE); + })); filters.add(PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES))); filters.add(PreConfiguredTokenFilter.singleton("french_stem", false, input -> new SnowballFilter(input, new FrenchStemmer()))); @@ -256,8 +261,14 @@ public List getPreConfiguredTokenFilters() { LimitTokenCountFilterFactory.DEFAULT_MAX_TOKEN_COUNT, LimitTokenCountFilterFactory.DEFAULT_CONSUME_ALL_TOKENS))); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new)); - // TODO deprecate nGram - filters.add(PreConfiguredTokenFilter.singleton("nGram", false, NGramTokenFilter::new)); + filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> { + if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) { + DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation", + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead."); + } + return new NGramTokenFilter(reader); + })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("reverse", false, ReverseStringFilter::new)); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java new file mode 100644 index 0000000000000..1d2b8a36810eb --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisPluginTests.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.MockTokenizer; +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.TokenFilterFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Map; + +public class CommonAnalysisPluginTests extends ESTestCase { + + /** + * Check that the deprecated name "nGram" issues a deprecation warning for indices created since 6.4.0 + */ + public void testNGramDeprecationWarning() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map<String, TokenFilterFactory> tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + assertWarnings( + "The [nGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [ngram] instead."); + } + } + + /** + * Check that the deprecated name "nGram" does NOT issue a deprecation warning for indices created before 6.4.0 + */ + public void testNGramNoDeprecationWarningPre6_4() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map<String, TokenFilterFactory> tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("nGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + } + } + + /** + * Check that the deprecated name "edgeNGram" issues a deprecation warning for indices created since 6.4.0 + */ + public void testEdgeNGramDeprecationWarning() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_6_4_0, Version.CURRENT)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map<String, TokenFilterFactory> tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); + Tokenizer tokenizer = new
MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + assertWarnings( + "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " + + "Please change the filter name to [edge_ngram] instead."); + } + } + + /** + * Check that the deprecated name "edgeNGram" does NOT issue a deprecation warning for indices created before 6.4.0 + */ + public void testEdgeNGramNoDeprecationWarningPre6_4() throws IOException { + Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put(IndexMetaData.SETTING_VERSION_CREATED, + VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.V_6_3_0)) + .build(); + + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + try (CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin()) { + Map<String, TokenFilterFactory> tokenFilters = createTestAnalysis(idxSettings, settings, commonAnalysisPlugin).tokenFilter; + TokenFilterFactory tokenFilterFactory = tokenFilters.get("edgeNGram"); + Tokenizer tokenizer = new MockTokenizer(); + tokenizer.setReader(new StringReader("foo bar")); + assertNotNull(tokenFilterFactory.create(tokenizer)); + } + } +} diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java index 078e0a9cb9ed3..1cf6ef4696d04 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java @@ -32,15 +32,11 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.io.StringReader; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.ArrayList; import java.util.Arrays; -import java.util.List; -import java.util.Random; import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween; import static org.hamcrest.Matchers.instanceOf; @@ -129,7 +125,7 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception { for (int i = 0; i < iters; i++) { final Index index = new Index("test", "_na_"); final String name = "ngr"; - Version v = randomVersion(random()); + Version v = VersionUtils.randomVersion(random()); Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3); boolean reverse = random().nextBoolean(); if (reverse) { @@ -150,7 +146,6 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception { } } - /* * test that throws an error when trying to get an NGramTokenizer where difference between max_gram and min_gram * is greater than the allowed value of max_ngram_diff @@ -175,16 +170,4 @@ public void testMaxNGramDiffException() throws Exception{ + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + "] index level setting.", ex.getMessage()); } - - private Version randomVersion(Random random) throws IllegalArgumentException, IllegalAccessException { - Field[] declaredFields = Version.class.getFields(); - List<Field> versionFields = new ArrayList<>(); - for (Field field : declaredFields) { - if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getName().startsWith("V_") &&
field.getType() == Version.class) { - versionFields.add(field); - } - } - return (Version) versionFields.get(random.nextInt(versionFields.size())).get(Version.class); - } - } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..702782e1c5ed7 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +a3dba337d06e1f5930cb7ae638c1655b99ce0cb7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-6705632810.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index b333863b94d64..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f72ad4b6474c2d59b0eed0ca84eddd1f99d29129 \ No newline at end of file diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index 51a1b7cecb3f8..6495659d9cdc0 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -74,16 +74,12 @@ class org.elasticsearch.index.fielddata.ScriptDocValues$Longs { Long get(int) long getValue() List getValues() - org.joda.time.ReadableDateTime getDate() - List getDates() } class org.elasticsearch.index.fielddata.ScriptDocValues$Dates { org.joda.time.ReadableDateTime get(int) org.joda.time.ReadableDateTime getValue() List getValues() - org.joda.time.ReadableDateTime getDate() - List getDates() } class org.elasticsearch.index.fielddata.ScriptDocValues$Doubles { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml index ede2927b992e0..617b8df61b6bd 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/50_script_doc_values.yml @@ -106,28 +106,6 @@ setup: source: "doc.date.value" - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } - - do: - warnings: - - getDate is no longer necessary on date fields as the value is now a date. - search: - body: - script_fields: - field: - script: - source: "doc['date'].date" - - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } - - - do: - warnings: - - getDates is no longer necessary on date fields as the values are now dates. - search: - body: - script_fields: - field: - script: - source: "doc['date'].dates.get(0)" - - match: { hits.hits.0.fields.field.0: '2017-01-01T12:11:12.000Z' } - --- "geo_point": - do: @@ -213,28 +191,6 @@ setup: source: "doc['long'].value" - match: { hits.hits.0.fields.field.0: 12348732141234 } - - do: - warnings: - - getDate on numeric fields is deprecated. Use a date field to get dates. - search: - body: - script_fields: - field: - script: - source: "doc['long'].date" - - match: { hits.hits.0.fields.field.0: '2361-04-26T03:22:21.234Z' } - - - do: - warnings: - - getDates on numeric fields is deprecated. Use a date field to get dates. 
- search: - body: - script_fields: - field: - script: - source: "doc['long'].dates.get(0)" - - match: { hits.hits.0.fields.field.0: '2361-04-26T03:22:21.234Z' } - --- "integer": - do: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml index 039b54aab01d1..76b63e171692e 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml @@ -2,7 +2,7 @@ # setup: - skip: - version: " - 6.4.0" + version: " - 6.3.99" reason: "moving_fn added in 6.4.0" - do: indices.create: diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..f99b0177de590 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +473a7f4d955f132bb498482648266653f8da85bd \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 6720beb8d8682..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4e19c53f29fa9b40bd7ad12ff598e3f08d507a3 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..08269eed6360f --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +c5a72b9a790e2552248c8bbb36af47c4c399ba27 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 861a2110e164a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -23dd8cb3834f3641d9b3e8bc3d38281389a597bc \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..325fe16120428 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +14f680ab9b886c7c5224ff682a7fa70b6df44a05 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index c7c19feb57df5..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e8119a17448a6f5512ded0bd2a6faa7fc8e70890 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 
b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..9e88119ed1d16 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +e033c68c9ec1ba9cd8439758adf7eb5fee22acef \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 94e8c2698389b..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -336d9ac698066b8cf8a448f193e4a29ef163baa8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..74721c857571c --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +08df0a5029f11c109b22064dec78c05dfa25f9e3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 364facee9efe6..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1e77951a83fc6a9deab884773314992fefa14f3 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..1c257797c08e2 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +a9d1819b2b13f134f6a605ab5a59ce3c602c0460 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index b55d8cf04ec32..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4da149a16673c6326f4898ad877756259f676f8 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..117ac05c91fe1 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +47bc91ccb0cdf0c1c404646ffe0d5fd6b020a4ab \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-6705632810.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index dcc2249c45f2f..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-ab4141b43cc6c2680d5f5a0b5086299f38ebec4d \ No newline at end of file diff --git a/plugins/examples/meta-plugin/build.gradle b/plugins/examples/meta-plugin/build.gradle deleted file mode 100644 index db28e6378713e..0000000000000 --- a/plugins/examples/meta-plugin/build.gradle +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -// A meta plugin packaging example that bundles multiple plugins in a single zip. - -apply plugin: 'elasticsearch.es-meta-plugin' - -es_meta_plugin { - name 'meta-plugin' - description 'example meta plugin' - plugins = ['dummy-plugin1', 'dummy-plugin2'] -} diff --git a/plugins/examples/meta-plugin/dummy-plugin1/build.gradle b/plugins/examples/meta-plugin/dummy-plugin1/build.gradle deleted file mode 100644 index 5a02e993f8c25..0000000000000 --- a/plugins/examples/meta-plugin/dummy-plugin1/build.gradle +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -apply plugin: 'elasticsearch.esplugin' - -esplugin { - name 'dummy-plugin1' - description 'A dummy plugin' - classname 'org.elasticsearch.example.DummyPlugin1' -} - -test.enabled = false -integTestRunner.enabled = false \ No newline at end of file diff --git a/plugins/examples/meta-plugin/dummy-plugin1/src/main/java/org/elasticsearch/example/DummyPlugin1.java b/plugins/examples/meta-plugin/dummy-plugin1/src/main/java/org/elasticsearch/example/DummyPlugin1.java deleted file mode 100644 index 65102dbc2e337..0000000000000 --- a/plugins/examples/meta-plugin/dummy-plugin1/src/main/java/org/elasticsearch/example/DummyPlugin1.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.example; - -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; - -import java.util.List; - -import static java.util.Collections.singletonList; - -public class DummyPlugin1 extends Plugin {} diff --git a/plugins/examples/meta-plugin/dummy-plugin2/build.gradle b/plugins/examples/meta-plugin/dummy-plugin2/build.gradle deleted file mode 100644 index d90983adfed0c..0000000000000 --- a/plugins/examples/meta-plugin/dummy-plugin2/build.gradle +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -apply plugin: 'elasticsearch.esplugin' - -esplugin { - name 'dummy-plugin2' - description 'Another dummy plugin' - classname 'org.elasticsearch.example.DummyPlugin2' -} - -test.enabled = false -integTestRunner.enabled = false \ No newline at end of file diff --git a/plugins/examples/meta-plugin/dummy-plugin2/src/main/java/org/elasticsearch/example/DummyPlugin2.java b/plugins/examples/meta-plugin/dummy-plugin2/src/main/java/org/elasticsearch/example/DummyPlugin2.java deleted file mode 100644 index 2d74d7603d15f..0000000000000 --- a/plugins/examples/meta-plugin/dummy-plugin2/src/main/java/org/elasticsearch/example/DummyPlugin2.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.example; - -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; - -import java.util.List; - -import static java.util.Collections.singletonList; - -public class DummyPlugin2 extends Plugin {} diff --git a/plugins/examples/meta-plugin/src/main/resources/meta-plugin-descriptor.properties b/plugins/examples/meta-plugin/src/main/resources/meta-plugin-descriptor.properties deleted file mode 100644 index 1fd5a86b95a54..0000000000000 --- a/plugins/examples/meta-plugin/src/main/resources/meta-plugin-descriptor.properties +++ /dev/null @@ -1,4 +0,0 @@ -# The name of the meta plugin -name=my_meta_plugin -# The description of the meta plugin -description=A meta plugin example \ No newline at end of file diff --git a/plugins/examples/meta-plugin/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java b/plugins/examples/meta-plugin/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java deleted file mode 100644 index d1f9e6b73703e..0000000000000 --- a/plugins/examples/meta-plugin/src/test/java/org/elasticsearch/smoketest/SmokeTestPluginsClientYamlTestSuiteIT.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.smoketest; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -public class SmokeTestPluginsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - - public SmokeTestPluginsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } -} - diff --git a/plugins/examples/meta-plugin/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml b/plugins/examples/meta-plugin/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml deleted file mode 100644 index 011a278ed8949..0000000000000 --- a/plugins/examples/meta-plugin/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_basic.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Integration tests for testing meta plugins -# -"Check meta plugin install": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - - match: { nodes.$master.plugins.0.name: dummy-plugin1 } - - match: { nodes.$master.plugins.1.name: dummy-plugin2 } diff --git a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java index 5a146f75919ce..b910526ef3d98 100644 --- a/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java +++ b/plugins/examples/script-expert-scoring/src/main/java/org/elasticsearch/example/expertscript/ExpertScriptPlugin.java @@ -54,7 +54,7 @@ public String getType() { @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { - if (context.equals(SearchScript.CONTEXT) == false) { + if (context.equals(SearchScript.SCRIPT_SCORE_CONTEXT) == false) { throw new IllegalArgumentException(getType() + " scripts cannot be used for context [" + context.name + "]"); } // we use the script "source" as the script identifier diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 331e2dadca2da..833539905103a 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -66,9 +66,6 @@ public InputStream readBlob(String blobName) throws IOException { @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - if (blobExists(blobName)) { - throw new FileAlreadyExistsException("blob [" + blobName + "] already exists, cannot overwrite"); - } blobStore.writeBlob(buildKey(blobName), inputStream, blobSize); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 5dc03ea45de03..83aafdde2b1ab 100644 --- 
a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -28,6 +28,7 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobListOption; import com.google.cloud.storage.Storage.CopyRequest; +import com.google.cloud.storage.StorageException; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -47,12 +48,15 @@ import java.nio.channels.Channels; import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; +import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; + class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore { // The recommended maximum size of a blob that should be uploaded in a single @@ -204,24 +208,32 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I * @param inputStream the stream containing the blob data */ private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { - final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException(() -> storage.writer(blobInfo)); - Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { - @Override - public boolean isOpen() { - return writeChannel.isOpen(); - } + try { + final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException( + () -> storage.writer(blobInfo, Storage.BlobWriteOption.doesNotExist())); + Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { + @Override + public boolean isOpen() { + return writeChannel.isOpen(); + } - @Override - public void close() throws IOException { - SocketAccess.doPrivilegedVoidIOException(writeChannel::close); - } + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); + } - @SuppressForbidden(reason = "Channel is based of a socket not a file") - @Override - public int write(ByteBuffer src) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + @SuppressForbidden(reason = "Channel is based of a socket not a file") + @Override + public int write(ByteBuffer src) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + } + })); + } catch (StorageException se) { + if (se.getCode() == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } - })); + throw se; + } } /** @@ -238,7 +250,17 @@ private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); Streams.copy(inputStream, baos); - SocketAccess.doPrivilegedVoidIOException(() -> storage.create(blobInfo, baos.toByteArray())); + SocketAccess.doPrivilegedVoidIOException( + () -> { + try { + storage.create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist()); + } catch (StorageException se) { + if (se.getCode() == 
HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); + } + throw se; + } + }); } /** @@ -295,8 +317,8 @@ void deleteBlobs(Collection blobNames) throws IOException { /** * Moves a blob within the same bucket * - * @param sourceBlob name of the blob to move - * @param targetBlob new name of the blob in the same bucket + * @param sourceBlobName name of the blob to move + * @param targetBlobName new name of the blob in the same bucket */ void moveBlob(String sourceBlobName, String targetBlobName) throws IOException { final BlobId sourceBlobId = BlobId.of(bucket, sourceBlobName); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index 2b52b7a32a9cc..1b31b3018e48a 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -56,6 +56,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * {@link MockStorage} mocks a {@link Storage} client by storing all the blobs @@ -113,7 +114,14 @@ public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... option if (bucketName.equals(blobInfo.getBucket()) == false) { throw new StorageException(404, "Bucket not found"); } - blobs.put(blobInfo.getName(), content); + if (Stream.of(options).anyMatch(option -> option.equals(BlobTargetOption.doesNotExist()))) { + byte[] existingBytes = blobs.putIfAbsent(blobInfo.getName(), content); + if (existingBytes != null) { + throw new StorageException(412, "Blob already exists"); + } + } else { + blobs.put(blobInfo.getName(), content); + } return get(BlobId.of(blobInfo.getBucket(), blobInfo.getName())); } @@ -243,9 +251,16 @@ public boolean isOpen() { } @Override - public void close() throws IOException { + public void close() { IOUtils.closeWhileHandlingException(writableByteChannel); - blobs.put(blobInfo.getName(), output.toByteArray()); + if (Stream.of(options).anyMatch(option -> option.equals(BlobWriteOption.doesNotExist()))) { + byte[] existingBytes = blobs.putIfAbsent(blobInfo.getName(), output.toByteArray()); + if (existingBytes != null) { + throw new StorageException(412, "Blob already exists"); + } + } else { + blobs.put(blobInfo.getName(), output.toByteArray()); + } } }; } diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 1afda01130ba7..edc10bdec3a86 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -170,91 +170,6 @@ private void assertControllerSpawns(final Function pluginsDir } } - /** - * Two plugins in a meta module - one with a controller daemon and one without. 
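The repository-gcs change above replaces a racy check-then-write (a blobExists call followed by a separate upload) with a server-side precondition: the upload is attempted with doesNotExist(), and the resulting HTTP 412 is translated into a FileAlreadyExistsException. The MockStorage test double mirrors the same contract with putIfAbsent; here is a minimal standalone sketch of that create-if-absent pattern, with illustrative class and method names:

---------------------------------------------------------------------------
import java.nio.file.FileAlreadyExistsException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustrative in-memory store: putIfAbsent makes the existence check and
// the write a single atomic step, so two concurrent writers cannot both
// succeed the way they could with a separate exists-then-write sequence.
class CreateIfAbsentStore {
    private final ConcurrentMap<String, byte[]> blobs = new ConcurrentHashMap<>();

    void writeBlob(String name, byte[] content) throws FileAlreadyExistsException {
        byte[] existing = blobs.putIfAbsent(name, content);
        if (existing != null) {
            // the real client reports this as an HTTP 412 precondition failure
            throw new FileAlreadyExistsException(name);
        }
    }
}
---------------------------------------------------------------------------

Either the write installs the blob or the caller learns atomically that one already existed; there is no window between the check and the write for a concurrent writer to slip through.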
- */ - public void testControllerSpawnMeta() throws Exception { - runTestControllerSpawnMeta(Environment::pluginsFile, false); - runTestControllerSpawnMeta(Environment::modulesFile, true); - } - - - private void runTestControllerSpawnMeta( - final Function pluginsDirFinder, final boolean expectSpawn) throws Exception { - /* - * On Windows you can not directly run a batch file - you have to run cmd.exe with the batch - * file as an argument and that's out of the remit of the controller daemon process spawner. - */ - assumeFalse("This test does not work on Windows", Constants.WINDOWS); - - Path esHome = createTempDir().resolve("esHome"); - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.toString()); - Settings settings = settingsBuilder.build(); - - Environment environment = TestEnvironment.newEnvironment(settings); - - Path metaModule = pluginsDirFinder.apply(environment).resolve("meta_module"); - Files.createDirectories(environment.modulesFile()); - Files.createDirectories(metaModule); - PluginTestUtil.writeMetaPluginProperties( - metaModule, - "description", "test_plugin", - "name", "meta_plugin", - "plugins", "test_plugin,other_plugin"); - - // this plugin will have a controller daemon - Path plugin = metaModule.resolve("test_plugin"); - - Files.createDirectories(plugin); - PluginTestUtil.writePluginProperties( - plugin, - "description", "test_plugin", - "version", Version.CURRENT.toString(), - "elasticsearch.version", Version.CURRENT.toString(), - "name", "test_plugin", - "java.version", "1.8", - "classname", "TestPlugin", - "has.native.controller", "true"); - Path controllerProgram = Platforms.nativeControllerPath(plugin); - createControllerProgram(controllerProgram); - - // this plugin will not have a controller daemon - Path otherPlugin = metaModule.resolve("other_plugin"); - Files.createDirectories(otherPlugin); - PluginTestUtil.writePluginProperties( - otherPlugin, - "description", "other_plugin", - "version", Version.CURRENT.toString(), - "elasticsearch.version", Version.CURRENT.toString(), - "name", "other_plugin", - "java.version", "1.8", - "classname", "OtherPlugin", - "has.native.controller", "false"); - - Spawner spawner = new Spawner(); - spawner.spawnNativeControllers(environment); - - List processes = spawner.getProcesses(); - - if (expectSpawn) { - // as there should only be a reference in the list for the plugin that had the controller daemon, we expect one here - assertThat(processes, hasSize(1)); - Process process = processes.get(0); - final InputStreamReader in = - new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8); - try (BufferedReader stdoutReader = new BufferedReader(in)) { - String line = stdoutReader.readLine(); - assertEquals("I am alive", line); - spawner.close(); - // fail if the process does not die within one second; usually it will be even quicker but it depends on OS scheduling - assertTrue(process.waitFor(1, TimeUnit.SECONDS)); - } - } else { - assertThat(processes, hasSize(0)); - } - } - public void testControllerSpawnWithIncorrectDescriptor() throws IOException { // this plugin will have a controller daemon Path esHome = createTempDir().resolve("esHome"); diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 9f10c5dcfab73..6c4004074fa3b 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -30,6 +30,26 @@ task bwcTest { } for (Version version : bwcVersions.wireCompatible) { + /* + * The goal 
here is to:
+ *
+ * - start three nodes on the old version
+ * - run tests with systemProperty 'tests.rest.suite', 'old_cluster'
+ * - shut down one node
+ * - start a node with the new version
+ * - run tests with systemProperty 'tests.rest.suite', 'mixed_cluster'
+ * - shut down one node on the old version
+ * - start a node with the new version
+ * - run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again
+ * - shut down the last node with the old version
+ * - start a node with the new version
+ * - run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster'
+ * - shut down the entire cluster
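+ *
+ * The two mixed_cluster rounds are told apart by the tests.first_round
+ * system property that each runner sets below.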
    + * + * Be careful: gradle dry run spits out tasks in the wrong order but, + * strangely, running the tasks works properly. + */ String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { @@ -39,8 +59,8 @@ for (Version version : bwcVersions.wireCompatible) { Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { bwcVersion = version - numBwcNodes = 2 - numNodes = 2 + numBwcNodes = 3 + numNodes = 3 clusterName = 'rolling-upgrade' setting 'repositories.url.allowed_urls', 'http://snapshot.test*' if (version.onOrAfter('5.3.0')) { @@ -53,43 +73,57 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + } + } - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. 
*/ - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, + 0, { oldClusterTest.nodes.get(1).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'true' + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, + 1, { oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop" - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. */ - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir} - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - } + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, + 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. + */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java new file mode 100644 index 0000000000000..6f4453aa06cc9 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.yaml.ObjectPath; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.function.Predicate; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; +import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; + +public abstract class AbstractRollingTestCase extends ESRestTestCase { + protected enum ClusterType { + OLD, + MIXED, + UPGRADED; + + public static ClusterType parse(String value) { + switch (value) { + case "old_cluster": + return OLD; + case "mixed_cluster": + return MIXED; + case "upgraded_cluster": + return UPGRADED; + default: + throw new AssertionError("unknown cluster type: " + value); + } + } + } + + protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); + + @Override + protected final boolean preserveIndicesUponCompletion() { + return true; + } + + @Override + protected final boolean preserveReposUponCompletion() { + return true; + } + + @Override + protected final Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()) + // increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. 
the waits for green need to be longer than a minute to + // account for delayed shards + .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s") + .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") + .build(); + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java new file mode 100644 index 0000000000000..f1e01d24acff6 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +/** + * Basic test that indexed documents survive the rolling restart. See + * {@link RecoveryIT} for much more in depth testing of the mechanism + * by which they survive. 
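+ *
+ * Each round indexes five more documents into test_index, so the count
+ * expected at the start of a round grows by five each time: the first
+ * mixed round sees 5, the second sees 10, and the upgraded cluster sees 15.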
+ */ +public class IndexingIT extends AbstractRollingTestCase { + public void testIndexing() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + break; + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + if (CLUSTER_TYPE == ClusterType.OLD) { + Request createTestIndex = new Request("PUT", "/test_index"); + createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + + String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; + Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas"); + createIndexWithReplicas.setJsonEntity(recoverQuickly); + client().performRequest(createIndexWithReplicas); + + Request createEmptyIndex = new Request("PUT", "/empty_index"); + // Ask for recovery to be quick + createEmptyIndex.setJsonEntity(recoverQuickly); + client().performRequest(createEmptyIndex); + + bulk("test_index", "_OLD", 5); + bulk("index_with_replicas", "_OLD", 5); + } + + int expectedCount; + switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount("test_index", expectedCount); + assertCount("index_with_replicas", 5); + assertCount("empty_index", 0); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk("test_index", "_" + CLUSTER_TYPE, 5); + Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount("test_index", expectedCount + 6); + + Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + + assertCount("test_index", expectedCount + 5); + } + } + + private void bulk(String index, String valueSuffix, int count) throws IOException { + StringBuilder b = new StringBuilder(); + for (int i = 0; i < count; i++) { + b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.setJsonEntity(b.toString()); + client().performRequest(bulk); + } + + private void assertCount(String index, int count) throws IOException { + Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); 
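+ // Restricting the response with filter_path means the whole body is just
+ // {"hits":{"total":N}}, so the assertion below can compare it as an exact
+ // string instead of parsing JSON.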
+ searchTestIndexRequest.addParameter("filter_path", "hits.total"); + Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); + assertEquals("{\"hits\":{\"total\":" + count + "}}", + EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)); + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index d208a7097a784..350636551d9ad 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -46,53 +46,13 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; -public class RecoveryIT extends ESRestTestCase { - - @Override - protected boolean preserveIndicesUponCompletion() { - return true; - } - - @Override - protected boolean preserveReposUponCompletion() { - return true; - } - - private enum CLUSTER_TYPE { - OLD, - MIXED, - UPGRADED; - - public static CLUSTER_TYPE parse(String value) { - switch (value) { - case "old_cluster": - return OLD; - case "mixed_cluster": - return MIXED; - case "upgraded_cluster": - return UPGRADED; - default: - throw new AssertionError("unknown cluster type: " + value); - } - } - } - - private final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); - - @Override - protected Settings restClientSettings() { - return Settings.builder().put(super.restClientSettings()) - // increase the timeout here to 90 seconds to handle long waits for a green - // cluster health. the waits for green need to be longer than a minute to - // account for delayed shards - .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s") - .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s") - .build(); - } - +/** + * In depth testing of the recovery mechanism during a rolling restart. + */ +public class RecoveryIT extends AbstractRollingTestCase { public void testHistoryUUIDIsGenerated() throws Exception { final String index = "index_history_uuid"; - if (clusterType == CLUSTER_TYPE.OLD) { + if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) @@ -102,7 +62,7 @@ public void testHistoryUUIDIsGenerated() throws Exception { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); createIndex(index, settings.build()); - } else if (clusterType == CLUSTER_TYPE.UPGRADED) { + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { ensureGreen(index); Response response = client().performRequest("GET", index + "/_stats", Collections.singletonMap("level", "shards")); assertOK(response); @@ -157,11 +117,11 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { final Map nodeMap = objectPath.evaluate("nodes"); List nodes = new ArrayList<>(nodeMap.keySet()); - switch (clusterType) { + switch (CLUSTER_TYPE) { case OLD: Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -181,6 +141,7 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { assertOK(client().performRequest("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + nodes.get(0), 60); assertCount(index, "_only_nodes:" + nodes.get(1), 60); + assertCount(index, "_only_nodes:" + nodes.get(2), 60); // make sure that we can index while the replicas are recovering updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "primaries")); break; @@ -191,9 +152,10 @@ public void testRecoveryWithConcurrentIndexing() throws Exception { assertOK(client().performRequest("POST", index + "/_refresh")); assertCount(index, "_only_nodes:" + nodes.get(0), 110); assertCount(index, "_only_nodes:" + nodes.get(1), 110); + assertCount(index, "_only_nodes:" + nodes.get(2), 110); break; default: - throw new IllegalStateException("unknown type " + clusterType); + throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } } @@ -221,11 +183,11 @@ private String getNodeId(Predicate versionPredicate) throws IOException public void testRelocationWithConcurrentIndexing() throws Exception { final String index = "relocation_with_concurrent_indexing"; - switch (clusterType) { + switch (CLUSTER_TYPE) { case OLD: Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) // if the node with the replica is the first to be restarted, while a replica is still recovering // then delayed allocation will kick in. 
When the node comes back, the master will search for a copy // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN @@ -258,7 +220,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { break; case UPGRADED: updateIndexSettings(index, Settings.builder() - .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) .put("index.routing.allocation.include._id", (String)null) ); asyncIndexDocs(index, 60, 50).get(); @@ -271,9 +233,10 @@ public void testRelocationWithConcurrentIndexing() throws Exception { assertCount(index, "_only_nodes:" + nodes.get(0), 110); assertCount(index, "_only_nodes:" + nodes.get(1), 110); + assertCount(index, "_only_nodes:" + nodes.get(2), 110); break; default: - throw new IllegalStateException("unknown type " + clusterType); + throw new IllegalStateException("unknown type " + CLUSTER_TYPE); } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index f3c0256b2c3c5..7932328c8c2f6 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -60,4 +60,3 @@ protected Settings restClientSettings() { .build(); } } - diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 0810341db1317..0cce81c8985cd 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -1,74 +1,8 @@ ---- -"Index data and search on the mixed cluster": - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - - - do: - search: - index: test_index - - - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster - - - do: - search: - index: index_with_replicas - - - match: { hits.total: 5 } # just check we recovered fine - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v1_mixed", "f2": 5}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v2_mixed", "f2": 6}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v3_mixed", "f2": 7}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v4_mixed", "f2": 8}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v5_mixed", "f2": 9}' - - - do: - index: - index: test_index - type: doc - id: d10 - body: {"f1": "v6_mixed", "f2": 10} - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 11 } # 5 docs from old cluster, 6 docs from mixed cluster - - - do: - delete: - index: test_index - type: doc - id: d10 - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 10 } - --- "Verify that we can still find things with the template": - do: search_template: + index: test_search_template body: id: test_search_template params: diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml 
b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index 067eba6e4b860..04d85eb607835 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -1,76 +1,5 @@ --- -"Index data, search, and create things in the cluster state that we'll validate are there after the ugprade": - - do: - indices.create: - index: test_index - body: - settings: - index: - number_of_replicas: 0 - - do: - indices.create: - index: index_with_replicas # dummy index to ensure we can recover indices with replicas just fine - body: - # if the node with the replica is the first to be restarted, then delayed - # allocation will kick in, and the cluster health won't return to GREEN - # before timing out - index.unassigned.node_left.delayed_timeout: "100ms" - - - do: - indices.create: - index: empty_index # index to ensure we can recover empty indices - body: - # if the node with the replica is the first to be restarted, then delayed - # allocation will kick in, and the cluster health won't return to GREEN - # before timing out - index.unassigned.node_left.delayed_timeout: "100ms" - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v1_old", "f2": 0}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v5_old", "f2": 4}' - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - '{"index": {"_index": "index_with_replicas", "_type": "doc"}}' - - '{"f1": "d_old"}' - - - do: - indices.refresh: - index: test_index,index_with_replicas - - - do: - search: - index: test_index - - - match: { hits.total: 5 } - - - do: - search: - index: index_with_replicas - - - match: { hits.total: 5 } - +"Create things in the cluster state that we'll validate are there after the ugprade": - do: snapshot.create_repository: repository: my_repo @@ -91,6 +20,21 @@ } - match: { "acknowledged": true } + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v1_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v2_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v3_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v4_old"}' + - '{"index": {"_index": "test_search_template", "_type": "doc"}}' + - '{"f1": "v5_old"}' + - do: put_script: id: test_search_template @@ -105,6 +49,7 @@ - do: search_template: + index: test_search_template body: id: test_search_template params: diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 011db854ecdc6..3e293f91ce12a 100644 --- 
a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,55 +1,8 @@ ---- -"Index data and search on the upgraded cluster": - - do: - cluster.health: - wait_for_status: green - wait_for_nodes: 2 - # wait for long enough that we give delayed unassigned shards to stop being delayed - timeout: 70s - level: shards - index: test_index,index_with_replicas,empty_index - - - do: - search: - index: test_index - - - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters - - - do: - search: - index: index_with_replicas - - - match: { hits.total: 5 } # just check we recovered fine - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v1_upgraded", "f2": 10}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v2_upgraded", "f2": 11}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v3_upgraded", "f2": 12}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v4_upgraded", "f2": 13}' - - '{"index": {"_index": "test_index", "_type": "doc"}}' - - '{"f1": "v5_upgraded", "f2": 14}' - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs - --- "Verify that we can still find things with the template": - do: search_template: + index: test_search_template body: id: test_search_template params: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml index 880efaff19aa6..861e1200991b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.state/20_filtering.yml @@ -163,12 +163,24 @@ setup: version: " - 6.3.99" reason: "cluster state including cluster_uuid at the top level is new in v6.4.0 and higher" + # Get the current cluster_uuid + - do: + cluster.state: {} + - set: { metadata.cluster_uuid : cluster_uuid } + - do: cluster.state: - metric: [ master_node, version, metadata ] + metric: [ master_node, version ] - - is_true: cluster_uuid + - match: { cluster_uuid: $cluster_uuid } - is_true: master_node - is_true: version - is_true: state_uuid - - is_true: metadata + + - do: + cluster.state: + metric: [ routing_table ] + index: testidx + + - match: { cluster_uuid: $cluster_uuid } + - is_true: routing_table diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index f94cf286fd898..bfd3fc58fdf4b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -1,8 +1,8 @@ --- "Shrink index via API": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: expects warnings that pre-7.0.0 will not send features: "warnings" # creates an index with one document solely allocated on the master node # and shrinks it into a new index with a single shard @@ -67,8 +67,6 @@ body: settings: index.number_of_replicas: 0 - warnings: - - "resize operations without copying settings is 
deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index 6f532ff81c688..2e7a13b38e10e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,8 +1,8 @@ --- "Shrink index ignores target template mapping": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: expects warnings that pre-7.0.0 will not send features: "warnings" - do: @@ -71,8 +71,6 @@ body: settings: index.number_of_replicas: 0 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 53a12aad787f7..8c4c84c4be152 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during shrink index": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: expects warnings that pre-7.0.0 will not send features: "warnings" - do: @@ -48,6 +48,8 @@ settings: index.number_of_replicas: 0 index.merge.scheduler.max_thread_count: 2 + warnings: + - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" - do: cluster.health: @@ -63,19 +65,17 @@ - match: { copy-settings-target.settings.index.blocks.write: "true" } - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } - # now we do a actual shrink and do not copy settings (by default) + # now we do a actual shrink and copy settings (by default) - do: indices.shrink: index: "source" - target: "no-copy-settings-target" + target: "default-copy-settings-target" wait_for_active_shards: 1 master_timeout: 10s body: settings: index.number_of_replicas: 0 index.merge.scheduler.max_thread_count: 2 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -83,13 +83,13 @@ - do: indices.get_settings: - index: "no-copy-settings-target" + index: "default-copy-settings-target" - # only the request setting should be copied - - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count - - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - - is_false: no-copy-settings-target.settings.index.blocks.write - - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id + # settings should be copied + - match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } + - match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - match: { default-copy-settings-target.settings.index.blocks.write: "true" } + - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master } # now we do a actual shrink and try to 
set no copy settings - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 8cfe77042dd3f..74774f13e212e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -33,8 +33,8 @@ setup: --- "Split index via API": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: pre-7.0.0 will send warnings features: "warnings" # make it read-only @@ -61,8 +61,6 @@ setup: settings: index.number_of_replicas: 0 index.number_of_shards: 4 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -108,8 +106,7 @@ setup: "Split from 1 to N": - skip: version: " - 6.99.99" - reason: Automatic preparation for splitting was added in 7.0.0 - features: "warnings" + reason: automatic preparation for splitting was added in 7.0.0 - do: indices.create: index: source_one_shard @@ -163,8 +160,6 @@ setup: settings: index.number_of_replicas: 0 index.number_of_shards: 5 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -205,13 +200,11 @@ setup: - match: { _id: "3" } - match: { _source: { foo: "hello world 3" } } - - --- "Create illegal split indices": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: pre-7.0.0 will send warnings features: "warnings" # try to do an illegal split with number_of_routing_shards set @@ -227,8 +220,6 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 4 index.number_of_routing_shards: 8 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" # try to do an illegal split with illegal number_of_shards - do: @@ -242,5 +233,3 @@ setup: settings: index.number_of_replicas: 0 index.number_of_shards: 6 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 88d3f3c610202..727e1e374ba65 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -1,8 +1,8 @@ --- "Split index ignores target template mapping": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: pre-7.0.0 will send warnings features: "warnings" # create index @@ -65,8 +65,6 @@ settings: index.number_of_shards: 2 index.number_of_replicas: 0 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 9e64b2b8130ad..90d4080e46379 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during split index": - skip: - version: " - 6.3.99" - reason: expects warnings that pre-6.4.0 will not send + version: " - 6.9.99" + reason: expects warnings that pre-7.0.0 will not send features: "warnings" - do: @@ -50,6 +50,9 @@ index.number_of_replicas: 0 index.number_of_shards: 2 index.merge.scheduler.max_thread_count: 2 + warnings: + - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" + - do: cluster.health: @@ -65,11 +68,11 @@ - match: { copy-settings-target.settings.index.blocks.write: "true" } - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } - # now we do a actual shrink and do not copy settings (by default) + # now we do a actual shrink and copy settings (by default) - do: indices.split: index: "source" - target: "no-copy-settings-target" + target: "default-copy-settings-target" wait_for_active_shards: 1 master_timeout: 10s body: @@ -77,8 +80,6 @@ index.number_of_replicas: 0 index.number_of_shards: 2 index.merge.scheduler.max_thread_count: 2 - warnings: - - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior" - do: cluster.health: @@ -86,13 +87,13 @@ - do: indices.get_settings: - index: "no-copy-settings-target" + index: "default-copy-settings-target" - # only the request setting should be copied - - is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count - - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - - is_false: no-copy-settings-target.settings.index.blocks.write - - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id + # settings should be copied + - match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } + - match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } + - match: { default-copy-settings-target.settings.index.blocks.write: "true" } + - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master } - do: catch: /illegal_argument_exception/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index 9dd54811fabaa..f5b80509db895 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -1,6 +1,6 @@ setup: - skip: - version: " - 6.4.0" + version: " - 6.3.99" reason: "moving_fn added in 6.4.0" --- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 19593decb6533..eb6e4efcdca5f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -197,7 +197,7 @@ setup: "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": - skip: features: warnings - version: " - 6.4.0" + version: " - 6.3.99" reason: "deprecation added in 6.4.0" - do: 
warnings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml new file mode 100644 index 0000000000000..24920580c4552 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/210_rescore_explain.yml @@ -0,0 +1,39 @@ +--- +"Score should match explanation in rescore": + - skip: + version: " - 6.99.99" + reason: Explanation for rescoring was corrected after these versions + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_type": "_doc", "_id": "1"}}' + - '{"f1": "1"}' + - '{"index": {"_index": "test_index", "_type": "_doc", "_id": "2"}}' + - '{"f1": "2"}' + - '{"index": {"_index": "test_index", "_type": "_doc", "_id": "3"}}' + - '{"f1": "3"}' + + - do: + search: + index: test_index + body: + explain: true + query: + match_all: {} + rescore: + window_size: 2 + query: + rescore_query: + match_all: {} + query_weight: 5 + rescore_query_weight: 10 + + - match: { hits.hits.0._score: 15 } + - match: { hits.hits.0._explanation.value: 15 } + + - match: { hits.hits.1._score: 15 } + - match: { hits.hits.1._explanation.value: 15 } + + - match: { hits.hits.2._score: 5 } + - match: { hits.hits.2._explanation.value: 5 } diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..14f5fcb381f1c --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +b70d03784d06a643e096fae4d959200aa246ba16 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index ccabc01378088..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f465718b3db829e7660009aac2c1211fd5d74ca0 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..47afb59e45eb7 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +d660a63ac0f7ab2772a45ae518518472bf620620 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 68c4dcebd2ee2..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d502441e830e1a9d30270442f8e3fd8317fe7bba \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..50392f59374a8 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index c25718d0a9ee0..0000000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -5167fb0a14434cb10ec3224e9e32ca668e9f9ad4 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..27d7aaab2f589 --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +9eaae9dcd4ec88227475cb81d3be9afa767f1b22 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 007ae9bb24eee..0000000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -488aeecf49413b63a404989ae00b07b20951e76e \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..176c3a86afe7f --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +cd15f0008742c84899d678cb0cecda06d0a6d63e \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index f1733ffb6826a..0000000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -107755edd67cddb3fb9817de50c0bed3a10da19c \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..0bfe9cfb79aff --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +5ce38b8610a7f402f2da3b0e408e508151d979c5 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 71800d6aa153f..0000000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9226fab3b9c6250af52b87061f637c0f8e3114b6 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..c1a0127e2ce73 --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +53819f03a07050a4af28361d64395c86f2cea008 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 6cc24bbe98b82..0000000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b7bf384c1933225972f04224d867ec800f5e3a7 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..615a0dec0c0d4 --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +8cdc0e2b65d146ed11f4d2507109e530d59ff33d \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-6705632810.jar.sha1 
b/server/licenses/lucene-misc-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 03c146f5c6473..0000000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18b770c35db8757dc036b1506870a4ddaad7b1ab \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..12f5eff262e9c --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +e56090463703112ad64ad457d18bae9a5b2966b8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 3ecdd79cafd9e..0000000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -683f6436938c67709d0c665c9e1fdef7bd893e4a \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..a787a00541a54 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +9faf974b77058e44a6d35e956db4f5fb67389dfa \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index d113267f6f3d1..0000000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1df20ba64b9aa68f1fa9a15c9ff75f87f94dec47 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..7d95cd6b3b6e3 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +b852b1fe70ef70736b2b1a9ad57eb93cbaed0423 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 7c3391aec27e8..0000000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -895ca714fc62b66ba63d43931730cdc4ef56d35f \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..ac0598b3f0c49 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +d2fa99ec7140fcf35db16ac1feb78ef142750d39 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 35d6d5359ebd1..0000000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95ab7e9421bbeb8229d83ac72700b37a521fdf4f \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..a2537dbdde529 --- /dev/null +++ 
b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +c9963f60d3a0924b877a6f910650c5f2384822a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index c2f2f39a1f821..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -773ff8c8425d32609ccec6956759ad377dfb8f6b \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..6844bcd13b278 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +3f33ba54da5e0e125f4c5ef7dd800dd6185e4f61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index 0533067ff0d89..0000000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea711541e243ee768f950041e6e2843d0cc5e695 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..0343db2d94485 --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +bb3c18c987395dae6fe63744f5a50fd367ea5a74 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-6705632810.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index ce8adccc89a78..0000000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2ca005cf25722ba3777ed93f720f40c937081fa6 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index d4df9e3fffeda..a2820711da556 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -167,7 +167,7 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_6_2_5_ID = 6020599; public static final Version V_6_2_5 = new Version(V_6_2_5_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_1); public static final int V_6_4_0_ID = 6040099; public static final Version V_6_4_0 = new Version(V_6_4_0_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_7_0_0_alpha1_ID = 7000001; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 58d6a299bb936..b7ef075a59afa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -98,14 +98,11 @@ protected 
void masterOperation(final ClusterStateRequest request, final ClusterS if (request.blocks()) { builder.blocks(currentState.blocks()); } - if (request.metaData()) { - MetaData.Builder mdBuilder; - if (request.indices().length == 0) { - mdBuilder = MetaData.builder(currentState.metaData()); - } else { - mdBuilder = MetaData.builder(); - } + MetaData.Builder mdBuilder = MetaData.builder(); + mdBuilder.clusterUUID(currentState.metaData().clusterUUID()); + + if (request.metaData()) { if (request.indices().length > 0) { String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request); for (String filteredIndex : indices) { @@ -114,17 +111,19 @@ protected void masterOperation(final ClusterStateRequest request, final ClusterS mdBuilder.put(indexMetaData, false); } } + } else { + mdBuilder = MetaData.builder(currentState.metaData()); } - // Filter our metadata that shouldn't be returned by API - for(ObjectObjectCursor custom : currentState.metaData().customs()) { - if(!custom.value.context().contains(MetaData.XContentContext.API)) { + // filter out metadata that shouldn't be returned by the API + for (ObjectObjectCursor custom : currentState.metaData().customs()) { + if (custom.value.context().contains(MetaData.XContentContext.API) == false) { mdBuilder.removeCustom(custom.key); } } - - builder.metaData(mdBuilder); } + builder.metaData(mdBuilder); + if (request.customs()) { for (ObjectObjectCursor custom : currentState.customs()) { if (custom.value.isPrivate() == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java index 0c4f63b71fbbb..1f4214dbdd7a4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -71,7 +71,7 @@ public GetSettingsRequest(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); names = in.readStringArray(); humanReadable = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { includeDefaults = in.readBoolean(); } } @@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeStringArray(names); out.writeBoolean(humanReadable); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index 6217c9a685375..da01fb76275d5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -114,7 +114,7 @@ public void readFrom(StreamInput in) throws IOException { } ImmutableOpenMap.Builder defaultSettingsBuilder = ImmutableOpenMap.builder(); - if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_6_4_0)) { int defaultSettingsSize = in.readVInt(); for (int i = 0; i < defaultSettingsSize ; i++) { defaultSettingsBuilder.put(in.readString(), 
Settings.readSettingsFromStream(in)); @@ -132,7 +132,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(cursor.key); Settings.writeSettingsToStream(cursor.value, out); } - if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_6_4_0)) { out.writeVInt(indexToDefaultSettings.size()); for (ObjectObjectCursor cursor : indexToDefaultSettings) { out.writeString(cursor.key); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index ca046c48accff..eb95a2455b23e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -56,7 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest implements private CreateIndexRequest targetIndexRequest; private String sourceIndex; private ResizeType type = ResizeType.SHRINK; - private Boolean copySettings; + private Boolean copySettings = true; ResizeRequest() {} diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java index 777fb589c9db0..12130e856f32a 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredTokenFilter.java @@ -41,6 +41,15 @@ public static PreConfiguredTokenFilter singleton(String name, boolean useFilterF (tokenStream, version) -> create.apply(tokenStream)); } + /** + * Create a pre-configured token filter that may not vary at all. + */ + public static PreConfiguredTokenFilter singletonWithVersion(String name, boolean useFilterForMultitermQueries, + BiFunction create) { + return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, CachingStrategy.ONE, + (tokenStream, version) -> create.apply(tokenStream, version)); + } + /** * Create a pre-configured token filter that may vary based on the Lucene version. */ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 3d07a0f87aa5e..dd85e921e4ac5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -94,38 +94,19 @@ public final void sort(Comparator c) { } public static final class Longs extends ScriptDocValues { - protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(Longs.class)); - private final SortedNumericDocValues in; - /** - * Callback for deprecated fields. In production this should always point to - * {@link #deprecationLogger} but tests will override it so they can test that - * we use the required permissions when calling it. - */ - private final Consumer deprecationCallback; private long[] values = new long[0]; private int count; - private Dates dates; - private int docId = -1; /** * Standard constructor. */ public Longs(SortedNumericDocValues in) { - this(in, deprecationLogger::deprecated); - } - - /** - * Constructor for testing the deprecation callback. 
- */ - Longs(SortedNumericDocValues in, Consumer deprecationCallback) { this.in = in; - this.deprecationCallback = deprecationCallback; } @Override public void setNextDocId(int docId) throws IOException { - this.docId = docId; if (in.advanceExact(docId)) { resize(in.docValueCount()); for (int i = 0; i < count; i++) { @@ -134,9 +115,6 @@ public void setNextDocId(int docId) throws IOException { } else { resize(0); } - if (dates != null) { - dates.setNextDocId(docId); - } } /** @@ -148,10 +126,6 @@ protected void resize(int newSize) { values = ArrayUtil.grow(values, count); } - public SortedNumericDocValues getInternalValues() { - return this.in; - } - public long getValue() { if (count == 0) { return 0L; @@ -159,26 +133,6 @@ public long getValue() { return values[0]; } - @Deprecated - public ReadableDateTime getDate() throws IOException { - deprecated("getDate on numeric fields is deprecated. Use a date field to get dates."); - if (dates == null) { - dates = new Dates(in); - dates.setNextDocId(docId); - } - return dates.getValue(); - } - - @Deprecated - public List getDates() throws IOException { - deprecated("getDates on numeric fields is deprecated. Use a date field to get dates."); - if (dates == null) { - dates = new Dates(in); - dates.setNextDocId(docId); - } - return dates; - } - @Override public Long get(int index) { return values[index]; @@ -188,22 +142,6 @@ public Long get(int index) { public int size() { return count; } - - /** - * Log a deprecation log, with the server's permissions, not the permissions of the - * script calling this method. We need to do this to prevent errors when rolling - * the log file. - */ - private void deprecated(String message) { - // Intentionally not calling SpecialPermission.check because this is supposed to be called by scripts - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - deprecationCallback.accept(message); - return null; - } - }); - } } public static final class Dates extends ScriptDocValues { @@ -212,12 +150,6 @@ public static final class Dates extends ScriptDocValues { private static final ReadableDateTime EPOCH = new DateTime(0, DateTimeZone.UTC); private final SortedNumericDocValues in; - /** - * Callback for deprecated fields. In production this should always point to - * {@link #deprecationLogger} but tests will override it so they can test that - * we use the required permissions when calling it. - */ - private final Consumer deprecationCallback; /** * Values wrapped in {@link MutableDateTime}. Null by default an allocated on first usage so we allocate a reasonably size. We keep * this array so we don't have allocate new {@link MutableDateTime}s on every usage. Instead we reuse them for every document. @@ -229,15 +161,7 @@ public static final class Dates extends ScriptDocValues { * Standard constructor. */ public Dates(SortedNumericDocValues in) { - this(in, deprecationLogger::deprecated); - } - - /** - * Constructor for testing deprecation logging. - */ - Dates(SortedNumericDocValues in, Consumer deprecationCallback) { this.in = in; - this.deprecationCallback = deprecationCallback; } /** @@ -251,24 +175,6 @@ public ReadableDateTime getValue() { return get(0); } - /** - * Fetch the first value. Added for backwards compatibility with 5.x when date fields were {@link Longs}. - */ - @Deprecated - public ReadableDateTime getDate() { - deprecated("getDate is no longer necessary on date fields as the value is now a date."); - return getValue(); - } - - /** - * Fetch all the values. 
Added for backwards compatibility with 5.x when date fields were {@link Longs}. - */ - @Deprecated - public List getDates() { - deprecated("getDates is no longer necessary on date fields as the values are now dates."); - return this; - } - @Override public ReadableDateTime get(int index) { if (index >= count) { @@ -326,22 +232,6 @@ void refreshArray() throws IOException { dates[i] = new MutableDateTime(in.nextValue(), DateTimeZone.UTC); } } - - /** - * Log a deprecation log, with the server's permissions, not the permissions of the - * script calling this method. We need to do this to prevent errors when rolling - * the log file. - */ - private void deprecated(String message) { - // Intentionally not calling SpecialPermission.check because this is supposed to be called by scripts - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - deprecationCallback.accept(message); - return null; - } - }); - } } public static final class Doubles extends ScriptDocValues { diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java index 5caabd445b32e..fbada58f29477 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java @@ -249,7 +249,8 @@ protected Query doToQuery(QueryShardContext context) throws IOException { IndexNumericFieldData fieldData = context.getForField(msmFieldType); longValuesSource = new FieldValuesSource(fieldData); } else if (minimumShouldMatchScript != null) { - SearchScript.Factory factory = context.getScriptService().compile(minimumShouldMatchScript, SearchScript.CONTEXT); + SearchScript.Factory factory = context.getScriptService().compile(minimumShouldMatchScript, + SearchScript.TERMS_SET_QUERY_CONTEXT); Map params = new HashMap<>(); params.putAll(minimumShouldMatchScript.getParams()); params.put("num_terms", queries.size()); diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index cc89518154d12..ed4c5f5a26952 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -92,7 +92,7 @@ protected int doHashCode() { @Override protected ScoreFunction doToFunction(QueryShardContext context) { try { - SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.CONTEXT); + SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SCORE_CONTEXT); SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); return new ScriptScoreFunction(script, searchScript); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index bc4395632d594..d9f151907dfbb 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -759,17 +759,6 @@ private void innerClose() throws IOException { public String toString() { return "store(" + in.toString() + ")"; } - - @Override - public boolean checkPendingDeletions() throws IOException { - if 
(super.checkPendingDeletions()) { - deletesLogger.warn("directory has still pending deletes"); - } - // we skip this check since our IW usage always goes forward. - // we still might run into situations where we have pending deletes ie. in shrink / split case - // and that will cause issues on windows since we open multiple IW instance one after another during the split/shrink recovery - return false; - } } /** diff --git a/server/src/main/java/org/elasticsearch/plugins/MetaPluginInfo.java b/server/src/main/java/org/elasticsearch/plugins/MetaPluginInfo.java deleted file mode 100644 index d8bb176273ce2..0000000000000 --- a/server/src/main/java/org/elasticsearch/plugins/MetaPluginInfo.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugins; - -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Map; -import java.util.Properties; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * An in-memory representation of the meta plugin descriptor. - */ -public class MetaPluginInfo { - static final String ES_META_PLUGIN_PROPERTIES = "meta-plugin-descriptor.properties"; - - private final String name; - private final String description; - - /** - * Construct plugin info. - * - * @param name the name of the plugin - * @param description a description of the plugin - */ - private MetaPluginInfo(String name, String description) { - this.name = name; - this.description = description; - } - - /** - * @return Whether the provided {@code path} is a meta plugin. - */ - public static boolean isMetaPlugin(final Path path) { - return Files.exists(path.resolve(ES_META_PLUGIN_PROPERTIES)); - } - - /** - * @return Whether the provided {@code path} is a meta properties file. - */ - public static boolean isPropertiesFile(final Path path) { - return ES_META_PLUGIN_PROPERTIES.equals(path.getFileName().toString()); - } - - /** reads (and validates) meta plugin metadata descriptor file */ - - /** - * Reads and validates the meta plugin descriptor file. 
- * - * @param path the path to the root directory for the meta plugin - * @return the meta plugin info - * @throws IOException if an I/O exception occurred reading the meta plugin descriptor - */ - public static MetaPluginInfo readFromProperties(final Path path) throws IOException { - final Path descriptor = path.resolve(ES_META_PLUGIN_PROPERTIES); - - final Map propsMap; - { - final Properties props = new Properties(); - try (InputStream stream = Files.newInputStream(descriptor)) { - props.load(stream); - } - propsMap = props.stringPropertyNames().stream().collect(Collectors.toMap(Function.identity(), props::getProperty)); - } - - final String name = propsMap.remove("name"); - if (name == null || name.isEmpty()) { - throw new IllegalArgumentException( - "property [name] is missing for meta plugin in [" + descriptor + "]"); - } - final String description = propsMap.remove("description"); - if (description == null) { - throw new IllegalArgumentException( - "property [description] is missing for meta plugin [" + name + "]"); - } - - if (propsMap.isEmpty() == false) { - throw new IllegalArgumentException("Unknown properties in meta plugin descriptor: " + propsMap.keySet()); - } - - return new MetaPluginInfo(name, description); - } - - /** - * The name of the meta plugin. - * - * @return the meta plugin name - */ - public String getName() { - return name; - } - - /** - * The description of the meta plugin. - * - * @return the meta plugin description - */ - public String getDescription() { - return description; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - MetaPluginInfo that = (MetaPluginInfo) o; - - if (!name.equals(that.name)) return false; - - return true; - } - - @Override - public int hashCode() { - return name.hashCode(); - } - - @Override - public String toString() { - final StringBuilder information = new StringBuilder() - .append("- Plugin information:\n") - .append("Name: ").append(name).append("\n") - .append("Description: ").append(description); - return information.toString(); - } - -} diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 4514691e4bec4..3bb2c3a1868b1 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -140,16 +140,12 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, // TODO: remove this leniency, but tests bogusly rely on it if (isAccessibleDirectory(pluginsDirectory, logger)) { checkForFailedPluginRemovals(pluginsDirectory); - // call findBundles directly to get the meta plugin names - List plugins = findBundles(pluginsDirectory, "plugin"); - for (final BundleCollection plugin : plugins) { - final Collection bundles = plugin.bundles(); - for (final Bundle bundle : bundles) { - pluginsList.add(bundle.plugin); - } - seenBundles.addAll(bundles); - pluginsNames.add(plugin.name()); + Set plugins = getPluginBundles(pluginsDirectory); + for (final Bundle bundle : plugins) { + pluginsList.add(bundle.plugin); + pluginsNames.add(bundle.plugin.getName()); } + seenBundles.addAll(plugins); } } catch (IOException ex) { throw new IllegalStateException("Unable to initialize plugins", ex); @@ -253,17 +249,8 @@ public PluginsAndModules info() { return info; } - /** - * An abstraction over a single plugin and meta-plugins. 
- */ - interface BundleCollection { - String name(); - Collection bundles(); - } - - // a "bundle" is a group of plugins in a single classloader - // really should be 1-1, but we are not so fortunate - static class Bundle implements BundleCollection { + // a "bundle" is a group of jars in a single classloader + static class Bundle { final PluginInfo plugin; final Set urls; @@ -283,16 +270,6 @@ static class Bundle implements BundleCollection { this.urls = Objects.requireNonNull(urls); } - @Override - public String name() { - return plugin.getName(); - } - - @Override - public Collection bundles() { - return Collections.singletonList(this); - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -308,87 +285,30 @@ public int hashCode() { } /** - * Represents a meta-plugin and the {@link Bundle}s corresponding to its constituents. - */ - static class MetaBundle implements BundleCollection { - private final String name; - private final List bundles; - - MetaBundle(final String name, final List bundles) { - this.name = name; - this.bundles = bundles; - } - - @Override - public String name() { - return name; - } - - @Override - public Collection bundles() { - return bundles; - } - - } - - /** - * Extracts all installed plugin directories from the provided {@code rootPath} expanding meta-plugins if needed. + * Extracts all installed plugin directories from the provided {@code rootPath}. * * @param rootPath the path where the plugins are installed * @return a list of all plugin paths installed in the {@code rootPath} * @throws IOException if an I/O exception occurred reading the directories */ public static List findPluginDirs(final Path rootPath) throws IOException { - final Tuple, Map>> groupedPluginDirs = findGroupedPluginDirs(rootPath); - return Stream.concat( - groupedPluginDirs.v1().stream(), - groupedPluginDirs.v2().values().stream().flatMap(Collection::stream)) - .collect(Collectors.toList()); - } - - /** - * Extracts all installed plugin directories from the provided {@code rootPath} expanding meta-plugins if needed. The plugins are - * grouped into plugins and meta-plugins. The meta-plugins are keyed by the meta-plugin name. 
- * - * @param rootPath the path where the plugins are installed - * @return a tuple of plugins as the first component and meta-plugins keyed by meta-plugin name as the second component - * @throws IOException if an I/O exception occurred reading the directories - */ - private static Tuple, Map>> findGroupedPluginDirs(final Path rootPath) throws IOException { final List plugins = new ArrayList<>(); - final Map> metaPlugins = new LinkedHashMap<>(); final Set seen = new HashSet<>(); if (Files.exists(rootPath)) { try (DirectoryStream stream = Files.newDirectoryStream(rootPath)) { for (Path plugin : stream) { if (FileSystemUtils.isDesktopServicesStore(plugin) || - plugin.getFileName().toString().startsWith(".removing-")) { + plugin.getFileName().toString().startsWith(".removing-")) { continue; } if (seen.add(plugin.getFileName().toString()) == false) { throw new IllegalStateException("duplicate plugin: " + plugin); } - if (MetaPluginInfo.isMetaPlugin(plugin)) { - final String name = plugin.getFileName().toString(); - try (DirectoryStream subStream = Files.newDirectoryStream(plugin)) { - for (Path subPlugin : subStream) { - if (MetaPluginInfo.isPropertiesFile(subPlugin) || - FileSystemUtils.isDesktopServicesStore(subPlugin)) { - continue; - } - if (seen.add(subPlugin.getFileName().toString()) == false) { - throw new IllegalStateException("duplicate plugin: " + subPlugin); - } - metaPlugins.computeIfAbsent(name, n -> new ArrayList<>()).add(subPlugin); - } - } - } else { - plugins.add(plugin); - } + plugins.add(plugin); } } } - return Tuple.tuple(plugins, metaPlugins); + return plugins; } /** @@ -425,32 +345,21 @@ static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOE /** Get bundles for plugins installed in the given modules directory. */ static Set getModuleBundles(Path modulesDirectory) throws IOException { - return findBundles(modulesDirectory, "module").stream().flatMap(b -> b.bundles().stream()).collect(Collectors.toSet()); + return findBundles(modulesDirectory, "module"); } /** Get bundles for plugins installed in the given plugins directory. 
*/ static Set getPluginBundles(final Path pluginsDirectory) throws IOException { - return findBundles(pluginsDirectory, "plugin").stream().flatMap(b -> b.bundles().stream()).collect(Collectors.toSet()); + return findBundles(pluginsDirectory, "plugin"); } // searches subdirectories under the given directory for plugin directories - private static List findBundles(final Path directory, String type) throws IOException { - final List bundles = new ArrayList<>(); - final Set seenBundles = new HashSet<>(); - final Tuple, Map>> groupedPluginDirs = findGroupedPluginDirs(directory); - for (final Path plugin : groupedPluginDirs.v1()) { - final Bundle bundle = readPluginBundle(seenBundles, plugin, type); + private static Set findBundles(final Path directory, String type) throws IOException { + final Set bundles = new HashSet<>(); + for (final Path plugin : findPluginDirs(directory)) { + final Bundle bundle = readPluginBundle(bundles, plugin, type); bundles.add(bundle); } - for (final Map.Entry> metaPlugin : groupedPluginDirs.v2().entrySet()) { - final List metaPluginBundles = new ArrayList<>(); - for (final Path metaPluginPlugin : metaPlugin.getValue()) { - final Bundle bundle = readPluginBundle(seenBundles, metaPluginPlugin, type); - metaPluginBundles.add(bundle); - } - final MetaBundle metaBundle = new MetaBundle(metaPlugin.getKey(), metaPluginBundles); - bundles.add(metaBundle); - } return bundles; } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index fcbc54efbf780..ba4b68b20b246 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1147,6 +1147,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { // TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) here but we should final Collection fileNames; try { + logger.trace("[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit); metadata = store.getMetadata(snapshotIndexCommit); fileNames = snapshotIndexCommit.getFileNames(); } catch (IOException e) { @@ -1242,9 +1243,6 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { /** * Snapshot individual file - *
<p>
- * This is asynchronous method. Upon completion of the operation latch is getting counted down and any failures are - * added to the {@code failures} list * * @param fileInfo file to be snapshotted */ diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index bc5db552b9dd2..b865bd3a42029 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.admin.indices; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; @@ -47,22 +48,22 @@ public abstract class RestResizeHandler extends BaseRestHandler { public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); resizeRequest.setResizeType(getResizeType()); + // copy_settings should be removed in Elasticsearch 8.0.0; cf. https://github.com/elastic/elasticsearch/issues/28347 + assert Version.CURRENT.major < 8; final String rawCopySettings = request.param("copy_settings"); final Boolean copySettings; if (rawCopySettings == null) { copySettings = resizeRequest.getCopySettings(); - } else if (rawCopySettings.isEmpty()) { - copySettings = true; } else { - copySettings = Booleans.parseBoolean(rawCopySettings); - if (copySettings == false) { - throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]"); + if (rawCopySettings.isEmpty()) { + copySettings = true; + } else { + copySettings = Booleans.parseBoolean(rawCopySettings); + if (copySettings == false) { + throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]"); + } } - } - if (copySettings == null) { - deprecationLogger.deprecated( - "resize operations without copying settings is deprecated; " - + "set parameter [copy_settings] to [true] for future default behavior"); + deprecationLogger.deprecated("parameter [copy_settings] is deprecated and will be removed in 8.0.0"); } resizeRequest.setCopySettings(copySettings); request.applyContentParser(resizeRequest::fromXContent); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index 5afb6ad28d7ab..583421be8e581 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -42,6 +42,9 @@ public class ScriptModule { CORE_CONTEXTS = Stream.of( SearchScript.CONTEXT, SearchScript.AGGS_CONTEXT, + SearchScript.SCRIPT_SCORE_CONTEXT, + SearchScript.SCRIPT_SORT_CONTEXT, + SearchScript.TERMS_SET_QUERY_CONTEXT, ExecutableScript.CONTEXT, ExecutableScript.AGGS_CONTEXT, ExecutableScript.UPDATE_CONTEXT, diff --git a/server/src/main/java/org/elasticsearch/script/SearchScript.java b/server/src/main/java/org/elasticsearch/script/SearchScript.java index d0c932a3490d8..e5762adb1bbe9 100644 --- a/server/src/main/java/org/elasticsearch/script/SearchScript.java +++ b/server/src/main/java/org/elasticsearch/script/SearchScript.java @@ -158,6 +158,12
@@ public interface Factory { /** The context used to compile {@link SearchScript} factories. */ public static final ScriptContext<Factory> CONTEXT = new ScriptContext<>("search", Factory.class); - // TODO: remove aggs context when it has its own interface + // TODO: remove these contexts when each has its own interface public static final ScriptContext<Factory> AGGS_CONTEXT = new ScriptContext<>("aggs", Factory.class); -} \ No newline at end of file + // Can return a double. (For ScriptSortType#NUMBER only, for ScriptSortType#STRING normal CONTEXT should be used) + public static final ScriptContext<Factory> SCRIPT_SORT_CONTEXT = new ScriptContext<>("sort", Factory.class); + // Can return a float + public static final ScriptContext<Factory> SCRIPT_SCORE_CONTEXT = new ScriptContext<>("score", Factory.class); + // Can return a long + public static final ScriptContext<Factory> TERMS_SET_QUERY_CONTEXT = new ScriptContext<>("terms_set", Factory.class); +} diff --git a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java index d4cf05d542560..4a9567a32c06a 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java @@ -30,6 +30,8 @@ import java.util.Arrays; import java.util.Comparator; import java.util.Set; +import java.util.Collections; +import static java.util.stream.Collectors.toSet; public final class QueryRescorer implements Rescorer { @@ -61,6 +63,11 @@ protected float combine(float firstPassScore, boolean secondPassMatches, float s // First take top slice of incoming docs, to be rescored: TopDocs topNFirstPass = topN(topDocs, rescoreContext.getWindowSize()); + // Save doc IDs for which rescoring was applied to be used in score explanation + Set<Integer> topNDocIDs = Collections.unmodifiableSet( + Arrays.stream(topNFirstPass.scoreDocs).map(scoreDoc -> scoreDoc.doc).collect(toSet())); + rescoreContext.setRescoredDocs(topNDocIDs); + // Rescore them: TopDocs rescored = rescorer.rescore(searcher, topNFirstPass, rescoreContext.getWindowSize()); @@ -71,16 +78,12 @@ protected float combine(float firstPassScore, boolean secondPassMatches, float s @Override public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreContext rescoreContext, Explanation sourceExplanation) throws IOException { - QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext; if (sourceExplanation == null) { // this should not happen but just in case return Explanation.noMatch("nothing matched"); } - // TODO: this isn't right? I.e., we are incorrectly pretending all first pass hits were rescored? If the requested docID was - // beyond the top rescoreContext.window() in the first pass hits, we don't rescore it now? - Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId); + QueryRescoreContext rescore = (QueryRescoreContext) rescoreContext; float primaryWeight = rescore.queryWeight(); - Explanation prim; if (sourceExplanation.isMatch()) { prim = Explanation.match( @@ -89,23 +92,24 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon } else { prim = Explanation.noMatch("First pass did not match", sourceExplanation); } - - // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. Maybe - // we should add QueryRescorer.explainCombine to Lucene?
- if (rescoreExplain != null && rescoreExplain.isMatch()) { - float secondaryWeight = rescore.rescoreQueryWeight(); - Explanation sec = Explanation.match( + if (rescoreContext.isRescored(topLevelDocId)) { + Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId); + // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used. + // Maybe we should add QueryRescorer.explainCombine to Lucene? + if (rescoreExplain != null && rescoreExplain.isMatch()) { + float secondaryWeight = rescore.rescoreQueryWeight(); + Explanation sec = Explanation.match( rescoreExplain.getValue() * secondaryWeight, "product of:", rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight")); - QueryRescoreMode scoreMode = rescore.scoreMode(); - return Explanation.match( + QueryRescoreMode scoreMode = rescore.scoreMode(); + return Explanation.match( scoreMode.combine(prim.getValue(), sec.getValue()), scoreMode + " of:", prim, sec); - } else { - return prim; + } } + return prim; } private static final Comparator<ScoreDoc> SCORE_DOC_COMPARATOR = new Comparator<ScoreDoc>() { diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java index 75ce807a67f47..2401b9ff32900 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java @@ -19,6 +19,8 @@ package org.elasticsearch.search.rescore; +import java.util.Set; + /** * Context available to the rescore while it is running. Rescore * implementations should extend this with any additional resources that @@ -27,6 +29,7 @@ public class RescoreContext { private final int windowSize; private final Rescorer rescorer; + private Set<Integer> rescoredDocs; // doc IDs for which rescoring was applied /** * Build the context.
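The hunk that follows completes this change: RescoreContext records which documents actually fell inside the rescore window, so the reworked explain() above only blends in the rescore query for hits that were really rescored, which is exactly what the removed TODO in QueryRescorer complained about. A minimal self-contained sketch of the pattern (illustrative names, not the actual Elasticsearch API):

---------------------------------------------------------------------------
// Sketch of the bookkeeping introduced above: remember which doc IDs went
// through the rescore pass so explain() can tell rescored hits apart from
// first-pass-only hits. Hypothetical class, not the Elasticsearch source.
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class RescoreTracker {
    private Set<Integer> rescoredDocs = Collections.emptySet();

    // Called once per rescore pass with the doc IDs of the top-N window.
    void setRescoredDocs(Set<Integer> docIds) {
        rescoredDocs = Collections.unmodifiableSet(docIds);
    }

    // explain() consults this before blending in the rescore query's score.
    boolean isRescored(int docId) {
        return rescoredDocs.contains(docId);
    }

    public static void main(String[] args) {
        RescoreTracker tracker = new RescoreTracker();
        // Pretend docs 0..9 made it into the rescore window.
        tracker.setRescoredDocs(IntStream.range(0, 10).boxed().collect(Collectors.toSet()));
        System.out.println(tracker.isRescored(3));   // true: combined explanation
        System.out.println(tracker.isRescored(42));  // false: first-pass explanation only
    }
}
---------------------------------------------------------------------------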
@@ -50,4 +53,12 @@ public Rescorer rescorer() { public int getWindowSize() { return windowSize; } + + public void setRescoredDocs(Set<Integer> docIds) { + rescoredDocs = docIds; + } + + public boolean isRescored(int docId) { + return rescoredDocs.contains(docId); + } } diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 99668515de5b1..6e52160238b33 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -305,7 +305,7 @@ public static ScriptSortBuilder fromXContent(XContentParser parser, String eleme @Override public SortFieldAndFormat build(QueryShardContext context) throws IOException { - final SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.CONTEXT); + final SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SORT_CONTEXT); final SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); MultiValueMode valueMode = null; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index d89a8a134ff7c..0c3cc7e4b153e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -84,7 +83,6 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); } - @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8318") public void testCreateShrinkIndexToN() { int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}}; int[] shardSplits = randomFrom(possibleShardSplits); @@ -113,9 +111,11 @@ public void testCreateShrinkIndexToN() { ensureGreen(); // now merge source into a 4 shard index assertAcked(client().admin().indices().prepareResizeIndex("source", "first_shrink") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", shardSplits[1]).build()).get()); + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[1]) + .putNull("index.blocks.write") + .build()).get()); ensureGreen(); assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); @@ -135,9 +135,12 @@ public void testCreateShrinkIndexToN() { ensureGreen(); // now merge source into a 2 shard index assertAcked(client().admin().indices().prepareResizeIndex("first_shrink", "second_shrink") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", shardSplits[2]).build()).get()); + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[2]) + .putNull("index.blocks.write") + 
.putNull("index.routing.allocation.require._name") + .build()).get()); ensureGreen(); assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); // let it be allocated anywhere and bump replicas @@ -272,8 +275,14 @@ public void testCreateShrinkIndex() { // now merge source into a single shard index final boolean createWithReplicas = randomBoolean(); - assertAcked(client().admin().indices().prepareResizeIndex("source", "target") - .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get()); + assertAcked( + client().admin().indices().prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", createWithReplicas ? 1 : 0) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build()).get()); ensureGreen(); // resolve true merge node - this is not always the node we required as all shards may be on another node @@ -444,19 +453,20 @@ public void testCreateShrinkWithIndexSort() throws Exception { // check that index sort cannot be set on the target index IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, - () -> client().admin().indices().prepareResizeIndex("source", "target") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", "2") - .put("index.sort.field", "foo") - .build()).get()); + () -> client().admin().indices().prepareResizeIndex("source", "target") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .put("index.sort.field", "foo") + .build()).get()); assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); // check that the index sort order of `source` is correctly applied to the `target` assertAcked(client().admin().indices().prepareResizeIndex("source", "target") - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", "2").build()).get()); + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .putNull("index.blocks.write").build()).get()); ensureGreen(); flushAndRefresh(); GetSettingsResponse settingsResponse = diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index fe6e980ab4259..a0fd40a649e71 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -193,8 +193,9 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha .put("index.blocks.write", true)).get(); ensureGreen(); Settings.Builder firstSplitSettingsBuilder = Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", firstSplitShards); + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", firstSplitShards) + .putNull("index.blocks.write"); if (sourceShards == 1 && useRoutingPartition == false && randomBoolean()) { // try to set it if we have a source index with 1 shard firstSplitSettingsBuilder.put("index.number_of_routing_shards", secondSplitShards); } @@ -225,10 +226,12 @@ private void splitToN(int sourceShards, int firstSplitShards, int secondSplitSha ensureGreen(); // now split source into a new index 
assertAcked(client().admin().indices().prepareResizeIndex("first_split", "second_split") - .setResizeType(ResizeType.SPLIT) - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", secondSplitShards).build()).get()); + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", secondSplitShards) + .putNull("index.blocks.write") + .build()).get()); ensureGreen(); assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); // let it be allocated anywhere and bump replicas @@ -340,7 +343,11 @@ public void testSplitIndexPrimaryTerm() throws Exception { // now split source into target final Settings splitSettings = - Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + .putNull("index.blocks.write") + .build(); assertAcked(client().admin().indices().prepareResizeIndex("source", "target") .setResizeType(ResizeType.SPLIT) .setSettings(splitSettings).get()); @@ -396,8 +403,10 @@ public void testCreateSplitIndex() { assertAcked(client().admin().indices().prepareResizeIndex("source", "target") .setResizeType(ResizeType.SPLIT) .setSettings(Settings.builder() - .put("index.number_of_replicas", createWithReplicas ? 1 : 0) - .put("index.number_of_shards", 2).build()).get()); + .put("index.number_of_replicas", createWithReplicas ? 1 : 0) + .put("index.number_of_shards", 2) + .putNull("index.blocks.write") + .build()).get()); ensureGreen(); final ClusterState state = client().admin().cluster().prepareState().get().getState(); @@ -507,10 +516,12 @@ public void testCreateSplitWithIndexSort() throws Exception { // check that the index sort order of `source` is correctly applied to the `target` assertAcked(client().admin().indices().prepareResizeIndex("source", "target") - .setResizeType(ResizeType.SPLIT) - .setSettings(Settings.builder() - .put("index.number_of_replicas", 0) - .put("index.number_of_shards", 4).build()).get()); + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 4) + .putNull("index.blocks.write") + .build()).get()); ensureGreen(); flushAndRefresh(); GetSettingsResponse settingsResponse = diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java index 125cb572ea54d..8e69b8093597f 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java @@ -154,6 +154,7 @@ public void testAutoQueueSizingWithMin() throws Exception { context.close(); } + @TestLogging("org.elasticsearch.common.util.concurrent:DEBUG") public void testAutoQueueSizingWithMax() throws Exception { ThreadContext context = new ThreadContext(Settings.EMPTY); ResizableBlockingQueue queue = diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java index 7a0a6816a66bf..12eb69bef39d8 100644 --- 
a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java @@ -50,19 +50,11 @@ public void test() throws IOException { values[d][i] = expectedDates[d][i].getMillis(); } } - Set warnings = new HashSet<>(); - Dates dates = wrap(values, deprecationMessage -> { - warnings.add(deprecationMessage); - /* Create a temporary directory to prove we are running with the - * server's permissions. */ - createTempDir(); - }); - + Dates dates = wrap(values); for (int round = 0; round < 10; round++) { int d = between(0, values.length - 1); dates.setNextDocId(d); assertEquals(expectedDates[d].length > 0 ? expectedDates[d][0] : new DateTime(0, DateTimeZone.UTC), dates.getValue()); - assertEquals(expectedDates[d].length > 0 ? expectedDates[d][0] : new DateTime(0, DateTimeZone.UTC), dates.getDate()); assertEquals(values[d].length, dates.size()); for (int i = 0; i < values[d].length; i++) { @@ -72,33 +64,9 @@ public void test() throws IOException { Exception e = expectThrows(UnsupportedOperationException.class, () -> dates.add(new DateTime())); assertEquals("doc values are unmodifiable", e.getMessage()); } - - /* - * Invoke getDates without any privileges to verify that - * it still works without any. In particularly, this - * verifies that the callback that we've configured - * above works. That callback creates a temporary - * directory which is not possible with "noPermissions". - */ - PermissionCollection noPermissions = new Permissions(); - AccessControlContext noPermissionsAcc = new AccessControlContext( - new ProtectionDomain[] { - new ProtectionDomain(null, noPermissions) - } - ); - AccessController.doPrivileged(new PrivilegedAction() { - public Void run() { - dates.getDates(); - return null; - } - }, noPermissionsAcc); - - assertThat(warnings, containsInAnyOrder( - "getDate is no longer necessary on date fields as the value is now a date.", - "getDates is no longer necessary on date fields as the values are now dates.")); } - private Dates wrap(long[][] values, Consumer deprecationHandler) { + private Dates wrap(long[][] values) { return new Dates(new AbstractSortedNumericDocValues() { long[] current; int i; @@ -117,6 +85,6 @@ public int docValueCount() { public long nextValue() { return current[i++]; } - }, deprecationHandler); + }); } } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java index 8b20e9a9f3a71..5fd33da27e399 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java @@ -47,7 +47,7 @@ public void testLongs() throws IOException { values[d][i] = randomLong(); } } - Longs longs = wrap(values, deprecationMessage -> {fail("unexpected deprecation: " + deprecationMessage);}); + Longs longs = wrap(values); for (int round = 0; round < 10; round++) { int d = between(0, values.length - 1); @@ -66,69 +66,7 @@ public void testLongs() throws IOException { } } - public void testDates() throws IOException { - long[][] values = new long[between(3, 10)][]; - ReadableDateTime[][] dates = new ReadableDateTime[values.length][]; - for (int d = 0; d < values.length; d++) { - values[d] = new long[randomBoolean() ? randomBoolean() ? 
0 : 1 : between(2, 100)]; - dates[d] = new ReadableDateTime[values[d].length]; - for (int i = 0; i < values[d].length; i++) { - dates[d][i] = new DateTime(randomNonNegativeLong(), DateTimeZone.UTC); - values[d][i] = dates[d][i].getMillis(); - } - } - Set warnings = new HashSet<>(); - Longs longs = wrap(values, deprecationMessage -> { - warnings.add(deprecationMessage); - /* Create a temporary directory to prove we are running with the - * server's permissions. */ - createTempDir(); - }); - - for (int round = 0; round < 10; round++) { - int d = between(0, values.length - 1); - longs.setNextDocId(d); - assertEquals(dates[d].length > 0 ? dates[d][0] : new DateTime(0, DateTimeZone.UTC), longs.getDate()); - - assertEquals(values[d].length, longs.getDates().size()); - for (int i = 0; i < values[d].length; i++) { - assertEquals(dates[d][i], longs.getDates().get(i)); - } - - Exception e = expectThrows(UnsupportedOperationException.class, () -> longs.getDates().add(new DateTime())); - assertEquals("doc values are unmodifiable", e.getMessage()); - } - - /* - * Invoke getDates without any privileges to verify that - * it still works without any. In particularly, this - * verifies that the callback that we've configured - * above works. That callback creates a temporary - * directory which is not possible with "noPermissions". - */ - PermissionCollection noPermissions = new Permissions(); - AccessControlContext noPermissionsAcc = new AccessControlContext( - new ProtectionDomain[] { - new ProtectionDomain(null, noPermissions) - } - ); - AccessController.doPrivileged(new PrivilegedAction() { - public Void run() { - try { - longs.getDates(); - } catch (IOException e) { - throw new RuntimeException("unexpected", e); - } - return null; - } - }, noPermissionsAcc); - - assertThat(warnings, containsInAnyOrder( - "getDate on numeric fields is deprecated. Use a date field to get dates.", - "getDates on numeric fields is deprecated. Use a date field to get dates.")); - } - - private Longs wrap(long[][] values, Consumer deprecationCallback) { + private Longs wrap(long[][] values) { return new Longs(new AbstractSortedNumericDocValues() { long[] current; int i; @@ -147,6 +85,6 @@ public int docValueCount() { public long nextValue() { return current[i++]; } - }, deprecationCallback); + }); } } diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index bbfa56a0e55fe..108b41d54a08e 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -470,7 +470,8 @@ protected void sendRequest(Connection connection, long requestId, String action, * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard. 
*/ - @TestLogging("org.elasticsearch.monitor.fs:DEBUG") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30577") + @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE,org.elasticsearch.index.engine:DEBUG") public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException { int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); @@ -494,6 +495,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I assertHitCount(countResponse, numDocs); ShardRouting shardRouting = corruptRandomPrimaryFile(false); + logger.info("--> shard {} has a corrupted file", shardRouting); // we don't corrupt segments.gen since S/R doesn't snapshot this file // the other problem here why we can't corrupt segments.X files is that the snapshot flushes again before // it snapshots and that will write a new segments.X+1 file @@ -504,9 +506,12 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); - logger.info("failed during snapshot -- maybe SI file got corrupted"); + final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test") + .get(); + final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); + logger.info("--> snapshot terminated with state " + snapshotState); final List<Path> files = listShardFiles(shardRouting); Path corruptedFile = null; for (Path file : files) { @@ -515,6 +520,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I break; } } + if (snapshotState != SnapshotState.PARTIAL) { + logger.info("--> listing shard files for investigation"); + files.forEach(f -> logger.info("path: {}", f.toAbsolutePath())); + } + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); assertThat(corruptedFile, notNullValue()); } diff --git a/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java b/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java deleted file mode 100644 index c54a13bd30267..0000000000000 --- a/server/src/test/java/org/elasticsearch/plugins/MetaPluginInfoTests.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied.
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.plugins; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.Version; -import org.elasticsearch.test.ESTestCase; - -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; - -@LuceneTestCase.SuppressFileSystems(value = "ExtrasFS") -public class MetaPluginInfoTests extends ESTestCase { - - public void testReadFromProperties() throws Exception { - Path pluginDir = createTempDir().resolve("fake-meta-plugin"); - PluginTestUtil.writeMetaPluginProperties(pluginDir, - "description", "fake desc", - "name", "my_meta_plugin"); - MetaPluginInfo info = MetaPluginInfo.readFromProperties(pluginDir); - assertEquals("my_meta_plugin", info.getName()); - assertEquals("fake desc", info.getDescription()); - } - - public void testReadFromPropertiesNameMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-meta-plugin"); - PluginTestUtil.writeMetaPluginProperties(pluginDir); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> MetaPluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("property [name] is missing for")); - - PluginTestUtil.writeMetaPluginProperties(pluginDir, "name", ""); - e = expectThrows(IllegalArgumentException.class, () -> MetaPluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("property [name] is missing for")); - } - - public void testReadFromPropertiesDescriptionMissing() throws Exception { - Path pluginDir = createTempDir().resolve("fake-meta-plugin"); - PluginTestUtil.writeMetaPluginProperties(pluginDir, "name", "fake-meta-plugin"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> MetaPluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("[description] is missing")); - } - - public void testUnknownProperties() throws Exception { - Path pluginDir = createTempDir().resolve("fake-meta-plugin"); - PluginTestUtil.writeMetaPluginProperties(pluginDir, - "extra", "property", - "unknown", "property", - "description", "fake desc", - "name", "my_meta_plugin"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> MetaPluginInfo.readFromProperties(pluginDir)); - assertThat(e.getMessage(), containsString("Unknown properties in meta plugin descriptor")); - } - - public void testExtractAllPluginsWithDuplicates() throws Exception { - Path pluginDir = createTempDir().resolve("plugins"); - // Simple plugin - Path plugin1 = pluginDir.resolve("plugin1"); - Files.createDirectories(plugin1); - PluginTestUtil.writePluginProperties(plugin1, - "description", "fake desc", - "name", "plugin1", - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - - // Meta plugin - Path metaPlugin = pluginDir.resolve("meta_plugin"); - Files.createDirectory(metaPlugin); - PluginTestUtil.writeMetaPluginProperties(metaPlugin, - "description", "fake desc", - "name", "meta_plugin"); - Path plugin2 = metaPlugin.resolve("plugin1"); - Files.createDirectory(plugin2); - PluginTestUtil.writePluginProperties(plugin2, - "description", "fake desc", - "name", "plugin1", - 
"version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - Path plugin3 = metaPlugin.resolve("plugin2"); - Files.createDirectory(plugin3); - PluginTestUtil.writePluginProperties(plugin3, - "description", "fake desc", - "name", "plugin2", - "version", "1.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "FakePlugin"); - - IllegalStateException exc = - expectThrows(IllegalStateException.class, () -> PluginsService.findPluginDirs(pluginDir)); - assertThat(exc.getMessage(), containsString("duplicate plugin")); - assertThat(exc.getMessage(), endsWith("plugin1")); - } -} diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 4d2eb6f2f36f3..ffecaca452599 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -620,34 +620,7 @@ public void testFindPluginDirs() throws IOException { Files.copy(jar, fake.resolve("plugin.jar")); } - final Path fakeMeta = plugins.resolve("fake-meta"); - - PluginTestUtil.writeMetaPluginProperties(fakeMeta, "description", "description", "name", "fake-meta"); - - final Path fakeMetaCore = fakeMeta.resolve("fake-meta-core"); - PluginTestUtil.writePluginProperties( - fakeMetaCore, - "description", "description", - "name", "fake-meta-core", - "version", "1.0.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "test.DummyPlugin"); - try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { - Files.copy(jar, fakeMetaCore.resolve("plugin.jar")); - } - - assertThat(PluginsService.findPluginDirs(plugins), containsInAnyOrder(fake, fakeMetaCore)); - } - - public void testMissingMandatoryPlugin() { - final Settings settings = - Settings.builder() - .put("path.home", createTempDir()) - .put("plugin.mandatory", "fake") - .build(); - final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings)); - assertThat(e, hasToString(containsString("missing mandatory plugins [fake]"))); + assertThat(PluginsService.findPluginDirs(plugins), containsInAnyOrder(fake)); } public void testExistingMandatoryClasspathPlugin() { @@ -696,38 +669,4 @@ public void testExistingMandatoryInstalledPlugin() throws IOException { .build(); newPluginsService(settings); } - - public void testExistingMandatoryMetaPlugin() throws IOException { - // This test opens a child classloader, reading a jar under the test temp - // dir (a dummy plugin). Classloaders are closed by GC, so when test teardown - // occurs the jar is deleted while the classloader is still open. However, on - // windows, files cannot be deleted when they are still open by a process. 
- assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); - final Path pathHome = createTempDir(); - final Path plugins = pathHome.resolve("plugins"); - final Path fakeMeta = plugins.resolve("fake-meta"); - - PluginTestUtil.writeMetaPluginProperties(fakeMeta, "description", "description", "name", "fake-meta"); - - final Path fake = fakeMeta.resolve("fake"); - PluginTestUtil.writePluginProperties( - fake, - "description", "description", - "name", "fake", - "version", "1.0.0", - "elasticsearch.version", Version.CURRENT.toString(), - "java.version", System.getProperty("java.specification.version"), - "classname", "test.DummyPlugin"); - try (InputStream jar = PluginsServiceTests.class.getResourceAsStream("dummy-plugin.jar")) { - Files.copy(jar, fake.resolve("plugin.jar")); - } - - final Settings settings = - Settings.builder() - .put("path.home", pathHome) - .put("plugin.mandatory", "fake-meta") - .build(); - newPluginsService(settings); - } - } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java index 2c30184ee4e35..2eeb90d9ec9b9 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java @@ -70,10 +70,8 @@ private void runTestResizeCopySettingsDeprecated( assertThat(e, hasToString(containsString("parameter [copy_settings] can not be explicitly set to [false]"))); } else { handler.prepareRequest(request, mock(NodeClient.class)); - if (copySettings == null) { - assertWarnings( - "resize operations without copying settings is deprecated; " - + "set parameter [copy_settings] to [true] for future default behavior"); + if ("".equals(copySettings) || "true".equals(copySettings)) { + assertWarnings("parameter [copy_settings] is deprecated and will be removed in 8.0.0"); } } } diff --git a/server/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/server/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java index 74fbcc20a0278..37ea53332af92 100644 --- a/server/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/server/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -108,10 +108,11 @@ public void testShrinking() throws Exception { logger.info("--> shrinking index [" + previousIndex + "] to [" + index + "]"); client().admin().indices().prepareResizeIndex(previousIndex, index) - .setSettings(Settings.builder() - .put("index.number_of_shards", currentShards) - .put("index.number_of_replicas", numberOfReplicas()) - .build()).get(); + .setSettings(Settings.builder() + .put("index.number_of_shards", currentShards) + .put("index.number_of_replicas", numberOfReplicas()) + .putNull("index.routing.allocation.require._name") + .build()).get(); ensureGreen(); } } diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index 2bf691e6a36cf..842748107d1d1 100644 --- a/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -76,7 +76,7 @@ public String getType() { @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { assert 
scriptSource.equals("explainable_script"); - assert context == SearchScript.CONTEXT; + assert context == SearchScript.SCRIPT_SCORE_CONTEXT; SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() { @Override public SearchScript newInstance(LeafReaderContext context) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 055139eacf716..0396b8ac78820 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -217,8 +217,9 @@ private static String toCamelCase(String s) { // should we expose it, or maybe think about higher level integration of the // fake term frequency feature (LUCENE-7854) .put("delimitedtermfrequency", Void.class) - // not exposed, only used internally to index shingles and speed up phrase queries - .put("fixedshingle", Void.class) + // LUCENE-8273: ConditionalTokenFilter allows analysis chains to skip + // particular token filters based on the attributes of the current token. + .put("termexclusion", Void.class) .immutableMap(); diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java index 5a92c99d61870..ff996c800b59e 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java @@ -27,9 +27,6 @@ /** Utility methods for testing plugins */ public class PluginTestUtil { - public static void writeMetaPluginProperties(Path pluginDir, String... stringProps) throws IOException { - writeProperties(pluginDir.resolve(MetaPluginInfo.ES_META_PLUGIN_PROPERTIES), stringProps); - } public static void writePluginProperties(Path pluginDir, String... stringProps) throws IOException { writeProperties(pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES), stringProps); diff --git a/x-pack/docs/en/index.asciidoc b/x-pack/docs/en/index.asciidoc deleted file mode 100644 index 3133053c5bd23..0000000000000 --- a/x-pack/docs/en/index.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ - -include::{es-repo-dir}/index-shared1.asciidoc[] - -include::setup/setup-xes.asciidoc[] - -include::{es-repo-dir}/index-shared2.asciidoc[] - -include::{es-repo-dir}/index-shared3.asciidoc[] - -include::sql/index.asciidoc[] - -include::monitoring/index.asciidoc[] - -include::rollup/index.asciidoc[] - -include::rest-api/index.asciidoc[] - -include::commands/index.asciidoc[] - -include::{es-repo-dir}/index-shared4.asciidoc[] - -include::{es-repo-dir}/index-shared5.asciidoc[] diff --git a/x-pack/docs/en/ml/forecasting.asciidoc b/x-pack/docs/en/ml/forecasting.asciidoc index 95693a1677f0a..cd01aa0fb77ca 100644 --- a/x-pack/docs/en/ml/forecasting.asciidoc +++ b/x-pack/docs/en/ml/forecasting.asciidoc @@ -59,10 +59,7 @@ For more information about any of these functions, see <>. * Forecasts run concurrently with real-time {ml} analysis. That is to say, {ml} analysis does not stop while forecasts are generated. Forecasts can have an impact on {ml} jobs, however, especially in terms of memory usage. For this -reason, forecasts run only if the model memory status is acceptable and the -snapshot models for the forecast do not require more than 20 MB. 
If these memory -limits are reached, consider splitting the job into multiple smaller jobs and -creating forecasts for these. +reason, forecasts run only if the model memory status is acceptable. * The job must be open when you create a forecast. Otherwise, an error occurs. * If there is insufficient data to generate any meaningful predictions, an error occurs. In general, forecasts that are created early in the learning phase diff --git a/x-pack/docs/en/ml/index.asciidoc b/x-pack/docs/en/ml/index.asciidoc index c36f77ca812aa..4c9a32da8d678 100644 --- a/x-pack/docs/en/ml/index.asciidoc +++ b/x-pack/docs/en/ml/index.asciidoc @@ -17,11 +17,20 @@ from {es} for analysis and anomaly results are displayed in {kib} dashboards. -- +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/overview.asciidoc include::overview.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started.asciidoc include::getting-started.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/configuring.asciidoc include::configuring.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/stopping-ml.asciidoc include::stopping-ml.asciidoc[] -// include::ml-scenarios.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/api-quickref.asciidoc include::api-quickref.asciidoc[] -//include::troubleshooting.asciidoc[] Referenced from x-pack/docs/public/xpack-troubleshooting.asciidoc + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/functions.asciidoc include::functions.asciidoc[] diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc index 74fc9f1e1db12..d18f441e293f1 100644 --- a/x-pack/docs/en/security/authentication/built-in-users.asciidoc +++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc @@ -118,7 +118,7 @@ the `logstash.yml` configuration file: xpack.monitoring.elasticsearch.password: logstashpassword ---------------------------------------------------------- -If you have upgraded from an older version of elasticsearch/x-pack, +If you have upgraded from an older version of Elasticsearch, the `logstash_system` user may have defaulted to _disabled_ for security reasons. Once the password has been changed, you can enable the user via the following API call: diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc index 8e0114b7454c6..0ae33d434a1f5 100644 --- a/x-pack/docs/en/security/authentication/custom-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc @@ -50,7 +50,7 @@ public AuthenticationFailureHandler getAuthenticationFailureHandler() { ---------------------------------------------------- + The `getAuthenticationFailureHandler` method is used to optionally provide a -custom `AuthenticationFailureHandler`, which will control how X-Pack responds +custom `AuthenticationFailureHandler`, which will control how {security} responds in certain authentication failure events. 
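The hunk stops just short of the `[source,java]` listing that follows in the file. For orientation: a failure handler is an object the realm plugin hands back so that {security} can ask it which exception, and therefore which HTTP response and headers, to return for each kind of failure. A rough stand-alone sketch; the class and method shapes below are hypothetical stand-ins, not the real x-pack interface:

---------------------------------------------------------------------------
// Hypothetical stand-in for x-pack's AuthenticationFailureHandler SPI;
// the real method signatures live in the x-pack security code.
public class CustomFailureHandlerSketch {

    /** Credentials were presented but rejected. */
    public SecurityException failedAuthentication(String principal) {
        // Deliberately generic message: do not reveal whether the user exists.
        return new SecurityException("unable to authenticate user [" + principal + "]");
    }

    /** The request carried no credentials at all. */
    public SecurityException missingToken(String action) {
        return new SecurityException("missing authentication token for [" + action + "]");
    }
}
---------------------------------------------------------------------------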
+ [source,java] diff --git a/x-pack/docs/en/security/gs-index.asciidoc b/x-pack/docs/en/security/gs-index.asciidoc index b320b2a6d82e5..bf211c3692908 100644 --- a/x-pack/docs/en/security/gs-index.asciidoc +++ b/x-pack/docs/en/security/gs-index.asciidoc @@ -25,8 +25,8 @@ Security protects Elasticsearch clusters by: To prevent unauthorized access to your Elasticsearch cluster, you must have a way to _authenticate_ users. This simply means that you need a way to validate that a user is who they claim to be. For example, you have to make sure only -the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack -Security provides a standalone authentication mechanism that enables you to +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. {security} +provides a standalone authentication mechanism that enables you to quickly password-protect your cluster. If you're already using {xpack-ref}/ldap-realm.html[LDAP], {xpack-ref}/active-directory-realm.html[ Active Directory], or {xpack-ref}/pki-realm.html[ PKI] to manage users in your organization, {security} is able to integrate with those @@ -83,7 +83,7 @@ issues. * {xpack-ref}/ccs-clients-integrations.html[Integrations] shows you how to interact with an Elasticsearch cluster protected by - X-Pack Security. + {security}. * {xpack-ref}/security-reference.html[Reference] provides detailed information about the access privileges you can grant to diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc index d5f970a3fb826..96e9287ca01ec 100644 --- a/x-pack/docs/en/security/index.asciidoc +++ b/x-pack/docs/en/security/index.asciidoc @@ -26,8 +26,8 @@ Security protects Elasticsearch clusters by: To prevent unauthorized access to your Elasticsearch cluster, you must have a way to _authenticate_ users. This simply means that you need a way to validate that a user is who they claim to be. For example, you have to make sure only -the person named _Kelsey Andorra_ can sign in as the user `kandorra`. X-Pack -Security provides a standalone authentication mechanism that enables you to +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. {security} +provides a standalone authentication mechanism that enables you to quickly password-protect your cluster. If you're already using <>, <>, or <> to manage users in your organization, {security} is able to integrate with those @@ -81,7 +81,7 @@ issues. * <> shows you how to interact with an Elasticsearch cluster protected by - X-Pack Security. + {security}. * <> provides detailed information about the access privileges you can grant to @@ -95,20 +95,29 @@ Head over to our {security-forum}[Security Discussion Forum] to share your experience, questions, and suggestions. 
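One pattern worth calling out before the ML hunks further down: many of them collapse the same two-step lookup of the ML custom cluster metadata into the new null-safe `MlMetadata.getMlMetadata(ClusterState)` accessor added in `MlMetadata.java` below. Condensed from those hunks, the call-site change looks like this:

---------------------------------------------------------------------------
// before: every caller re-implemented the null handling
MlMetadata maybeNull = state.metaData().custom(MLMetadataField.TYPE);
final MlMetadata mlMetadata = (maybeNull == null) ? MlMetadata.EMPTY_METADATA : maybeNull;

// after: the null check (including a null ClusterState) lives in one place
final MlMetadata mlMetadata = MlMetadata.getMlMetadata(state);
---------------------------------------------------------------------------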
-- +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/getting-started.asciidoc include::getting-started.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/how-security-works.asciidoc include::how-security-works.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/overview.asciidoc include::authentication/overview.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authorization/overview.asciidoc include::authorization/overview.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/auditing.asciidoc include::auditing.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications.asciidoc include::securing-communications.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/using-ip-filtering.asciidoc include::using-ip-filtering.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/ccs-clients-integrations.asciidoc include::ccs-clients-integrations.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/reference.asciidoc include::reference.asciidoc[] diff --git a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc index 6ee9c29b44ff3..6cab0de510e2e 100644 --- a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc +++ b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc @@ -1,10 +1,6 @@ [role="xpack"] [[bootstrap-checks-xpack]] == Bootstrap Checks for {xpack} -++++ -Bootstrap Checks -++++ - In addition to the <>, there are checks that are specific to {xpack} features. diff --git a/x-pack/docs/en/setup/setup-xes.asciidoc b/x-pack/docs/en/setup/setup-xes.asciidoc index c14c7c585def6..b0003b1e0b7c4 100644 --- a/x-pack/docs/en/setup/setup-xes.asciidoc +++ b/x-pack/docs/en/setup/setup-xes.asciidoc @@ -1,9 +1,7 @@ [role="xpack"] [[setup-xpack]] -= Set up {xpack} +== Set up {xpack} -[partintro] --- {xpack} is an Elastic Stack extension that provides security, alerting, monitoring, reporting, machine learning, and many other capabilities. By default, when you install {es}, {xpack} is installed. @@ -20,10 +18,3 @@ https://www.elastic.co/subscriptions. * <> * <> --- - -include::{xes-repo-dir}/monitoring/configuring-monitoring.asciidoc[] -include::{xes-repo-dir}/security/configuring-es.asciidoc[] -include::setup-xclient.asciidoc[] -include::{xes-repo-dir}/settings/configuring-xes.asciidoc[] -include::bootstrap-checks-xes.asciidoc[] diff --git a/x-pack/docs/en/watcher/index.asciidoc b/x-pack/docs/en/watcher/index.asciidoc index 3cbc54b0a484b..2be3638971929 100644 --- a/x-pack/docs/en/watcher/index.asciidoc +++ b/x-pack/docs/en/watcher/index.asciidoc @@ -65,24 +65,35 @@ from the query, whether the condition was met, and what actions were taken. 
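One small but easy-to-miss change in the watcher hunks that follow: the `java.asciidoc` example drops the removed `InetSocketTransportAddress` class in favour of plain `TransportAddress`. Condensed from that hunk, the documented client bootstrap now reads as below (a sketch; imports and most settings elided, host and cluster name are the doc's placeholders):

---------------------------------------------------------------------------
TransportClient client = new PreBuiltXPackTransportClient(Settings.builder()
        .put("cluster.name", "myClusterName")
        .build())
    // TransportAddress(InetAddress, int) replaces the old InetSocketTransportAddress
    .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300));

XPackClient xpackClient = new XPackClient(client);
WatcherClient watcherClient = xpackClient.watcher();
---------------------------------------------------------------------------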
-- +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/getting-started.asciidoc include::getting-started.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/how-watcher-works.asciidoc include::how-watcher-works.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/encrypting-data.asciidoc include::encrypting-data.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/input.asciidoc include::input.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/trigger.asciidoc include::trigger.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/condition.asciidoc include::condition.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/actions.asciidoc include::actions.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/transform.asciidoc include::transform.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/java.asciidoc include::java.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/managing-watches.asciidoc include::managing-watches.asciidoc[] +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/watcher/example-watches.asciidoc include::example-watches.asciidoc[] diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc index f54e8554e8e88..bcf41252433ba 100644 --- a/x-pack/docs/en/watcher/java.asciidoc +++ b/x-pack/docs/en/watcher/java.asciidoc @@ -95,7 +95,7 @@ TransportClient client = new PreBuiltXPackTransportClient(Settings.builder() .put("cluster.name", "myClusterName") ... .build()) - .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300)); + .addTransportAddress(new TransportAddress(InetAddress.getByName("localhost"), 9300)); XPackClient xpackClient = new XPackClient(client); WatcherClient watcherClient = xpackClient.watcher(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index b709e32946ec6..6af323f1510e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -467,6 +468,14 @@ public static DatafeedState getDatafeedState(String datafeedId, @Nullable Persis } } + public static MlMetadata getMlMetadata(ClusterState state) { + MlMetadata mlMetadata = (state == null) ? 
null : state.getMetaData().custom(MLMetadataField.TYPE); + if (mlMetadata == null) { + return EMPTY_METADATA; + } + return mlMetadata; + } + public static class JobAlreadyMarkedAsDeletedException extends RuntimeException { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java index 1ac842e8898bf..4e51d7b6c1e30 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/AnomalyDetectorsIndex.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.core.ml.job.persistence; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; /** @@ -47,8 +46,7 @@ public static String resultsWriteAlias(String jobId) { * @return The index name */ public static String getPhysicalIndexFromState(ClusterState state, String jobId) { - MlMetadata meta = state.getMetaData().custom(MLMetadataField.TYPE); - return meta.getJobs().get(jobId).getResultsIndexName(); + return MlMetadata.getMlMetadata(state).getJobs().get(jobId).getResultsIndexName(); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java index 8c08300354698..2d68a6d7cf7a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerState.java @@ -37,6 +37,16 @@ public static final String v54DocumentPrefix(String jobId) { return jobId + "#"; } + /** + * Given the id of a categorizer state document it extracts the job id + * @param docId the categorizer state document id + * @return the job id or {@code null} if the id is not valid + */ + public static final String extractJobId(String docId) { + int suffixIndex = docId.lastIndexOf("_" + TYPE); + return suffixIndex <= 0 ? null : docId.substring(0, suffixIndex); + } + private CategorizerState() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java index dce791a2b3d26..fbec7bb6c7291 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelState.java @@ -29,6 +29,16 @@ public static final String v54DocumentId(String jobId, String snapshotId, int do return jobId + "-" + snapshotId + "#" + docNum; } + /** + * Given the id of a state document it extracts the job id + * @param docId the state document id + * @return the job id or {@code null} if the id is not valid + */ + public static final String extractJobId(String docId) { + int suffixIndex = docId.lastIndexOf("_" + TYPE + "_"); + return suffixIndex <= 0 ? 
null : docId.substring(0, suffixIndex); + } + private ModelState() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java index 0c167aadb7623..0b3ddcc7b5197 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/Quantiles.java @@ -60,6 +60,16 @@ public static String v54DocumentId(String jobId) { return jobId + "-" + TYPE; } + /** + * Given the id of a quantiles document it extracts the job id + * @param docId the quantiles document id + * @return the job id or {@code null} if the id is not valid + */ + public static final String extractJobId(String docId) { + int suffixIndex = docId.lastIndexOf("_" + TYPE); + return suffixIndex <= 0 ? null : docId.substring(0, suffixIndex); + } + private final String jobId; private final Date timestamp; private final String quantileState; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java index 4cf0898bae2ff..25e2c928d9a57 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java @@ -12,8 +12,9 @@ public final class WatcherIndexTemplateRegistryField { // version 3: include watch status in history // version 6: upgrade to ES 6, removal of _status field // version 7: add full exception stack traces for better debugging + // version 8: fix slack attachment property not to be dynamic, causing field type issues // Note: if you change this, also inform the kibana team around the watcher-ui - public static final String INDEX_TEMPLATE_VERSION = "7"; + public static final String INDEX_TEMPLATE_VERSION = "8"; public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION; public static final String TRIGGERED_TEMPLATE_NAME = ".triggered_watches"; public static final String WATCHES_TEMPLATE_NAME = ".watches"; diff --git a/x-pack/plugin/core/src/main/resources/watch-history.json b/x-pack/plugin/core/src/main/resources/watch-history.json index a26305b35542a..d158281c264d2 100644 --- a/x-pack/plugin/core/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/src/main/resources/watch-history.json @@ -507,6 +507,13 @@ "properties" : { "color" : { "type" : "keyword" + }, + "fields" : { + "properties" : { + "value" : { + "type" : "text" + } + } } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java new file mode 100644 index 0000000000000..726288faffbc7 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/CategorizerStateTests.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class CategorizerStateTests extends ESTestCase { + + public void testExtractJobId_GivenValidDocId() { + assertThat(CategorizerState.extractJobId("foo_categorizer_state#1"), equalTo("foo")); + assertThat(CategorizerState.extractJobId("bar_categorizer_state#2"), equalTo("bar")); + assertThat(CategorizerState.extractJobId("foo_bar_categorizer_state#3"), equalTo("foo_bar")); + assertThat(CategorizerState.extractJobId("_categorizer_state_categorizer_state#3"), equalTo("_categorizer_state")); + } + + public void testExtractJobId_GivenInvalidDocId() { + assertThat(CategorizerState.extractJobId(""), is(nullValue())); + assertThat(CategorizerState.extractJobId("foo"), is(nullValue())); + assertThat(CategorizerState.extractJobId("_categorizer_state"), is(nullValue())); + assertThat(CategorizerState.extractJobId("foo_model_state_3141341341"), is(nullValue())); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java new file mode 100644 index 0000000000000..0e42a06111931 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/ModelStateTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ml.job.process.autodetect.state; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class ModelStateTests extends ESTestCase { + + public void testExtractJobId_GivenValidDocId() { + assertThat(ModelState.extractJobId("foo_model_state_3151373783#1"), equalTo("foo")); + assertThat(ModelState.extractJobId("bar_model_state_451515#3"), equalTo("bar")); + assertThat(ModelState.extractJobId("foo_bar_model_state_blah_blah"), equalTo("foo_bar")); + assertThat(ModelState.extractJobId("_model_state_model_state_11111"), equalTo("_model_state")); + } + + public void testExtractJobId_GivenInvalidDocId() { + assertThat(ModelState.extractJobId(""), is(nullValue())); + assertThat(ModelState.extractJobId("foo"), is(nullValue())); + assertThat(ModelState.extractJobId("_model_3141341341"), is(nullValue())); + assertThat(ModelState.extractJobId("_state_3141341341"), is(nullValue())); + assertThat(ModelState.extractJobId("_model_state_3141341341"), is(nullValue())); + assertThat(ModelState.extractJobId("foo_quantiles"), is(nullValue())); + } +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java index 84c1a161f1ee4..146e3ed5bd539 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/state/QuantilesTests.java @@ -15,9 +15,26 @@ import java.util.Date; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; public class QuantilesTests extends AbstractSerializingTestCase { + public void testExtractJobId_GivenValidDocId() { + assertThat(Quantiles.extractJobId("foo_quantiles"), equalTo("foo")); + assertThat(Quantiles.extractJobId("bar_quantiles"), equalTo("bar")); + assertThat(Quantiles.extractJobId("foo_bar_quantiles"), equalTo("foo_bar")); + assertThat(Quantiles.extractJobId("_quantiles_quantiles"), equalTo("_quantiles")); + } + + public void testExtractJobId_GivenInvalidDocId() { + assertThat(Quantiles.extractJobId(""), is(nullValue())); + assertThat(Quantiles.extractJobId("foo"), is(nullValue())); + assertThat(Quantiles.extractJobId("_quantiles"), is(nullValue())); + assertThat(Quantiles.extractJobId("foo_model_state_3141341341"), is(nullValue())); + } + public void testEquals_GivenSameObject() { Quantiles quantiles = new Quantiles("foo", new Date(0L), "foo"); assertTrue(quantiles.equals(quantiles)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index c5402152fead5..bdefabdb294e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -286,7 +286,8 @@ public List> getSettings() { DataCountsReporter.ACCEPTABLE_PERCENTAGE_DATE_PARSE_ERRORS_SETTING, DataCountsReporter.ACCEPTABLE_PERCENTAGE_OUT_OF_ORDER_ERRORS_SETTING, AutodetectProcessManager.MAX_RUNNING_JOBS_PER_NODE, - 
AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE)); + AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE, + AutodetectProcessManager.MIN_DISK_SPACE_OFF_HEAP)); } public Settings additionalSettings() { @@ -403,6 +404,9 @@ public Collection createComponents(Client client, ClusterService cluster // This object's constructor attaches to the license state, so there's no need to retain another reference to it new InvalidLicenseEnforcer(settings, getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); + // run node startup tasks + autodetectProcessManager.onNodeStartup(); + return Arrays.asList( mlLifeCycleService, jobProvider, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index f533fc1aa5a7c..05af1ffee17a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -23,7 +23,6 @@ import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; @@ -132,15 +131,7 @@ public Map nativeCodeInfo() { @Override public void usage(ActionListener listener) { ClusterState state = clusterService.state(); - MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE); - - // Handle case when usage is called but MlMetadata has not been installed yet - if (mlMetadata == null) { - listener.onResponse(new MachineLearningFeatureSetUsage(available(), enabled, - Collections.emptyMap(), Collections.emptyMap())); - } else { - new Retriever(client, mlMetadata, available(), enabled()).execute(listener); - } + new Retriever(client, MlMetadata.getMlMetadata(state), available(), enabled()).execute(listener); } public static class Retriever { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index 87cf03caa9949..37d714d1777da 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; @@ -90,8 +89,7 @@ public void clusterChanged(ClusterChangedEvent event) { } } else if (StartDatafeedAction.TASK_NAME.equals(currentTask.getTaskName())) { String datafeedId = ((StartDatafeedAction.DatafeedParams) currentTask.getParams()).getDatafeedId(); - MlMetadata mlMetadata = event.state().getMetaData().custom(MLMetadataField.TYPE); - DatafeedConfig datafeedConfig = mlMetadata.getDatafeed(datafeedId); + DatafeedConfig datafeedConfig = MlMetadata.getMlMetadata(event.state()).getDatafeed(datafeedId); if (currentAssignment.getExecutorNode() == 
null) { String msg = "No node found to start datafeed [" + datafeedId +"]. Reasons [" + currentAssignment.getExplanation() + "]"; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java index 5dba8ce943c44..c96a12ffa1047 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlInitializationService.java @@ -7,20 +7,13 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; -import org.elasticsearch.xpack.core.ml.MlMetadata; - -import java.util.concurrent.atomic.AtomicBoolean; class MlInitializationService extends AbstractComponent implements ClusterStateListener { @@ -28,8 +21,6 @@ class MlInitializationService extends AbstractComponent implements ClusterStateL private final ClusterService clusterService; private final Client client; - private final AtomicBoolean installMlMetadataCheck = new AtomicBoolean(false); - private volatile MlDailyMaintenanceService mlDailyMaintenanceService; MlInitializationService(Settings settings, ThreadPool threadPool, ClusterService clusterService, Client client) { @@ -48,45 +39,12 @@ public void clusterChanged(ClusterChangedEvent event) { } if (event.localNodeMaster()) { - MetaData metaData = event.state().metaData(); - installMlMetadata(metaData); installDailyMaintenanceService(); } else { uninstallDailyMaintenanceService(); } } - private void installMlMetadata(MetaData metaData) { - if (metaData.custom(MLMetadataField.TYPE) == null) { - if (installMlMetadataCheck.compareAndSet(false, true)) { - threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> - clusterService.submitStateUpdateTask("install-ml-metadata", new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - // If the metadata has been added already don't try to update - if (currentState.metaData().custom(MLMetadataField.TYPE) != null) { - return currentState; - } - ClusterState.Builder builder = new ClusterState.Builder(currentState); - MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData()); - metadataBuilder.putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA); - builder.metaData(metadataBuilder.build()); - return builder.build(); - } - - @Override - public void onFailure(String source, Exception e) { - installMlMetadataCheck.set(false); - logger.error("unable to install ml metadata", e); - } - }) - ); - } - } else { - installMlMetadataCheck.set(false); - } - } - private void installDailyMaintenanceService() { if (mlDailyMaintenanceService == null) { mlDailyMaintenanceService = new MlDailyMaintenanceService(clusterService.getClusterName(), threadPool, client); diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 7d113a838dd45..fa649e541963d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -27,7 +27,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; @@ -92,8 +91,7 @@ public TransportCloseJobAction(Settings settings, TransportService transportServ static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState state, List openJobIds, List closingJobIds) { PersistentTasksCustomMetaData tasksMetaData = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - MlMetadata maybeNull = state.metaData().custom(MLMetadataField.TYPE); - final MlMetadata mlMetadata = (maybeNull == null) ? MlMetadata.EMPTY_METADATA : maybeNull; + final MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); List failedJobs = new ArrayList<>(); @@ -107,7 +105,7 @@ static void resolveAndValidateJobId(CloseJobAction.Request request, ClusterState }; Set expandedJobIds = mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs()); - expandedJobIds.stream().forEach(jobIdProcessor::accept); + expandedJobIds.forEach(jobIdProcessor::accept); if (request.isForce() == false && failedJobs.size() > 0) { if (expandedJobIds.size() == 1) { throw ExceptionsHelper.conflictStatusException("cannot close job [{}] because it failed, use force close", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 79995af9e62aa..220d97e89ba14 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -119,8 +119,8 @@ protected DeleteDatafeedAction.Response newResponse(boolean acknowledged) { } @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { + MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState); PersistentTasksCustomMetaData persistentTasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index 0e1ca9dd9aec3..9ab2132b61912 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -22,6 +22,7 @@ import 
org.elasticsearch.xpack.ml.job.retention.ExpiredModelSnapshotsRemover; import org.elasticsearch.xpack.ml.job.retention.ExpiredResultsRemover; import org.elasticsearch.xpack.ml.job.retention.MlDataRemover; +import org.elasticsearch.xpack.ml.job.retention.UnusedStateRemover; import org.elasticsearch.xpack.ml.notifications.Auditor; import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; @@ -56,7 +57,8 @@ private void deleteExpiredData(ActionListener List dataRemovers = Arrays.asList( new ExpiredResultsRemover(client, clusterService, auditor), new ExpiredForecastsRemover(client), - new ExpiredModelSnapshotsRemover(client, clusterService) + new ExpiredModelSnapshotsRemover(client, clusterService), + new UnusedStateRemover(client, clusterService) ); Iterator dataRemoversIterator = new VolatileCursorIterator<>(dataRemovers); deleteExpiredData(dataRemoversIterator, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java index 19e6ec595042a..e14cd76aa183a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteFilterAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Detector; @@ -60,8 +59,7 @@ protected void doExecute(DeleteFilterAction.Request request, ActionListener jobs = currentMlMetadata.getJobs(); + Map jobs = MlMetadata.getMlMetadata(state).getJobs(); List currentlyUsedBy = new ArrayList<>(); for (Job job : jobs.values()) { List detectors = job.getAnalysisConfig().getDetectors(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index b664487c77df6..90821b302fc5c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -200,10 +200,9 @@ public void onFailure(Exception e) { void markJobAsDeleting(String jobId, ActionListener listener, boolean force) { clusterService.submitStateUpdateTask("mark-job-as-deleted", new ClusterStateUpdateTask() { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { PersistentTasksCustomMetaData tasks = currentState.metaData().custom(PersistentTasksCustomMetaData.TYPE); - MlMetadata.Builder builder = new MlMetadata.Builder(currentMlMetadata); + MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); builder.markJobAsDeleted(jobId, tasks, force); return buildNewClusterState(currentState, builder); } @@ -248,11 +247,7 @@ public void onTimeout(TimeValue timeout) { } static boolean jobIsDeletedFromState(String jobId, ClusterState clusterState) { - MlMetadata metadata = 
clusterState.metaData().custom(MLMetadataField.TYPE); - if (metadata == null) { - return true; - } - return !metadata.getJobs().containsKey(jobId); + return !MlMetadata.getMlMetadata(clusterState).getJobs().containsKey(jobId); } private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java index 3f8ca39a0f124..e939c6ef31a2f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportFinalizeJobExecutionAction.java @@ -56,8 +56,8 @@ protected void masterOperation(FinalizeJobExecutionAction.Request request, Clust logger.debug("finalizing jobs [{}]", jobIdString); clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() { @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { + MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder(mlMetadata); Date finishedTime = new Date(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index 3b09377b477b8..aaa59e7e8cac0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -15,6 +15,8 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -28,6 +30,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; import java.io.IOException; +import java.nio.file.Path; import java.util.List; import java.util.function.Consumer; @@ -36,6 +39,8 @@ public class TransportForecastJobAction extends TransportJobTaskAction { + private static final ByteSizeValue FORECAST_LOCAL_STORAGE_LIMIT = new ByteSizeValue(500, ByteSizeUnit.MB); + private final JobProvider jobProvider; @Inject public TransportForecastJobAction(Settings settings, TransportService transportService, ThreadPool threadPool, @@ -73,6 +78,13 @@ protected void taskOperation(ForecastJobAction.Request request, TransportOpenJob paramsBuilder.expiresIn(request.getExpiresIn()); } + // tmp storage might be null, we do not log here, because it might not be + // required + Path tmpStorage = processManager.tryGetTmpStorage(task, FORECAST_LOCAL_STORAGE_LIMIT); + if (tmpStorage != null) { + paramsBuilder.tmpStorage(tmpStorage.toString()); + } + ForecastParams params = paramsBuilder.build(); processManager.forecastJob(task, params, e -> { if (e == null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java index d59febf12cd36..c81bb2642236d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetCalendarEventsAction.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction; import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction; @@ -70,7 +69,7 @@ protected void doExecute(GetCalendarEventsAction.Request request, if (request.getJobId() != null) { ClusterState state = clusterService.state(); - MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE); + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); List jobGroups; String requestId = request.getJobId(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java index c94449e8c3616..b4e3851eda820 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -52,10 +51,7 @@ protected void masterOperation(GetDatafeedsAction.Request request, ClusterState ActionListener listener) throws Exception { logger.debug("Get datafeed '{}'", request.getDatafeedId()); - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); List datafeedConfigs = new ArrayList<>(); for (String expandedDatafeedId : expandedDatafeedIds) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 0d5b2b202098b..41c8379c39fb8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; @@ -56,11 +55,7 @@ 
protected void masterOperation(GetDatafeedsStatsAction.Request request, ClusterS ActionListener listener) throws Exception { logger.debug("Get stats for datafeed '{}'", request.getDatafeedId()); - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } - + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); Set expandedDatafeedIds = mlMetadata.expandDatafeedIds(request.getDatafeedId(), request.allowNoDatafeeds()); PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index f8f8fffa5b710..78bfe2c7bc6b0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; @@ -69,8 +68,7 @@ public TransportGetJobsStatsAction(Settings settings, TransportService transport @Override protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionListener listener) { - MlMetadata clusterMlMetadata = clusterService.state().metaData().custom(MLMetadataField.TYPE); - MlMetadata mlMetadata = (clusterMlMetadata == null) ? MlMetadata.EMPTY_METADATA : clusterMlMetadata; + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); request.setExpandedJobsIds(new ArrayList<>(mlMetadata.expandJobIds(request.getJobId(), request.allowNoJobs()))); ActionListener finalListener = listener; listener = ActionListener.wrap(response -> gatherStatsForClosedJobs(mlMetadata, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 5ae84129254e5..5e829c72756a7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -49,7 +49,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -163,7 +162,7 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j continue; } - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); Job job = mlMetadata.getJobs().get(jobId); Set compatibleJobTypes = Job.getCompatibleJobTypes(node.getVersion()); if (compatibleJobTypes.contains(job.getJobType()) == false) { @@ -474,8 +473,7 @@ public void onFailure(Exception e) { // Step 3. 
Update established model memory for pre-6.1 jobs that haven't had it set ActionListener missingMappingsListener = ActionListener.wrap( response -> { - MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE); - Job job = mlMetadata.getJobs().get(jobParams.getJobId()); + Job job = MlMetadata.getMlMetadata(clusterService.state()).getJobs().get(jobParams.getJobId()); if (job != null) { Version jobVersion = job.getJobVersion(); Long jobEstablishedModelMemory = job.getEstablishedModelMemory(); @@ -650,8 +648,7 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(OpenJobAction.JobP public void validate(OpenJobAction.JobParams params, ClusterState clusterState) { // If we already know that we can't find an ml node because all ml nodes are running at capacity or // simply because there are no ml nodes in the cluster then we fail quickly here: - MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE); - TransportOpenJobAction.validate(params.getJobId(), mlMetadata); + TransportOpenJobAction.validate(params.getJobId(), MlMetadata.getMlMetadata(clusterState)); PersistentTasksCustomMetaData.Assignment assignment = selectLeastLoadedMlNode(params.getJobId(), clusterState, maxConcurrentJobAllocations, fallbackMaxNumberOfOpenJobs, maxMachineMemoryPercent, logger); if (assignment.getExecutorNode() == null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 2ffb318dc4fb2..9cba0b20c51b9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; @@ -52,7 +51,7 @@ public TransportPreviewDatafeedAction(Settings settings, ThreadPool threadPool, @Override protected void doExecute(PreviewDatafeedAction.Request request, ActionListener listener) { - MlMetadata mlMetadata = clusterService.state().getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterService.state()); DatafeedConfig datafeed = mlMetadata.getDatafeed(request.getDatafeedId()); if (datafeed == null) { throw ExceptionsHelper.missingDatafeedException(request.getDatafeedId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java index 11e8fbf912c5b..1393d663fb251 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutCalendarAction.java @@ -14,9 +14,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; 
-import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -26,16 +24,12 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.calendars.Calendar; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -43,17 +37,15 @@ public class TransportPutCalendarAction extends HandledTransportAction { private final Client client; - private final ClusterService clusterService; @Inject public TransportPutCalendarAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - Client client, ClusterService clusterService) { + Client client) { super(settings, PutCalendarAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PutCalendarAction.Request::new); this.client = client; - this.clusterService = clusterService; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java index 0c492a0817c98..2b4304a205b13 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDatafeedAction.java @@ -141,7 +141,7 @@ public ClusterState execute(ClusterState currentState) { } private ClusterState putDatafeed(PutDatafeedAction.Request request, ClusterState clusterState) { - MlMetadata currentMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata currentMetadata = MlMetadata.getMlMetadata(clusterState); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build(); return ClusterState.builder(clusterState).metaData( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index aba52b38ecd27..71afa656a4a69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -130,7 +130,7 @@ public void onFailure(Exception e) { }; // Verify data extractor factory can be created, then start persistent task - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); validate(params.getDatafeedId(), mlMetadata, tasks); DatafeedConfig datafeed = mlMetadata.getDatafeed(params.getDatafeedId()); 
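Nearly every hunk in this patch makes the same substitution: the repeated pattern of fetching the custom metadata with clusterState.metaData().custom(MLMetadataField.TYPE), null-checking it, and falling back to MlMetadata.EMPTY_METADATA is collapsed into a single MlMetadata.getMlMetadata(clusterState) call. A minimal sketch of what that accessor presumably looks like, inferred purely from the removed call sites (the real implementation lives in org.elasticsearch.xpack.core.ml.MlMetadata and may differ):

---------------------------------------------------------------------------
// Sketch only: centralizes the null handling each call site used to repeat.
// EMPTY_METADATA and MLMetadataField.TYPE are the constants visible in the
// removed code above.
public static MlMetadata getMlMetadata(ClusterState clusterState) {
    MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE);
    return mlMetadata == null ? MlMetadata.EMPTY_METADATA : mlMetadata;
}
---------------------------------------------------------------------------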
@@ -221,9 +221,8 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(StartDatafeedActio @Override public void validate(StartDatafeedAction.DatafeedParams params, ClusterState clusterState) { - MlMetadata mlMetadata = clusterState.metaData().custom(MLMetadataField.TYPE); PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - TransportStartDatafeedAction.validate(params.getDatafeedId(), mlMetadata, tasks); + TransportStartDatafeedAction.validate(params.getDatafeedId(), MlMetadata.getMlMetadata(clusterState), tasks); new DatafeedNodeSelector(clusterState, resolver, params.getDatafeedId()).checkDatafeedTaskCanBeCreated(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 5ed923e3b2370..b5f16ff191fe2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -130,7 +130,7 @@ protected void doExecute(Task task, StopDatafeedAction.Request request, ActionLi new ActionListenerResponseHandler<>(listener, StopDatafeedAction.Response::new)); } } else { - MlMetadata mlMetadata = state.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); PersistentTasksCustomMetaData tasks = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); List startedDatafeeds = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java index 5df7e890a8fa4..0524cb28a0c11 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateCalendarJobAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PutCalendarAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; @@ -48,8 +47,7 @@ public TransportUpdateCalendarJobAction(Settings settings, ThreadPool threadPool @Override protected void doExecute(UpdateCalendarJobAction.Request request, ActionListener listener) { ClusterState clusterState = clusterService.state(); - MlMetadata maybeNullMetaData = clusterState.getMetaData().custom(MLMetadataField.TYPE); - final MlMetadata mlMetadata = maybeNullMetaData == null ? 
MlMetadata.EMPTY_METADATA : maybeNullMetaData; + final MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); Set jobIdsToAdd = Strings.tokenizeByCommaToSet(request.getJobIdsToAddExpression()); Set jobIdsToRemove = Strings.tokenizeByCommaToSet(request.getJobIdsToRemoveExpression()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java index e50adfa8275e2..4d752fe294081 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateDatafeedAction.java @@ -63,9 +63,9 @@ protected PutDatafeedAction.Response newResponse(boolean acknowledged) { } @Override - public ClusterState execute(ClusterState currentState) throws Exception { + public ClusterState execute(ClusterState currentState) { DatafeedUpdate update = request.getUpdate(); - MlMetadata currentMetadata = currentState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata currentMetadata = MlMetadata.getMlMetadata(currentState); PersistentTasksCustomMetaData persistentTasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 5daf8ce28964e..e27156b512613 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; @@ -80,10 +79,7 @@ public DatafeedManager(ThreadPool threadPool, Client client, ClusterService clus public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer taskHandler) { String datafeedId = task.getDatafeedId(); ClusterState state = clusterService.state(); - MlMetadata mlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } + MlMetadata mlMetadata = MlMetadata.getMlMetadata(state); DatafeedConfig datafeed = mlMetadata.getDatafeed(datafeedId); Job job = mlMetadata.getJobs().get(datafeed.getJobId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index e52906a605d0d..37f9715d09464 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import 
org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.JobState; @@ -33,7 +32,7 @@ public class DatafeedNodeSelector { private final IndexNameExpressionResolver resolver; public DatafeedNodeSelector(ClusterState clusterState, IndexNameExpressionResolver resolver, String datafeedId) { - MlMetadata mlMetadata = Objects.requireNonNull(clusterState.metaData().custom(MLMetadataField.TYPE)); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); this.datafeed = mlMetadata.getDatafeed(datafeedId); this.jobTask = MlMetadata.getJobTask(datafeed.getJobId(), tasks); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 0f6a0f44cbf02..2d67e64ec60e7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -133,8 +133,7 @@ public Job getJobOrThrowIfUnknown(String jobId) { * @throws ResourceNotFoundException if no job matches {@code jobId} */ public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState) { - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - Job job = (mlMetadata == null) ? null : mlMetadata.getJobs().get(jobId); + Job job = MlMetadata.getMlMetadata(clusterState).getJobs().get(jobId); if (job == null) { throw ExceptionsHelper.missingJobException(jobId); } @@ -142,11 +141,7 @@ public static Job getJobOrThrowIfUnknown(String jobId, ClusterState clusterState } private Set expandJobIds(String expression, boolean allowNoJobs, ClusterState clusterState) { - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - if (mlMetadata == null) { - mlMetadata = MlMetadata.EMPTY_METADATA; - } - return mlMetadata.expandJobIds(expression, allowNoJobs); + return MlMetadata.getMlMetadata(clusterState).expandJobIds(expression, allowNoJobs); } /** @@ -160,7 +155,7 @@ private Set expandJobIds(String expression, boolean allowNoJobs, Cluster */ public QueryPage expandJobs(String expression, boolean allowNoJobs, ClusterState clusterState) { Set expandedJobIds = expandJobIds(expression, allowNoJobs, clusterState); - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(clusterState); List jobs = new ArrayList<>(); for (String expandedJobId : expandedJobIds) { jobs.add(mlMetadata.getJobs().get(expandedJobId)); @@ -188,8 +183,8 @@ public void putJob(PutJobAction.Request request, AnalysisRegistry analysisRegist DEPRECATION_LOGGER.deprecated("Creating jobs with delimited data format is deprecated. 
Please use xcontent instead."); } - MlMetadata currentMlMetadata = state.metaData().custom(MLMetadataField.TYPE); - if (currentMlMetadata != null && currentMlMetadata.getJobs().containsKey(job.getId())) { + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(state); + if (currentMlMetadata.getJobs().containsKey(job.getId())) { actionListener.onFailure(ExceptionsHelper.jobAlreadyExists(job.getId())); return; } @@ -469,8 +464,8 @@ protected Boolean newResponse(boolean acknowledged) { } @Override - public ClusterState execute(ClusterState currentState) throws Exception { - MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + public ClusterState execute(ClusterState currentState) { + MlMetadata currentMlMetadata = MlMetadata.getMlMetadata(currentState); if (currentMlMetadata.getJobs().containsKey(jobId) == false) { // We wouldn't have got here if the job never existed so // the Job must have been deleted by another action. @@ -560,8 +555,7 @@ public ClusterState execute(ClusterState currentState) { } private static MlMetadata.Builder createMlMetadataBuilder(ClusterState currentState) { - MlMetadata currentMlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); - return new MlMetadata.Builder(currentMlMetadata); + return new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); } private static ClusterState buildNewClusterState(ClusterState currentState, MlMetadata.Builder builder) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java index cf50579a0e517..d50a7c3f8c2ad 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedDocumentsIterator.java @@ -97,6 +97,7 @@ private SearchResponse initScroll() { searchRequest.source(new SearchSourceBuilder() .size(BATCH_SIZE) .query(getQuery()) + .fetchSource(shouldFetchSource()) .sort(SortBuilders.fieldSort(ElasticsearchMappings.ES_DOC))); SearchResponse searchResponse = client.search(searchRequest).actionGet(); @@ -123,6 +124,14 @@ private Deque mapHits(SearchResponse searchResponse) { return results; } + /** + * Should fetch source? Defaults to {@code true} + * @return whether the source should be fetched + */ + protected boolean shouldFetchSource() { + return true; + } + /** * Get the query to use for the search * @return the search query diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java new file mode 100644 index 0000000000000..92235570b47b5 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/BatchedStateDocIdsIterator.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.job.persistence; + +import org.elasticsearch.client.Client; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; + +/** + * Iterates through the state doc ids + */ +public class BatchedStateDocIdsIterator extends BatchedDocumentsIterator { + + public BatchedStateDocIdsIterator(Client client, String index) { + super(client, index); + } + + @Override + protected boolean shouldFetchSource() { + return false; + } + + @Override + protected QueryBuilder getQuery() { + return QueryBuilders.matchAllQuery(); + } + + @Override + protected String map(SearchHit hit) { + return hit.getId(); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java index a45f44c227657..43c3f4825ddf3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java @@ -22,8 +22,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeoutException; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** @@ -84,20 +82,7 @@ public long getPid() throws TimeoutException { } public Map getNativeCodeInfo() throws TimeoutException { - String copyrightMessage = cppLogHandler.getCppCopyright(CONTROLLER_CONNECT_TIMEOUT); - Matcher matcher = Pattern.compile("Version (.+) \\(Build ([^)]+)\\) Copyright ").matcher(copyrightMessage); - if (matcher.find()) { - Map info = new HashMap<>(2); - info.put("version", matcher.group(1)); - info.put("build_hash", matcher.group(2)); - return info; - } else { - // If this happens it probably means someone has changed the format in lib/ver/CBuildInfo.cc - // in the machine-learning-cpp repo without changing the pattern above to match - String msg = "Unexpected native controller process copyright format: " + copyrightMessage; - LOGGER.error(msg); - throw new ElasticsearchException(msg); - } + return cppLogHandler.getNativeCodeInfo(CONTROLLER_CONNECT_TIMEOUT); } public void startProcess(List command) throws IOException { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java new file mode 100644 index 0000000000000..8a0268a8d0793 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ml.job.process; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +/** + * Provide storage for native components. 
+ */ +public class NativeStorageProvider { + + private static final Logger LOGGER = Loggers.getLogger(NativeStorageProvider.class); + + + private static final String LOCAL_STORAGE_SUBFOLDER = "ml-local-data"; + private static final String LOCAL_STORAGE_TMP_FOLDER = "tmp"; + + private final Environment environment; + + // do not allow any usage below this threshold + private final ByteSizeValue minLocalStorageAvailable; + + public NativeStorageProvider(Environment environment, ByteSizeValue minDiskSpaceOffHeap) { + this.environment = environment; + this.minLocalStorageAvailable = minDiskSpaceOffHeap; + } + + /** + * Removes any temporary storage leftovers. + * + * Removes all temp files and folders which might be there as a result of an + * unclean node shutdown or broken clients. + * + * Do not call while there are running jobs. + * + * @throws IOException if cleanup fails + */ + public void cleanupLocalTmpStorageInCaseOfUncleanShutdown() throws IOException { + for (Path p : environment.dataFiles()) { + IOUtils.rm(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER)); + } + } + + /** + * Tries to find local storage for storing temporary data. + * + * @param uniqueIdentifier An identifier to be used as sub folder + * @param requestedSize The maximum size required + * @return Path for temporary storage if available, null otherwise + */ + public Path tryGetLocalTmpStorage(String uniqueIdentifier, ByteSizeValue requestedSize) { + for (Path path : environment.dataFiles()) { + try { + if (getUsableSpace(path) >= requestedSize.getBytes() + minLocalStorageAvailable.getBytes()) { + Path tmpDirectory = path.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER).resolve(uniqueIdentifier); + Files.createDirectories(tmpDirectory); + return tmpDirectory; + } + } catch (IOException e) { + LOGGER.debug("Failed to obtain information about path [{}]: {}", path, e); + } + + } + LOGGER.debug("Failed to find native storage for [{}], returning null", uniqueIdentifier); + return null; + } + + public boolean localTmpStorageHasEnoughSpace(Path path, ByteSizeValue requestedSize) { + Path realPath = path.toAbsolutePath(); + for (Path p : environment.dataFiles()) { + try { + if (realPath.startsWith(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER))) { + return getUsableSpace(p) >= requestedSize.getBytes() + minLocalStorageAvailable.getBytes(); + } + } catch (IOException e) { + LOGGER.debug("Failed to obtain information about path [{}]: {}", path, e); + } + } + + LOGGER.debug("Not enough space left for path [{}]", path); + return false; + } + + /** + * Delete previously allocated temporary storage + * + * @param path + * Path to temporary storage + * @throws IOException + * if path cannot be cleaned up + */ + public void cleanupLocalTmpStorage(Path path) throws IOException { + // do not allow breaking out of the provided tmp storage + Path realPath = path.toAbsolutePath(); + for (Path p : environment.dataFiles()) { + if (realPath.startsWith(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER))) { + IOUtils.rm(path); + } + } + } + + long getUsableSpace(Path path) throws IOException { + long freeSpaceInBytes = Environment.getFileStore(path).getUsableSpace(); + + /* See: https://bugs.openjdk.java.net/browse/JDK-8162520 */ + if (freeSpaceInBytes < 0) { + freeSpaceInBytes = Long.MAX_VALUE; + } + return freeSpaceInBytes; + } +}
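tryGetLocalTmpStorage above only hands out a directory when one of the node's data paths has the requested size plus the configured minimum headroom free, and returns null otherwise. A hypothetical caller for illustration; the helper name is invented, while the 500 MB and 5 GB figures are the FORECAST_LOCAL_STORAGE_LIMIT constant and the MIN_DISK_SPACE_OFF_HEAP default appearing elsewhere in this change:

---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: requests forecast scratch
// space the way AutodetectProcessManager does through tryGetTmpStorage.
static Path allocateForecastScratch(Environment environment, String forecastId) {
    // 500 MB: the per-forecast limit TransportForecastJobAction passes in
    ByteSizeValue requested = new ByteSizeValue(500, ByteSizeUnit.MB);
    // 5 GB: default of the xpack.ml.min_disk_space_off_heap setting
    NativeStorageProvider provider =
            new NativeStorageProvider(environment, new ByteSizeValue(5, ByteSizeUnit.GB));
    // null unless a data path has 500 MB + 5 GB usable; callers must cope
    return provider.tryGetLocalTmpStorage(forecastId, requested);
}
---------------------------------------------------------------------------

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java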
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index cca591682d850..d3a848ef3821f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.core.internal.io.IOUtils; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; @@ -15,11 +16,12 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -47,6 +49,7 @@ import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; +import org.elasticsearch.xpack.ml.job.process.NativeStorageProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; @@ -59,6 +62,7 @@ import java.io.IOException; import java.io.InputStream; +import java.nio.file.Path; import java.time.Duration; import java.time.ZonedDateTime; import java.util.Date; @@ -96,6 +100,10 @@ public class AutodetectProcessManager extends AbstractComponent { public static final Setting MAX_OPEN_JOBS_PER_NODE = Setting.intSetting("xpack.ml.max_open_jobs", MAX_RUNNING_JOBS_PER_NODE, 1, Property.NodeScope); + // Undocumented setting for integration test purposes + public static final Setting MIN_DISK_SPACE_OFF_HEAP = + Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Property.NodeScope); + private final Client client; private final Environment environment; private final ThreadPool threadPool; @@ -107,8 +115,12 @@ public class AutodetectProcessManager extends AbstractComponent { private final JobResultsPersister jobResultsPersister; private final JobDataCountsPersister jobDataCountsPersister; + private NativeStorageProvider nativeStorageProvider; private final ConcurrentMap processByAllocation = new ConcurrentHashMap<>(); + // a map that manages the allocation of temporary space to jobs + private final ConcurrentMap nativeTmpStorage = new ConcurrentHashMap<>(); + private final int maxAllowedRunningJobs; private final NamedXContentRegistry xContentRegistry; @@ -133,6 +145,15 @@ public AutodetectProcessManager(Environment environment, Settings settings, Clie this.jobResultsPersister = jobResultsPersister; 
this.jobDataCountsPersister = jobDataCountsPersister; this.auditor = auditor; + this.nativeStorageProvider = new NativeStorageProvider(environment, MIN_DISK_SPACE_OFF_HEAP.get(settings)); + } + + public void onNodeStartup() { + try { + nativeStorageProvider.cleanupLocalTmpStorageInCaseOfUncleanShutdown(); + } catch (Exception e) { + logger.warn("Failed to clean up native storage from previous invocation", e); + } } public synchronized void closeAllJobsOnThisNode(String reason) throws IOException { @@ -251,6 +272,28 @@ public void flushJob(JobTask jobTask, FlushJobParams params, ActionListener handler) { - logger.debug("Forecasting job {}", jobTask.getJobId()); + String jobId = jobTask.getJobId(); + logger.debug("Forecasting job {}", jobId); AutodetectCommunicator communicator = getOpenAutodetectCommunicator(jobTask); if (communicator == null) { - String message = String.format(Locale.ROOT, "Cannot forecast because job [%s] is not open", jobTask.getJobId()); + String message = String.format(Locale.ROOT, "Cannot forecast because job [%s] is not open", jobId); logger.debug(message); handler.accept(ExceptionsHelper.conflictStatusException(message)); return; @@ -271,7 +315,7 @@ public void forecastJob(JobTask jobTask, ForecastParams params, Consumer> getStatistics(JobTask jobTask return Optional.of(new Tuple<>(communicator.getDataCounts(), communicator.getModelSizeStats())); } + private void removeTmpStorage(String jobId) throws IOException { + Path path = nativeTmpStorage.get(jobId); + if (path != null) { + nativeStorageProvider.cleanupLocalTmpStorage(path); + } + } + ExecutorService createAutodetectExecutorService(ExecutorService executorService) { AutodetectWorkerExecutorService autoDetectWorkerExecutor = new AutodetectWorkerExecutorService(threadPool.getThreadContext()); executorService.submit(autoDetectWorkerExecutor::start); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/ForecastParams.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/ForecastParams.java index 0afd3b8a473fb..f243195c3a785 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/ForecastParams.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/params/ForecastParams.java @@ -16,12 +16,14 @@ public class ForecastParams { private final long createTime; private final long duration; private final long expiresIn; + private final String tmpStorage; - private ForecastParams(String forecastId, long createTime, long duration, long expiresIn) { + private ForecastParams(String forecastId, long createTime, long duration, long expiresIn, String tmpStorage) { this.forecastId = forecastId; this.createTime = createTime; this.duration = duration; this.expiresIn = expiresIn; + this.tmpStorage = tmpStorage; } public String getForecastId() { @@ -52,9 +54,18 @@ public long getExpiresIn() { return expiresIn; } + /** + * Temporary storage the forecast is allowed to use for persisting models.
+ * + * @return path to tmp storage + */ + public String getTmpStorage() { + return tmpStorage; + } + @Override public int hashCode() { - return Objects.hash(forecastId, createTime, duration, expiresIn); + return Objects.hash(forecastId, createTime, duration, expiresIn, tmpStorage); } @Override @@ -69,7 +80,8 @@ public boolean equals(Object obj) { return Objects.equals(forecastId, other.forecastId) && Objects.equals(createTime, other.createTime) && Objects.equals(duration, other.duration) - && Objects.equals(expiresIn, other.expiresIn); + && Objects.equals(expiresIn, other.expiresIn) + && Objects.equals(tmpStorage, other.tmpStorage); } public static Builder builder() { @@ -81,6 +93,7 @@ public static class Builder { private final long createTimeEpochSecs; private long durationSecs; private long expiresInSecs; + private String tmpStorage; private Builder() { forecastId = UUIDs.base64UUID(); @@ -101,8 +114,13 @@ public Builder expiresIn(TimeValue expiresIn) { return this; } + public Builder tmpStorage(String tmpStorage) { + this.tmpStorage = tmpStorage; + return this; + } + public ForecastParams build() { - return new ForecastParams(forecastId, createTimeEpochSecs, durationSecs, expiresInSecs); + return new ForecastParams(forecastId, createTimeEpochSecs, durationSecs, expiresInSecs, tmpStorage); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java index 2a91797d28d75..2c026ec15506e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java @@ -164,6 +164,9 @@ public void writeForecastMessage(ForecastParams params) throws IOException { if (params.getExpiresIn() != -1) { builder.field("expires_in", params.getExpiresIn()); } + if (params.getTmpStorage() != null) { + builder.field("tmp_storage", params.getTmpStorage()); + } builder.endObject(); writeMessage(FORECAST_MESSAGE_CODE + Strings.toString(builder)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java index ddafc36416b65..af0f199dd0c58 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java @@ -8,7 +8,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.ParsingException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -30,10 +30,15 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Deque; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.regex.Matcher; +import java.util.regex.Pattern; /** * Handle a stream of 
C++ log messages that arrive via a named pipe in JSON format. @@ -181,6 +186,26 @@ public String getCppCopyright(Duration timeout) throws TimeoutException { return cppCopyright; } + /** + * Extracts version information from the copyright string, which is assumed to follow a certain format. + */ + public Map getNativeCodeInfo(Duration timeout) throws TimeoutException { + String copyrightMessage = getCppCopyright(timeout); + Matcher matcher = Pattern.compile("Version (.+) \\(Build ([^)]+)\\) Copyright ").matcher(copyrightMessage); + if (matcher.find()) { + Map info = new HashMap<>(2); + info.put("version", matcher.group(1)); + info.put("build_hash", matcher.group(2)); + return info; + } else { + // If this happens it probably means someone has changed the format in lib/ver/CBuildInfo.cc + // in the ml-cpp repo without changing the pattern above to match + String msg = "Unexpected native process copyright format: " + copyrightMessage; + LOGGER.error(msg); + throw new ElasticsearchException(msg); + } + } + /** * Expected to be called very infrequently. */ @@ -281,8 +306,18 @@ private void parseMessage(XContent xContent, BytesReference bytesRef) { } catch (XContentParseException e) { String upstreamMessage = "Fatal error: '" + bytesRef.utf8ToString() + "'"; if (upstreamMessage.contains("bad_alloc")) { - upstreamMessage += ", process ran out of memory."; + upstreamMessage += ", process ran out of memory"; } + + // add version information, so it's conveniently next to the crash log + upstreamMessage += ", version: "; + try { + Map versionInfo = getNativeCodeInfo(Duration.ofMillis(10)); + upstreamMessage += String.format(Locale.ROOT, "%s (build %s)", versionInfo.get("version"), versionInfo.get("build_hash")); + } catch (TimeoutException timeoutException) { + upstreamMessage += "failed to retrieve"; + } + storeError(upstreamMessage); seenFatalError = true; } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index aa003d29559f0..8364e015a3456 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.Result; @@ -61,12 +60,8 @@ private void removeData(Iterator jobIterator, ActionListener liste } private Iterator newJobIterator() { - List jobs = new ArrayList<>(); ClusterState clusterState = clusterService.state(); - MlMetadata mlMetadata = clusterState.getMetaData().custom(MLMetadataField.TYPE); - if (mlMetadata != null) { - jobs.addAll(mlMetadata.getJobs().values()); - } + List jobs = new ArrayList<>(MlMetadata.getMlMetadata(clusterState).getJobs().values()); return createVolatileCursorIterator(jobs); }
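The getNativeCodeInfo helper moved into CppLogMessageHandler above depends on the native process banner keeping its "Version ... (Build ...) Copyright" shape. A standalone illustration of what the pattern extracts; the banner text here is invented for the example, real banners come from the C++ process:

---------------------------------------------------------------------------
// Illustrative only: how the pattern decomposes a banner of the expected shape.
String banner = "controller (64 bit): Version 6.4.0 (Build 1234abcd) Copyright (c) 2018 Elasticsearch BV";
Matcher matcher = Pattern.compile("Version (.+) \\(Build ([^)]+)\\) Copyright ").matcher(banner);
if (matcher.find()) {
    String version = matcher.group(1);    // "6.4.0"
    String buildHash = matcher.group(2);  // "1234abcd"
}
---------------------------------------------------------------------------

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java new file mode 100644 index 0000000000000..fd4085d202041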
--- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/UnusedStateRemover.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.job.retention; + +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.CategorizerState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; +import org.elasticsearch.xpack.ml.job.persistence.BatchedStateDocIdsIterator; + +import java.util.Arrays; +import java.util.Deque; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; + +/** + * If for any reason a job is deleted but some of its state documents + * are left behind, this class deletes any unused documents stored + * in the .ml-state index. + */ +public class UnusedStateRemover implements MlDataRemover { + + private static final Logger LOGGER = Loggers.getLogger(UnusedStateRemover.class); + + private final Client client; + private final ClusterService clusterService; + + public UnusedStateRemover(Client client, ClusterService clusterService) { + this.client = Objects.requireNonNull(client); + this.clusterService = Objects.requireNonNull(clusterService); + } + + @Override + public void remove(ActionListener listener) { + try { + BulkRequestBuilder deleteUnusedStateRequestBuilder = findUnusedStateDocs(); + if (deleteUnusedStateRequestBuilder.numberOfActions() > 0) { + executeDeleteUnusedStateDocs(deleteUnusedStateRequestBuilder, listener); + } else { + listener.onResponse(true); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + + private BulkRequestBuilder findUnusedStateDocs() { + Set jobIds = getJobIds(); + BulkRequestBuilder deleteUnusedStateRequestBuilder = client.prepareBulk(); + BatchedStateDocIdsIterator stateDocIdsIterator = new BatchedStateDocIdsIterator(client, AnomalyDetectorsIndex.jobStateIndexName()); + while (stateDocIdsIterator.hasNext()) { + Deque stateDocIds = stateDocIdsIterator.next(); + for (String stateDocId : stateDocIds) { + String jobId = JobIdExtractor.extractJobId(stateDocId); + if (jobId == null) { + // not a managed state document id + continue; + } + if (jobIds.contains(jobId) == false) { + deleteUnusedStateRequestBuilder.add(new DeleteRequest( + AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, stateDocId)); + } + } + } + return deleteUnusedStateRequestBuilder; + } + + private Set getJobIds() { + return MlMetadata.getMlMetadata(clusterService.state()).getJobs().keySet(); + } + + private void executeDeleteUnusedStateDocs(BulkRequestBuilder deleteUnusedStateRequestBuilder,
ActionListener listener) { + LOGGER.info("Found [{}] unused state documents; attempting to delete", + deleteUnusedStateRequestBuilder.numberOfActions()); + deleteUnusedStateRequestBuilder.execute(new ActionListener() { + @Override + public void onResponse(BulkResponse bulkItemResponses) { + if (bulkItemResponses.hasFailures()) { + LOGGER.error("Some unused state documents could not be deleted due to failures: {}", + bulkItemResponses.buildFailureMessage()); + } else { + LOGGER.info("Successfully deleted all unused state documents"); + } + listener.onResponse(true); + } + + @Override + public void onFailure(Exception e) { + LOGGER.error("Error deleting unused model state documents: ", e); + listener.onFailure(e); + } + }); + } + + private static class JobIdExtractor { + + private static List> extractors = Arrays.asList( + ModelState::extractJobId, Quantiles::extractJobId, CategorizerState::extractJobId); + + private static String extractJobId(String docId) { + String jobId; + for (Function extractor : extractors) { + jobId = extractor.apply(docId); + if (jobId != null) { + return jobId; + } + } + return null; + } + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index b685e6b961d3c..eba2054054c0d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -65,7 +65,7 @@ public class MachineLearningFeatureSetTests extends ESTestCase { private XPackLicenseState licenseState; @Before - public void init() throws Exception { + public void init() { commonSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) .put(MachineLearningField.AUTODETECT_PROCESS.getKey(), false) @@ -232,9 +232,28 @@ public void testUsageGivenMlMetadataNotInstalled() throws Exception { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { usage.toXContent(builder, ToXContent.EMPTY_PARAMS); source = new XContentSource(builder); - assertThat(source.getValue("jobs"), equalTo(Collections.emptyMap())); - assertThat(source.getValue("datafeeds"), equalTo(Collections.emptyMap())); } + + assertThat(source.getValue("jobs._all.count"), equalTo(0)); + assertThat(source.getValue("jobs._all.detectors.min"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.detectors.max"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.detectors.total"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.detectors.avg"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.min"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.max"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.total"), equalTo(0.0)); + assertThat(source.getValue("jobs._all.model_size.avg"), equalTo(0.0)); + + assertThat(source.getValue("jobs.opening"), is(nullValue())); + assertThat(source.getValue("jobs.opened"), is(nullValue())); + assertThat(source.getValue("jobs.closing"), is(nullValue())); + assertThat(source.getValue("jobs.closed"), is(nullValue())); + assertThat(source.getValue("jobs.failed"), is(nullValue())); + + assertThat(source.getValue("datafeeds._all.count"), equalTo(0)); + + assertThat(source.getValue("datafeeds.started"), is(nullValue())); + assertThat(source.getValue("datafeeds.stopped"), is(nullValue())); } private void givenJobs(List 
jobs, List jobsStats) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java index ce46139a18bdb..d324d17f40e25 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -22,20 +21,15 @@ import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.junit.Before; -import org.mockito.Mockito; import java.net.InetAddress; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.mock.orig.Mockito.doAnswer; -import static org.elasticsearch.mock.orig.Mockito.times; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -68,7 +62,7 @@ public void setUpMocks() { when(clusterService.getClusterName()).thenReturn(CLUSTER_NAME); } - public void testInitialize() throws Exception { + public void testInitialize() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -80,11 +74,10 @@ public void testInitialize() throws Exception { .build(); initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any()); assertThat(initializationService.getDailyMaintenanceService().isStarted(), is(true)); } - public void testInitialize_noMasterNode() throws Exception { + public void testInitialize_noMasterNode() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -94,11 +87,10 @@ public void testInitialize_noMasterNode() throws Exception { .build(); initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any()); assertThat(initializationService.getDailyMaintenanceService(), is(nullValue())); } - public void testInitialize_alreadyInitialized() throws Exception { + public void testInitialize_alreadyInitialized() { MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); ClusterState cs = ClusterState.builder(new ClusterName("_name")) @@ -113,67 +105,10 @@ public void testInitialize_alreadyInitialized() throws Exception { initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); 
initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - verify(clusterService, times(0)).submitStateUpdateTask(eq("install-ml-metadata"), any()); assertSame(initialDailyMaintenanceService, initializationService.getDailyMaintenanceService()); } - public void testInitialize_onlyOnce() throws Exception { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - - verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any()); - } - - public void testInitialize_reintialiseAfterFailure() throws Exception { - MlInitializationService initializationService = new MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); - - // Fail the first cluster state update - AtomicBoolean onFailureCalled = new AtomicBoolean(false); - Mockito.doAnswer(invocation -> { - ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1]; - task.onFailure("mock a failure", new IllegalStateException()); - onFailureCalled.set(true); - return null; - }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .nodes(DiscoveryNodes.builder() - .add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT)) - .localNodeId("_node_id") - .masterNodeId("_node_id")) - .metaData(MetaData.builder()) - .build(); - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - assertTrue("Something went wrong mocking the cluster update task", onFailureCalled.get()); - verify(clusterService, times(1)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - // 2nd update succeeds - AtomicReference clusterStateHolder = new AtomicReference<>(); - Mockito.doAnswer(invocation -> { - ClusterStateUpdateTask task = (ClusterStateUpdateTask) invocation.getArguments()[1]; - clusterStateHolder.set(task.execute(cs)); - return null; - }).when(clusterService).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - initializationService.clusterChanged(new ClusterChangedEvent("_source", cs, cs)); - assertTrue("Something went wrong mocking the sucessful cluster update task", clusterStateHolder.get() != null); - verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - - // 3rd update won't be called as ML Metadata has been installed - initializationService.clusterChanged(new ClusterChangedEvent("_source", clusterStateHolder.get(), clusterStateHolder.get())); - verify(clusterService, times(2)).submitStateUpdateTask(eq("install-ml-metadata"), any(ClusterStateUpdateTask.class)); - } - - public void testNodeGoesFromMasterToNonMasterAndBack() throws Exception { + public void testNodeGoesFromMasterToNonMasterAndBack() { MlInitializationService initializationService = new 
MlInitializationService(Settings.EMPTY, threadPool, clusterService, client); MlDailyMaintenanceService initialDailyMaintenanceService = mock(MlDailyMaintenanceService.class); initializationService.setDailyMaintenanceService(initialDailyMaintenanceService); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index a5d3faf323434..bd722ebf8ef9a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -251,11 +251,11 @@ public void testStart_GivenNewlyCreatedJobLoopBackAndRealtime() throws Exception } public void testDatafeedTaskWaitsUntilJobIsOpened() { - PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -269,8 +269,8 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder anotherJobCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs.build(), cs.build())); @@ -280,8 +280,8 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged( new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs.build())); @@ -294,8 +294,8 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder 
cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -308,8 +308,8 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.FAILED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", updatedCs.build(), cs.build())); @@ -322,8 +322,8 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder); ClusterState.Builder cs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); when(clusterService.state()).thenReturn(cs.build()); Consumer handler = mockConsumer(); @@ -340,8 +340,8 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { tasksBuilder = PersistentTasksCustomMetaData.builder(); addJobTask("job_id", "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, clusterService.state().getMetaData() - .custom(MLMetadataField.TYPE)).putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); + .metaData(new MetaData.Builder().putCustom(MLMetadataField.TYPE, MlMetadata.getMlMetadata(clusterService.state())) + .putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs.build(), updatedCs.build())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java index 778ffe6dfae2d..357c2bc232552 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteJobIT.java @@ -103,7 +103,7 @@ public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { } private ClusterState markJobAsDeleted(String jobId, ClusterState currentState) { - MlMetadata 
mlMetadata = currentState.metaData().custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(currentState); assertNotNull(mlMetadata); MlMetadata.Builder builder = new MlMetadata.Builder(mlMetadata); @@ -116,7 +116,7 @@ private ClusterState markJobAsDeleted(String jobId, ClusterState currentState) { } private ClusterState removeJobFromClusterState(String jobId, ClusterState currentState) { - MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metaData().custom(MLMetadataField.TYPE)); + MlMetadata.Builder builder = new MlMetadata.Builder(MlMetadata.getMlMetadata(currentState)); builder.deleteJob(jobId, currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE)); ClusterState.Builder newState = ClusterState.builder(currentState); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index 667be5d41ea2b..64b3bfbab45c8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -326,7 +326,7 @@ private JobManager createJobManager() { private ClusterState createClusterState() { ClusterState.Builder builder = ClusterState.builder(new ClusterName("_name")); - builder.metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA)); + builder.metaData(MetaData.builder()); return builder.build(); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java index 9fea904a99fa1..ef87fe392dd75 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobProviderTests.java @@ -39,8 +39,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MLMetadataField; -import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -93,7 +91,7 @@ public void testCreateJobResultsIndex() { AtomicReference resultHolder = new AtomicReference<>(); ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(ImmutableOpenMap.of())) + .metaData(MetaData.builder().indices(ImmutableOpenMap.of())) .build(); ClusterService clusterService = mock(ClusterService.class); @@ -157,7 +155,7 @@ public void testCreateJobWithExistingIndex() { .fPut(AnomalyDetectorsIndex.jobResultsAliasedName("foo"), indexMetaData).build(); ClusterState cs2 = ClusterState.builder(new ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build(); + .metaData(MetaData.builder().indices(indexMap)).build(); ClusterService clusterService = mock(ClusterService.class); @@ -209,7 +207,7 @@ public void testCreateJobRelatedIndicies_createsAliasBecauseIndexNameIsSet() { ImmutableOpenMap indexMap = ImmutableOpenMap.builder().build(); ClusterState cs = ClusterState.builder(new 
ClusterName("_name")) - .metaData(MetaData.builder().putCustom(MLMetadataField.TYPE, MlMetadata.EMPTY_METADATA).indices(indexMap)).build(); + .metaData(MetaData.builder().indices(indexMap)).build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java new file mode 100644 index 0000000000000..3103e76c82bde --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ml.job.process; + +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.junit.Assert; + +import java.io.BufferedWriter; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.any; + +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.doAnswer; + +public class NativeStorageProviderTests extends ESTestCase { + + public void testTmpStorage() throws IOException { + Map storage = new HashMap<>(); + Path tmpDir = createTempDir(); + + storage.put(tmpDir, new ByteSizeValue(6, ByteSizeUnit.GB).getBytes()); + NativeStorageProvider storageProvider = createNativeStorageProvider(storage); + + Assert.assertNotNull( + storageProvider.tryGetLocalTmpStorage(randomAlphaOfLengthBetween(4, 10), new ByteSizeValue(100, ByteSizeUnit.BYTES))); + Assert.assertNull(storageProvider.tryGetLocalTmpStorage(randomAlphaOfLengthBetween(4, 10), + new ByteSizeValue(1024 * 1024 * 1024 + 1, ByteSizeUnit.BYTES))); + + String id = randomAlphaOfLengthBetween(4, 10); + Path path = storageProvider.tryGetLocalTmpStorage(id, new ByteSizeValue(1, ByteSizeUnit.GB)); + Assert.assertNotNull(path); + + Assert.assertEquals(tmpDir.resolve("ml-local-data").resolve("tmp").resolve(id).toString(), path.toString()); + } + + public void testTmpStorageChooseDisk() throws IOException { + Map storage = new HashMap<>(); + Path tmpDir = createTempDir(); + + // low disk space + Path disk1 = tmpDir.resolve(randomAlphaOfLengthBetween(4, 10)); + storage.put(disk1, new ByteSizeValue(1, ByteSizeUnit.GB).getBytes()); + + // sufficient disk space + Path disk2 = tmpDir.resolve(randomAlphaOfLengthBetween(4, 10)); + storage.put(disk2, new ByteSizeValue(20, ByteSizeUnit.GB).getBytes()); + + NativeStorageProvider storageProvider = createNativeStorageProvider(storage); + + String id = randomAlphaOfLengthBetween(4, 10); + Path path = storageProvider.tryGetLocalTmpStorage(id, new ByteSizeValue(1, ByteSizeUnit.GB)); + Assert.assertNotNull(path); + + // should resolve to disk2 as disk1 is low on space + Assert.assertEquals(disk2.resolve("ml-local-data").resolve("tmp").resolve(id).toString(), path.toString()); + } + + public void testTmpStorageCleanup() throws IOException { + 
Map storage = new HashMap<>(); + Path tmpDir = createTempDir(); + storage.put(tmpDir, new ByteSizeValue(6, ByteSizeUnit.GB).getBytes()); + NativeStorageProvider storageProvider = createNativeStorageProvider(storage); + String id = randomAlphaOfLengthBetween(4, 10); + + Path path = storageProvider.tryGetLocalTmpStorage(id, new ByteSizeValue(1, ByteSizeUnit.KB)); + + Assert.assertTrue(Files.exists(path)); + Path testFile = PathUtils.get(path.toString(), "testFile"); + BufferedWriter writer = Files.newBufferedWriter(testFile, StandardCharsets.UTF_8); + writer.write("created by NativeStorageProviderTests::testTmpStorageDelete"); + + writer.close(); + Assert.assertTrue(Files.exists(testFile)); + Assert.assertTrue(Files.isRegularFile(testFile)); + + // the native component should cleanup itself, but assume it has crashed + storageProvider.cleanupLocalTmpStorage(path); + Assert.assertFalse(Files.exists(testFile)); + Assert.assertFalse(Files.exists(path)); + } + + public void testTmpStorageCleanupOnStart() throws IOException { + Map storage = new HashMap<>(); + Path tmpDir = createTempDir(); + storage.put(tmpDir, new ByteSizeValue(6, ByteSizeUnit.GB).getBytes()); + NativeStorageProvider storageProvider = createNativeStorageProvider(storage); + String id = randomAlphaOfLengthBetween(4, 10); + + Path path = storageProvider.tryGetLocalTmpStorage(id, new ByteSizeValue(1, ByteSizeUnit.KB)); + + Assert.assertTrue(Files.exists(path)); + Path testFile = PathUtils.get(path.toString(), "testFile"); + + BufferedWriter writer = Files.newBufferedWriter(testFile, StandardCharsets.UTF_8); + writer.write("created by NativeStorageProviderTests::testTmpStorageWipe"); + + writer.close(); + Assert.assertTrue(Files.exists(testFile)); + Assert.assertTrue(Files.isRegularFile(testFile)); + + // create a new storage provider to test the case of a crashed node + storageProvider = createNativeStorageProvider(storage); + storageProvider.cleanupLocalTmpStorageInCaseOfUncleanShutdown(); + Assert.assertFalse(Files.exists(testFile)); + Assert.assertFalse(Files.exists(path)); + } + + private NativeStorageProvider createNativeStorageProvider(Map paths) throws IOException { + Environment environment = mock(Environment.class); + + when(environment.dataFiles()).thenReturn(paths.keySet().toArray(new Path[paths.size()])); + NativeStorageProvider storageProvider = spy(new NativeStorageProvider(environment, new ByteSizeValue(5, ByteSizeUnit.GB))); + + doAnswer(invocation -> { + return paths.getOrDefault(invocation.getArguments()[0], Long.valueOf(0)).longValue(); + } + + ).when(storageProvider).getUsableSpace(any(Path.class)); + + return storageProvider; + } + +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 9f3d1c779f8eb..f330c501b76f9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -28,7 +28,6 @@ import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; -import org.elasticsearch.xpack.core.ml.MLMetadataField; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlMetadata; @@ -272,8 +271,7 @@ public static 
GetDatafeedsStatsAction.Response.DatafeedStats getDatafeedStats(St } public static void deleteAllDatafeeds(Logger logger, Client client) throws Exception { - MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData(); - MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState()); try { logger.info("Closing all datafeeds (using _all)"); StopDatafeedAction.Response stopResponse = client @@ -312,8 +310,7 @@ public static void deleteAllDatafeeds(Logger logger, Client client) throws Excep } public static void deleteAllJobs(Logger logger, Client client) throws Exception { - MetaData metaData = client.admin().cluster().prepareState().get().getState().getMetaData(); - MlMetadata mlMetadata = metaData.custom(MLMetadataField.TYPE); + MlMetadata mlMetadata = MlMetadata.getMlMetadata(client.admin().cluster().prepareState().get().getState()); try { CloseJobAction.Request closeRequest = new CloseJobAction.Request(MetaData.ALL); diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen index d56ae2f4d1adb..67d5168452031 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil index c2502bd734ffe..eb245fd0b0ee1 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.core.ssl.CertificateTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate index eb3c81febdfb7..dc3f360361d9b 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ - "$@" + "$@" \ No newline at end of file diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata index 92200d82e1264..48274ab7efa42 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords index e6aaa00d64796..d896efcfcbe28 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen index e8c4f10c044c3..954b0884007ff 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users b/x-pack/plugin/security/src/main/bin/elasticsearch-users index 2d9ed8df93dc4..6caeece8cbccd 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users @@ -4,17 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-security-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.security.authc.file.tool.UsersTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env b/x-pack/plugin/security/src/main/bin/x-pack-security-env index 3a2b15e13fa4a..7d8010deec5ea 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env @@ -4,7 +4,5 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-source "`dirname "$0"`"/x-pack-env - # include x-pack-security jars in classpath ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-security/*" diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index af1034d957455..28cf4bf95c924 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -450,7 +450,7 @@ public void testTokenExpiry() throws Exception { } final TimeValue defaultExpiration = TokenService.TOKEN_EXPIRATION.get(Settings.EMPTY); - final int fastForwardAmount = randomIntBetween(1, Math.toIntExact(defaultExpiration.getSeconds())); + final int fastForwardAmount = randomIntBetween(1, Math.toIntExact(defaultExpiration.getSeconds()) - 5); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // move the clock forward but don't go to expiry clock.fastForwardSeconds(fastForwardAmount); diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..50392f59374a8 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index c25718d0a9ee0..0000000000000 --- a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5167fb0a14434cb10ec3224e9e32ca668e9f9ad4 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 new file mode 100644 index 0000000000000..50392f59374a8 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 @@ -0,0 +1 @@ +bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 deleted file mode 100644 index c25718d0a9ee0..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-6705632810.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5167fb0a14434cb10ec3224e9e32ca668e9f9ad4 \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java index f4f98a4e4ffd7..bac4fdcfb2a63 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SourceGenerator.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.sql.execution.search; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.ConstantScoreQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import 
org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -28,6 +26,7 @@ import java.util.List; import static java.util.Collections.singletonList; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.search.sort.SortBuilders.scoreSort; import static org.elasticsearch.search.sort.SortBuilders.scriptSort; @@ -37,20 +36,23 @@ public abstract class SourceGenerator { private static final List NO_STORED_FIELD = singletonList(StoredFieldsContext._NONE_); public static SearchSourceBuilder sourceBuilder(QueryContainer container, QueryBuilder filter, Integer size) { - final SearchSourceBuilder source = new SearchSourceBuilder(); + QueryBuilder finalQuery = null; // add the source - if (container.query() == null) { + if (container.query() != null) { if (filter != null) { - source.query(new ConstantScoreQueryBuilder(filter)); + finalQuery = boolQuery().must(container.query().asBuilder()).filter(filter); + } else { + finalQuery = container.query().asBuilder(); } } else { if (filter != null) { - source.query(new BoolQueryBuilder().must(container.query().asBuilder()).filter(filter)); - } else { - source.query(container.query().asBuilder()); + finalQuery = boolQuery().filter(filter); } } + final SearchSourceBuilder source = new SearchSourceBuilder(); + source.query(finalQuery); + SqlSourceBuilder sortBuilder = new SqlSourceBuilder(); // Iterate through all the columns requested, collecting the fields that // need to be retrieved from the result documents diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java index 64949fe318ce6..47075773b0166 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQuery.java @@ -5,13 +5,13 @@ */ package org.elasticsearch.xpack.sql.querydsl.query; -import java.util.Objects; - import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.xpack.sql.tree.Location; +import java.util.Objects; + import static org.elasticsearch.index.query.QueryBuilders.boolQuery; /** @@ -63,9 +63,8 @@ public void enrichNestedSort(NestedSortBuilder sort) { public QueryBuilder asBuilder() { BoolQueryBuilder boolQuery = boolQuery(); if (isAnd) { - // TODO are we throwing out score by using filter? 
- boolQuery.filter(left.asBuilder()); - boolQuery.filter(right.asBuilder()); + boolQuery.must(left.asBuilder()); + boolQuery.must(right.asBuilder()); } else { boolQuery.should(left.asBuilder()); boolQuery.should(right.asBuilder()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java index f038a20823dbc..816b665133583 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/SourceGeneratorTests.java @@ -5,9 +5,6 @@ */ package org.elasticsearch.xpack.sql.execution.search; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.ConstantScoreQueryBuilder; -import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -28,6 +25,8 @@ import org.elasticsearch.xpack.sql.type.KeywordEsField; import static java.util.Collections.singletonList; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; import static org.elasticsearch.search.sort.SortBuilders.scoreSort; @@ -42,22 +41,22 @@ public void testNoQueryNoFilter() { public void testQueryNoFilter() { QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, null, randomIntBetween(1, 10)); - assertEquals(new MatchQueryBuilder("foo", "bar").operator(Operator.OR), sourceBuilder.query()); + assertEquals(matchQuery("foo", "bar").operator(Operator.OR), sourceBuilder.query()); } public void testNoQueryFilter() { QueryContainer container = new QueryContainer(); - QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + QueryBuilder filter = matchQuery("bar", "baz"); SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); - assertEquals(new ConstantScoreQueryBuilder(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + assertEquals(boolQuery().filter(matchQuery("bar", "baz")), sourceBuilder.query()); } public void testQueryFilter() { QueryContainer container = new QueryContainer().with(new MatchQuery(Location.EMPTY, "foo", "bar")); - QueryBuilder filter = new MatchQueryBuilder("bar", "baz"); + QueryBuilder filter = matchQuery("bar", "baz"); SearchSourceBuilder sourceBuilder = SourceGenerator.sourceBuilder(container, filter, randomIntBetween(1, 10)); - assertEquals(new BoolQueryBuilder().must(new MatchQueryBuilder("foo", "bar").operator(Operator.OR)) - .filter(new MatchQueryBuilder("bar", "baz")), sourceBuilder.query()); + assertEquals(boolQuery().must(matchQuery("foo", "bar").operator(Operator.OR)).filter(matchQuery("bar", "baz")), + sourceBuilder.query()); } public void testLimit() { diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval index 6de537660cbc4..c7185d167562c 100755 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval @@ -4,16 +4,7 @@ # or more 
contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/elasticsearch-env - -source "`dirname "$0"`"/x-pack-watcher-env - -exec \ - "$JAVA" \ - $ES_JAVA_OPTS \ - -Des.path.home="$ES_HOME" \ - -Des.path.conf="$ES_PATH_CONF" \ - -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ - -cp "$ES_CLASSPATH" \ +ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-watcher-env" \ + "`dirname "$0"`"/elasticsearch-cli \ org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool \ "$@" diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env index 13718a01b4330..83045297a0c68 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env @@ -4,7 +4,5 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -source "`dirname "$0"`"/x-pack-env - # include x-pack-security jars in classpath ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-watcher/*" diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index ce2b54ddd3b24..fa169933e45b2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.test; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; @@ -70,10 +71,12 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; @@ -177,7 +180,7 @@ protected boolean timeWarped() { public void _setup() throws Exception { if (timeWarped()) { timeWarp = new TimeWarp(internalCluster().getInstances(ScheduleTriggerEngineMock.class), - (ClockMock)getInstanceFromMaster(Clock.class)); + (ClockMock)getInstanceFromMaster(Clock.class), logger); } if (internalCluster().size() > 0) { @@ -536,24 +539,28 @@ public EmailSent send(Email email, Authentication auth, Profile profile, String protected static class TimeWarp { - protected final Iterable schedulers; - protected final ClockMock clock; + private final List schedulers; + private final ClockMock clock; + private final Logger logger; - public TimeWarp(Iterable schedulers, ClockMock clock) { - this.schedulers = schedulers; + TimeWarp(Iterable schedulers, ClockMock clock, Logger logger) { + this.schedulers = StreamSupport.stream(schedulers.spliterator(), false).collect(Collectors.toList()); this.clock = clock; + this.logger = logger; } public void trigger(String jobName) { - schedulers.forEach(scheduler -> scheduler.trigger(jobName)); + 
trigger(jobName, 1, null); } public ClockMock clock() { return clock; } - public void trigger(String id, int times, TimeValue timeValue) { - schedulers.forEach(scheduler -> scheduler.trigger(id, times, timeValue)); + public void trigger(String watchId, int times, TimeValue timeValue) { + boolean isTriggered = schedulers.stream().anyMatch(scheduler -> scheduler.trigger(watchId, times, timeValue)); + String msg = String.format(Locale.ROOT, "could not find watch [%s] to trigger", watchId); + assertThat(msg, isTriggered, is(true)); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java index c692a1dfc8b7b..5da3f05177aae 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/action/activate/ActivateWatchTests.java @@ -47,8 +47,8 @@ public class ActivateWatchTests extends AbstractWatcherIntegrationTestCase { protected boolean timeWarped() { return false; } - - // FIXME not to be sleep based + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30699") public void testDeactivateAndActivate() throws Exception { PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() .setId("_id") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java index 63f4f95ae2161..f6c06117970dc 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java @@ -77,18 +77,13 @@ public boolean remove(String jobId) { return watches.remove(jobId) != null; } - public void trigger(String jobName) { - trigger(jobName, 1, null); + public boolean trigger(String jobName) { + return trigger(jobName, 1, null); } - public void trigger(String jobName, int times) { - trigger(jobName, times, null); - } - - public void trigger(String jobName, int times, TimeValue interval) { + public boolean trigger(String jobName, int times, TimeValue interval) { if (watches.containsKey(jobName) == false) { - logger.trace("not executing job [{}], not found", jobName); - return; + return false; } for (int i = 0; i < times; i++) { @@ -108,5 +103,7 @@ public void trigger(String jobName, int times, TimeValue interval) { } } } + + return true; } } diff --git a/x-pack/qa/ml-native-tests/build.gradle b/x-pack/qa/ml-native-tests/build.gradle index 94b7be3a44d4d..657aa7cfef68b 100644 --- a/x-pack/qa/ml-native-tests/build.gradle +++ b/x-pack/qa/ml-native-tests/build.gradle @@ -61,6 +61,7 @@ integTestCluster { setting 'xpack.security.transport.ssl.verification_mode', 'certificate' setting 'xpack.security.audit.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.ml.min_disk_space_off_heap', '200mb' keystoreSetting 'bootstrap.password', 'x-pack-test-password' keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass' diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java 
b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java index c46b1d1c8689b..4e0aa9c7e0613 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectMemoryLimitIT.java @@ -33,7 +33,7 @@ public class AutodetectMemoryLimitIT extends MlNativeAutodetectIntegTestCase { @After - public void cleanUpTest() throws Exception { + public void cleanUpTest() { cleanUp(); } @@ -75,19 +75,10 @@ public void testTooManyPartitions() throws Exception { closeJob(job.getId()); // Assert we haven't violated the limit too much - // and a balance of partitions/by fields were created GetJobsStatsAction.Response.JobStats jobStats = getJobStats(job.getId()).get(0); ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); assertThat(modelSizeStats.getModelBytes(), lessThan(35000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(30000000L)); - - // it is important to check that while we rejected partitions, we still managed - // to create some by fields; it shows we utilize memory in a meaningful way - // rather than creating empty partitions - assertThat(modelSizeStats.getTotalPartitionFieldCount(), lessThan(900L)); - assertThat(modelSizeStats.getTotalPartitionFieldCount(), greaterThan(650L)); - assertThat(modelSizeStats.getTotalByFieldCount(), lessThan(900L)); - assertThat(modelSizeStats.getTotalByFieldCount(), greaterThan(650L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } @@ -133,8 +124,6 @@ public void testTooManyByFields() throws Exception { ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); assertThat(modelSizeStats.getModelBytes(), lessThan(36000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(30000000L)); - assertThat(modelSizeStats.getTotalByFieldCount(), lessThan(1900L)); - assertThat(modelSizeStats.getTotalByFieldCount(), greaterThan(1600L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } @@ -184,9 +173,6 @@ public void testTooManyByAndOverFields() throws Exception { ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); assertThat(modelSizeStats.getModelBytes(), lessThan(36000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(24000000L)); - assertThat(modelSizeStats.getTotalByFieldCount(), equalTo(7L)); - assertThat(modelSizeStats.getTotalOverFieldCount(), greaterThan(40000L)); - assertThat(modelSizeStats.getTotalOverFieldCount(), lessThan(50000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.HARD_LIMIT)); } @@ -237,7 +223,6 @@ public void testManyDistinctOverFields() throws Exception { ModelSizeStats modelSizeStats = jobStats.getModelSizeStats(); assertThat(modelSizeStats.getModelBytes(), lessThan(90000000L)); assertThat(modelSizeStats.getModelBytes(), greaterThan(75000000L)); - assertThat(modelSizeStats.getTotalOverFieldCount(), greaterThan(140000L)); assertThat(modelSizeStats.getMemoryStatus(), equalTo(ModelSizeStats.MemoryStatus.OK)); } diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index 3a1fc2b0f6d4a..23bd5c7f7ddf1 100644 --- 
a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -8,12 +8,15 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateAction; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -21,6 +24,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; @@ -31,13 +35,16 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; public class DeleteExpiredDataIT extends MlNativeAutodetectIntegTestCase { @@ -78,11 +85,16 @@ public void setUpData() throws IOException { } @After - public void tearDownData() throws Exception { + public void tearDownData() { client().admin().indices().prepareDelete(DATA_INDEX).get(); cleanUp(); } + public void testDeleteExpiredDataGivenNothingToDelete() throws Exception { + // Tests that nothing goes wrong when there's nothing to delete + client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); + } + public void testDeleteExpiredData() throws Exception { registerJob(newJobBuilder("no-retention").setResultsRetentionDays(null).setModelSnapshotRetentionDays(null)); registerJob(newJobBuilder("results-retention").setResultsRetentionDays(1L).setModelSnapshotRetentionDays(null)); @@ -166,6 +178,18 @@ public void testDeleteExpiredData() throws Exception { assertThat(countForecastDocs(forecastStat.getJobId(), forecastStat.getForecastId()), equalTo(forecastStat.getRecordCount())); } + // Index some unused state documents (more than 10K to test scrolling works) + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10010; i++) { + String docId = "non_existing_job_" + randomFrom("model_state_1234567#" + i, "quantiles", "categorizer_state#" + i); + IndexRequest indexRequest = new 
IndexRequest(AnomalyDetectorsIndex.jobStateIndexName(), "doc", docId); + indexRequest.source(Collections.emptyMap()); + bulkRequestBuilder.add(indexRequest); + } + assertThat(bulkRequestBuilder.get().status(), equalTo(RestStatus.OK)); + + // Now call the action under test client().execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request()).get(); // We need to refresh to ensure the deletion is visible @@ -216,6 +240,16 @@ public void testDeleteExpiredData() throws Exception { assertThat(countForecastDocs(job.getId(), forecastId), equalTo(0L)); } } + + // Verify .ml-state doesn't contain unused state documents + SearchResponse stateDocsResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexName()) + .setFetchSource(false) + .setSize(10000) + .get(); + assertThat(stateDocsResponse.getHits().getTotalHits(), lessThan(10000L)); + for (SearchHit hit : stateDocsResponse.getHits().getHits()) { + assertThat(hit.getId().startsWith("non_existing_job"), is(false)); + } } private static Job.Builder newJobBuilder(String id) { diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index 14bdd533c6b38..81c54353a2d70 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; @@ -206,8 +207,7 @@ public void testMemoryStatus() throws Exception { assertThat(e.getMessage(), equalTo("Cannot run forecast: Forecast cannot be executed as model memory status is not OK")); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30399") - public void testMemoryLimit() throws Exception { + public void testOverflowToDisk() throws Exception { Detector.Builder detector = new Detector.Builder("mean", "value"); detector.setByFieldName("clientIP"); @@ -216,7 +216,9 @@ public void testMemoryLimit() throws Exception { analysisConfig.setBucketSpan(bucketSpan); DataDescription.Builder dataDescription = new DataDescription.Builder(); dataDescription.setTimeFormat("epoch"); - Job.Builder job = new Job.Builder("forecast-it-test-memory-limit"); + Job.Builder job = new Job.Builder("forecast-it-test-overflow-to-disk"); + AnalysisLimits limits = new AnalysisLimits(1200L, null); + job.setAnalysisLimits(limits); job.setAnalysisConfig(analysisConfig); job.setDataDescription(dataDescription); @@ -224,28 +226,47 @@ public void testMemoryLimit() throws Exception { putJob(job); openJob(job.getId()); createDataWithLotsOfClientIps(bucketSpan, job); - ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> forecast(job.getId(), TimeValue.timeValueMinutes(120), null)); - assertThat(e.getMessage(), - equalTo("Cannot run forecast: Forecast cannot be executed as forecast memory usage is predicted to exceed 20MB")); + + try { + String forecastId = forecast(job.getId(), TimeValue.timeValueHours(1), null); + + waitForecastToFinish(job.getId(), forecastId); + } catch (ElasticsearchStatusException e) { + if (e.getMessage().contains("disk 
space")) { + throw new ElasticsearchStatusException( + "Test likely fails due to insufficient disk space on test machine, please free up space.", e.status(), e); + } + throw e; + } + + closeJob(job.getId()); + + List forecastStats = getForecastStats(); + assertThat(forecastStats.size(), equalTo(1)); + ForecastRequestStats forecastRequestStats = forecastStats.get(0); + List forecasts = getForecasts(job.getId(), forecastRequestStats); + + assertThat(forecastRequestStats.getRecordCount(), equalTo(8000L)); + assertThat(forecasts.size(), equalTo(8000)); } private void createDataWithLotsOfClientIps(TimeValue bucketSpan, Job.Builder job) throws IOException { long now = Instant.now().getEpochSecond(); - long timestamp = now - 50 * bucketSpan.seconds(); - while (timestamp < now) { - for (int i = 1; i < 256; i++) { + long timestamp = now - 15 * bucketSpan.seconds(); + + for (int h = 0; h < 15; h++) { + for (int i = 1; i < 101; i++) { List data = new ArrayList<>(); - for (int j = 1; j < 100; j++) { + for (int j = 1; j < 81; j++) { Map record = new HashMap<>(); record.put("time", timestamp); - record.put("value", 10.0); + record.put("value", 10.0 + h); record.put("clientIP", String.format(Locale.ROOT, "192.168.%d.%d", i, j)); data.add(createJsonRecord(record)); } postData(job.getId(), data.stream().collect(Collectors.joining())); - timestamp += bucketSpan.seconds(); } + timestamp += bucketSpan.seconds(); } flushJob(job.getId(), false); } diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java index eb0c125a13ce4..81a44cd133643 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ModelPlotsIT.java @@ -30,7 +30,9 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; public class ModelPlotsIT extends MlNativeAutodetectIntegTestCase { @@ -83,7 +85,11 @@ public void testPartitionFieldWithoutTerms() throws Exception { startDatafeed(datafeedId, 0, System.currentTimeMillis()); waitUntilJobIsClosed(job.getId()); - assertThat(getBuckets(job.getId()).size(), equalTo(23)); + // As the initial time is random, there's a chance the first record is + // aligned on a bucket start. Thus we check the buckets are in [23, 24] + assertThat(getBuckets(job.getId()).size(), greaterThanOrEqualTo(23)); + assertThat(getBuckets(job.getId()).size(), lessThanOrEqualTo(24)); + Set modelPlotTerms = modelPlotTerms(job.getId(), "partition_field_value"); assertThat(modelPlotTerms, containsInAnyOrder("user_1", "user_2", "user_3")); } @@ -101,7 +107,11 @@ public void testPartitionFieldWithTerms() throws Exception { startDatafeed(datafeedId, 0, System.currentTimeMillis()); waitUntilJobIsClosed(job.getId()); - assertThat(getBuckets(job.getId()).size(), equalTo(23)); + // As the initial time is random, there's a chance the first record is + // aligned on a bucket start. 
Thus we check the buckets are in [23, 24] + assertThat(getBuckets(job.getId()).size(), greaterThanOrEqualTo(23)); + assertThat(getBuckets(job.getId()).size(), lessThanOrEqualTo(24)); + Set modelPlotTerms = modelPlotTerms(job.getId(), "partition_field_value"); assertThat(modelPlotTerms, containsInAnyOrder("user_2", "user_3")); } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java index ffdf0a39a909a..1f76e670854a2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -71,6 +71,6 @@ private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { assertNotNull(nodesAgg); List> buckets = (List>) nodesAgg.get("buckets"); assertNotNull(buckets); - assertEquals(numBuckets, buckets.size()); + assertEquals("Found node buckets " + buckets, numBuckets, buckets.size()); } } diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index bf26831fae82c..595c562af3707 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -1,6 +1,5 @@ import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.MavenFilteringHack -import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.test.NodeInfo diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java index 3019a00351c28..e0cf0efac472e 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java @@ -16,7 +16,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.CheckedSupplier; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.XContentHelper; @@ -33,12 +32,11 @@ import java.sql.JDBCType; import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.TreeMap; import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static java.util.Collections.unmodifiableMap; @@ -396,19 +394,23 @@ public void testBasicTranslateQueryWithFilter() throws IOException { assertNotNull(query); @SuppressWarnings("unchecked") - Map constantScore = (Map) query.get("constant_score"); - assertNotNull(constantScore); + Map bool = (Map) query.get("bool"); + assertNotNull(bool); @SuppressWarnings("unchecked") - Map filter = (Map) constantScore.get("filter"); + List filter = (List) bool.get("filter"); assertNotNull(filter); @SuppressWarnings("unchecked") - Map match = (Map) filter.get("match"); - assertNotNull(match); + Map map = (Map) filter.get(0); + assertNotNull(map); @SuppressWarnings("unchecked") - Map matchQuery = (Map) match.get("test"); + Map matchQ = (Map) map.get("match"); + + 
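+ // With the bool/filter translation introduced in SourceGenerator above, the match query now sits inside the bool query's "filter" clause.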
@SuppressWarnings("unchecked") + Map matchQuery = (Map) matchQ.get("test"); + assertNotNull(matchQuery); assertEquals("foo", matchQuery.get("query")); } diff --git a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml index 3b04ba716759a..259bc9e1d25af 100644 --- a/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml +++ b/x-pack/qa/third-party/slack/src/test/resources/rest-api-spec/test/slack/10_slack.yml @@ -16,7 +16,13 @@ }, "input": { "simple": { - "foo" : "something from input" + "foo" : "something from input", + "hits" : { + "hits" : [ + { "_source" : { "name" : "first", "value" : "2018-04-26T11:45:12.518Z" } }, + { "_source" : { "name" : "second", "value" : "anything" } } + ] + } } }, "actions": { @@ -49,7 +55,20 @@ } ] } - ] + ], + "dynamic_attachments" : { + "list_path" : "ctx.payload.hits.hits", + "attachment_template" : { + "title": "Title", + "fields" : [ + { + "title" : "Field title {{_source.name}}", + "value" : "{{_source.value}}", + "short" : true + } + ] + } + } } } } diff --git a/x-pack/qa/vagrant/build.gradle b/x-pack/qa/vagrant/build.gradle index c69214578fd16..411b8d90c6d41 100644 --- a/x-pack/qa/vagrant/build.gradle +++ b/x-pack/qa/vagrant/build.gradle @@ -1,8 +1,3 @@ -import org.elasticsearch.gradle.plugin.MetaPluginBuildPlugin -import org.elasticsearch.gradle.plugin.MetaPluginPropertiesExtension -import org.elasticsearch.gradle.plugin.PluginBuildPlugin -import org.elasticsearch.gradle.plugin.PluginPropertiesExtension - apply plugin: 'elasticsearch.vagrantsupport' apply plugin: 'elasticsearch.vagrant'