diff --git a/NOTICE.txt b/NOTICE.txt
index 643a060cd05c4..f1e3198ab4a9a 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -1,5 +1,5 @@
 Elasticsearch
-Copyright 2009-2017 Elasticsearch
+Copyright 2009-2018 Elasticsearch
 
 This product includes software developed by The Apache Software Foundation
 (http://www.apache.org/).
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 97902d56ec8c7..2e4a6ede754ad 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -498,6 +498,13 @@ will contain your change.
 . Push both branches to your remote repository.
 . Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`.
 
+== Skip fetching latest
+
+For some BWC testing scenarios, you want to use the local clone of the
+repository without fetching latest. For these use cases, you can set the system
+property `tests.bwc.git_fetch_latest` to `false` and the BWC builds will skip
+fetching the latest from the remote.
+
 == Test coverage analysis
 
 Generating test coverage reports for Elasticsearch is currently not possible through Gradle.
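As a concrete invocation, the new property composes directly with the BWC command shown just above (illustrative command reusing the refspec from that example):

    ./gradlew check -Dtests.bwc.remote=${remote} \
        -Dtests.bwc.refspec.5.x=index_req_bwc_5.x \
        -Dtests.bwc.git_fetch_latest=false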
"https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { dep -> if (dep.group != null && dep.group.startsWith('org.elasticsearch')) { - String substitution = project.ext.projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") - if (substitution != null) { - project.javadoc.dependsOn substitution + ':javadoc' + Project upstreamProject = dependencyToProject(dep) + if (upstreamProject != null) { + project.javadoc.dependsOn "${upstreamProject.path}:javadoc" String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + dep.name.replaceAll('\\.', '/') + '/' + dep.version - project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${project.project(substitution).buildDir}/docs/javadoc/" + project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${upstreamProject.buildDir}/docs/javadoc/" } } } @@ -275,17 +292,7 @@ gradle.projectsEvaluated { } configurations.all { dependencies.all { Dependency dep -> - Project upstreamProject = null - if (dep instanceof ProjectDependency) { - upstreamProject = dep.dependencyProject - } else { - // gradle doesn't apply substitutions until resolve time, so they won't - // show up as a ProjectDependency above - String substitution = projectSubstitutions.get("${dep.group}:${dep.name}:${dep.version}") - if (substitution != null) { - upstreamProject = findProject(substitution) - } - } + Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { if (project.path == upstreamProject.path) { // TODO: distribution integ tests depend on themselves (!), fix that diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 50e1cd68523d5..3103f23472ed7 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -97,6 +97,12 @@ class BuildPlugin implements Plugin { String compilerJavaHome = findCompilerJavaHome() String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome) File gradleJavaHome = Jvm.current().javaHome + + final Map javaVersions = [:] + for (int version = 7; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) { + javaVersions.put(version, findJavaHome(version)); + } + String javaVendor = System.getProperty('java.vendor') String javaVersion = System.getProperty('java.version') String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" + @@ -158,10 +164,32 @@ class BuildPlugin implements Plugin { throw new GradleException(message) } + for (final Map.Entry javaVersionEntry : javaVersions.entrySet()) { + final String javaHome = javaVersionEntry.getValue() + if (javaHome == null) { + continue + } + JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome)) + final JavaVersion expectedJavaVersionEnum + final int version = javaVersionEntry.getKey() + if (version < 9) { + expectedJavaVersionEnum = JavaVersion.toVersion("1." 
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 50e1cd68523d5..3103f23472ed7 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -97,6 +97,12 @@ class BuildPlugin implements Plugin<Project> {
             String compilerJavaHome = findCompilerJavaHome()
             String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome)
             File gradleJavaHome = Jvm.current().javaHome
+
+            final Map<Integer, String> javaVersions = [:]
+            for (int version = 7; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) {
+                javaVersions.put(version, findJavaHome(version));
+            }
+
             String javaVendor = System.getProperty('java.vendor')
             String javaVersion = System.getProperty('java.version')
             String gradleJavaVersionDetails = "${javaVendor} ${javaVersion}" +
@@ -158,10 +164,32 @@ class BuildPlugin implements Plugin<Project> {
                 throw new GradleException(message)
             }
 
+            for (final Map.Entry<Integer, String> javaVersionEntry : javaVersions.entrySet()) {
+                final String javaHome = javaVersionEntry.getValue()
+                if (javaHome == null) {
+                    continue
+                }
+                JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
+                final JavaVersion expectedJavaVersionEnum
+                final int version = javaVersionEntry.getKey()
+                if (version < 9) {
+                    expectedJavaVersionEnum = JavaVersion.toVersion("1." + version)
+                } else {
+                    expectedJavaVersionEnum = JavaVersion.toVersion(Integer.toString(version))
+                }
+                if (javaVersionEnum != expectedJavaVersionEnum) {
+                    final String message =
+                            "the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
+                                    " ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
+                    throw new GradleException(message)
+                }
+            }
+
             project.rootProject.ext.compilerJavaHome = compilerJavaHome
             project.rootProject.ext.runtimeJavaHome = runtimeJavaHome
             project.rootProject.ext.compilerJavaVersion = compilerJavaVersionEnum
             project.rootProject.ext.runtimeJavaVersion = runtimeJavaVersionEnum
+            project.rootProject.ext.javaVersions = javaVersions
             project.rootProject.ext.buildChecksDone = true
         }
 
@@ -173,6 +201,7 @@ class BuildPlugin implements Plugin<Project> {
         project.ext.runtimeJavaHome = project.rootProject.ext.runtimeJavaHome
         project.ext.compilerJavaVersion = project.rootProject.ext.compilerJavaVersion
         project.ext.runtimeJavaVersion = project.rootProject.ext.runtimeJavaVersion
+        project.ext.javaVersions = project.rootProject.ext.javaVersions
     }
 
     private static String findCompilerJavaHome() {
@@ -188,6 +217,27 @@ class BuildPlugin implements Plugin<Project> {
         return javaHome
     }
 
+    private static String findJavaHome(int version) {
+        return System.getenv('JAVA' + version + '_HOME')
+    }
+
+    /**
+     * Get Java home for the project for the specified version. If the specified version is not configured, an exception with the specified
+     * message is thrown.
+     *
+     * @param project the project
+     * @param version the version of Java home to obtain
+     * @param message the exception message if Java home for the specified version is not configured
+     * @return Java home for the specified version
+     * @throws GradleException if Java home for the specified version is not configured
+     */
+    static String getJavaHome(final Project project, final int version, final String message) {
+        if (project.javaVersions.get(version) == null) {
+            throw new GradleException(message)
+        }
+        return project.javaVersions.get(version)
+    }
+
     private static String findRuntimeJavaHome(final String compilerJavaHome) {
         assert compilerJavaHome != null
         return System.getenv('RUNTIME_JAVA_HOME') ?: compilerJavaHome
@@ -517,17 +567,18 @@ class BuildPlugin implements Plugin<Project> {
             jarTask.destinationDir = new File(project.buildDir, 'distributions')
             // fixup the jar manifest
             jarTask.doFirst {
-                boolean isSnapshot = VersionProperties.elasticsearch.endsWith("-SNAPSHOT");
-                String version = VersionProperties.elasticsearch;
-                if (isSnapshot) {
-                    version = version.substring(0, version.length() - 9)
-                }
+                final Version versionWithoutSnapshot = new Version(
+                        VersionProperties.elasticsearch.major,
+                        VersionProperties.elasticsearch.minor,
+                        VersionProperties.elasticsearch.revision,
+                        VersionProperties.elasticsearch.suffix,
+                        false)
                 // this doFirst is added before the info plugin, therefore it will run
                 // after the doFirst added by the info plugin, and we can override attributes
                 jarTask.manifest.attributes(
-                        'X-Compile-Elasticsearch-Version': version,
+                        'X-Compile-Elasticsearch-Version': versionWithoutSnapshot,
                         'X-Compile-Lucene-Version': VersionProperties.lucene,
-                        'X-Compile-Elasticsearch-Snapshot': isSnapshot,
+                        'X-Compile-Elasticsearch-Snapshot': VersionProperties.elasticsearch.isSnapshot(),
                         'Build-Date': ZonedDateTime.now(ZoneOffset.UTC),
                         'Build-Java-Version': project.compilerJavaVersion)
                 if (jarTask.manifest.attributes.containsKey('Change') == false) {
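`getJavaHome` is intended to be resolved lazily, so a missing JAVAn_HOME only fails the tasks that actually need that JDK. A minimal sketch of the pattern, assuming a build script; the task name and message are hypothetical:

    import static org.elasticsearch.gradle.BuildPlugin.getJavaHome

    // Hypothetical task that must run on a Java 9 JVM; the closure inside the
    // GString defers the lookup (and any failure) to execution time.
    task printJava9Version(type: Exec) {
        environment('JAVA_HOME', "${-> getJavaHome(project, 9, 'JAVA9_HOME must be set to run printJava9Version')}")
        commandLine 'sh', '-c', '"$JAVA_HOME/bin/java" -version'
    }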
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy
index 419d3792bb616..c28738d7695eb 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/Version.groovy
@@ -74,20 +74,36 @@ public class Version {
         return "${major}.${minor}.${revision}${suffix}${snapshotStr}"
     }
 
+    public boolean before(Version compareTo) {
+        return id < compareTo.id
+    }
+
     public boolean before(String compareTo) {
-        return id < fromString(compareTo).id
+        return before(fromString(compareTo))
+    }
+
+    public boolean onOrBefore(Version compareTo) {
+        return id <= compareTo.id
     }
 
     public boolean onOrBefore(String compareTo) {
-        return id <= fromString(compareTo).id
+        return onOrBefore(fromString(compareTo))
+    }
+
+    public boolean onOrAfter(Version compareTo) {
+        return id >= compareTo.id
     }
 
     public boolean onOrAfter(String compareTo) {
-        return id >= fromString(compareTo).id
+        return onOrAfter(fromString(compareTo))
+    }
+
+    public boolean after(Version compareTo) {
+        return id > compareTo.id
     }
 
     public boolean after(String compareTo) {
-        return id > fromString(compareTo).id
+        return after(fromString(compareTo))
     }
 
     public boolean onOrBeforeIncludingSuffix(Version otherVersion) {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionProperties.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionProperties.groovy
index c24431b4cbc1b..6983d12872f23 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionProperties.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionProperties.groovy
@@ -22,7 +22,7 @@ package org.elasticsearch.gradle
  * Accessor for shared dependency versions used by elasticsearch, namely the elasticsearch and lucene versions.
  */
 class VersionProperties {
-    static final String elasticsearch
+    static final Version elasticsearch
     static final String lucene
     static final Map<String, String> versions = new HashMap<>()
     static {
@@ -32,7 +32,7 @@ class VersionProperties {
             throw new RuntimeException('/version.properties resource missing')
         }
         props.load(propsStream)
-        elasticsearch = props.getProperty('elasticsearch')
+        elasticsearch = Version.fromString(props.getProperty('elasticsearch'))
         lucene = props.getProperty('lucene')
         for (String property : props.stringPropertyNames()) {
            versions.put(property, props.getProperty(property))
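With typed overloads next to the `String` ones, call sites can compare versions without repeated parsing. A few illustrative Groovy assertions (the version literals are arbitrary):

    Version v = Version.fromString('6.2.0')
    assert v.onOrAfter('6.2.0')                   // String overload: parse, then delegate
    assert v.before(Version.fromString('6.3.0'))  // typed overload: compares ids directly
    assert VersionProperties.elasticsearch instanceof Version  // now typed at the source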
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
index d2802638ce512..f674dbd33cdfd 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
@@ -41,7 +41,7 @@ public class DocsTestPlugin extends RestTestPlugin {
              * to the version being built for testing but needs to resolve to
              * the last released version for docs. */
             '\\{version\\}':
-                VersionProperties.elasticsearch.replace('-SNAPSHOT', ''),
+                VersionProperties.elasticsearch.toString().replace('-SNAPSHOT', ''),
             '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''),
         ]
         Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy
index f5dbcfd8b0d48..8e913153f05ad 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy
@@ -77,7 +77,7 @@ class PluginPropertiesTask extends Copy {
             'name': extension.name,
             'description': extension.description,
             'version': stringSnap(extension.version),
-            'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
+            'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch.toString()),
             'javaVersion': project.targetCompatibility as String,
             'classname': extension.classname,
             'extendedPlugins': extension.extendedPlugins.join(','),
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy
index fccbb43e87230..45f60c35bf68f 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy
@@ -49,6 +49,14 @@ public class LicenseHeadersTask extends AntTask {
     @Input
     List<String> approvedLicenses = ['Apache', 'Generated']
 
+    /**
+     * Files that should be excluded from the license header check. Use with extreme care, only in situations where the license on the
+     * source file is compatible with the codebase but we do not want to add the license to the list of approved headers (to avoid the
+     * possibility of inadvertently using the license on our own source files).
+     */
+    @Input
+    List<String> excludes = []
+
     /**
      * Additional license families that may be found. The key is the license category name (5 characters),
      * followed by the family name and the value list of patterns to search for.
@@ -95,7 +103,7 @@ public class LicenseHeadersTask extends AntTask {
                 for (File dir: dirSet.srcDirs) {
                     // sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main...
                     if (dir.exists()) {
-                        ant.fileset(dir: dir)
+                        ant.fileset(dir: dir, excludes: excludes.join(' '))
                     }
                 }
             }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index 884f008b8baf5..5aaf54454e137 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.test
 
+import org.elasticsearch.gradle.Version
 import org.gradle.api.GradleException
 import org.gradle.api.Project
 import org.gradle.api.tasks.Input
@@ -37,7 +38,7 @@ class ClusterConfiguration {
     int numBwcNodes = 0
 
     @Input
-    String bwcVersion = null
+    Version bwcVersion = null
 
     @Input
     int httpPort = 0
@@ -141,6 +142,8 @@ class ClusterConfiguration {
 
     Map<String, String> keystoreSettings = new HashMap<>()
 
+    Map<String, Object> keystoreFiles = new HashMap<>()
+
     // map from destination path, to source file
     Map<String, Object> extraConfigFiles = new HashMap<>()
 
@@ -167,6 +170,15 @@ class ClusterConfiguration {
         keystoreSettings.put(name, value)
     }
 
+    /**
+     * Adds a file to the keystore. The name is the secure setting name, and the sourceFile
+     * is anything accepted by project.file()
+     */
+    @Input
+    void keystoreFile(String name, Object sourceFile) {
+        keystoreFiles.put(name, sourceFile)
+    }
+
     @Input
     void plugin(String path) {
         Project pluginProject = project.project(path)
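The `keystoreFile` DSL method is the file-valued sibling of `keystoreSetting`. A hedged sketch of a cluster block using both, assuming the `integTestCluster` extension provided by the test plugins (the setting names and path are invented for illustration):

    integTestCluster {
        keystoreSetting 's3.client.default.access_key', 'test-access-key'
        keystoreFile 'gcs.client.default.credentials_file', 'src/test/resources/creds.json'
    }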
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index 8e97ee352ead2..5f9e4c49b34e9 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -107,11 +107,14 @@ class ClusterFormationTasks {
         for (int i = 0; i < config.numNodes; i++) {
             // we start N nodes and out of these N nodes there might be M bwc nodes.
             // for each of those nodes we might have a different configuration
-            String elasticsearchVersion = VersionProperties.elasticsearch
-            Configuration distro = currentDistro
+            final Configuration distro
+            final Version elasticsearchVersion
             if (i < config.numBwcNodes) {
                 elasticsearchVersion = config.bwcVersion
                 distro = bwcDistro
+            } else {
+                elasticsearchVersion = VersionProperties.elasticsearch
+                distro = currentDistro
             }
             NodeInfo node = new NodeInfo(config, i, project, prefix, elasticsearchVersion, sharedDir)
             nodes.add(node)
@@ -126,7 +129,7 @@ class ClusterFormationTasks {
     }
 
     /** Adds a dependency on the given distribution */
-    static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) {
+    static void configureDistributionDependency(Project project, String distro, Configuration configuration, Version elasticsearchVersion) {
         String packaging = distro
         if (distro == 'tar') {
             packaging = 'tar.gz'
@@ -137,7 +140,7 @@ class ClusterFormationTasks {
     }
 
     /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */
-    static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, String elasticsearchVersion) {
+    static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, Version elasticsearchVersion) {
         verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject)
         final String pluginName = findPluginName(pluginProject)
         project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip")
@@ -180,6 +183,7 @@ class ClusterFormationTasks {
         setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
         setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
         setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
+        setup = configureAddKeystoreFileTasks(prefix, project, setup, node)
 
         if (node.config.plugins.isEmpty() == false) {
             if (node.nodeVersion == VersionProperties.elasticsearch) {
@@ -302,7 +306,7 @@ class ClusterFormationTasks {
         // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
         esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
         esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
-        if (Version.fromString(node.nodeVersion).major >= 6) {
+        if (node.nodeVersion.major >= 6) {
             esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b'
         }
         // increase script compilation limit since tests can rapid-fire script compilations
@@ -323,7 +327,7 @@ class ClusterFormationTasks {
 
     /** Adds a task to create keystore */
     static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) {
-        if (node.config.keystoreSettings.isEmpty()) {
+        if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) {
             return setup
         } else {
             /*
@@ -357,6 +361,37 @@ class ClusterFormationTasks {
         return parentTask
     }
 
+    /** Adds tasks to add files to the keystore */
+    static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) {
+        Map<String, Object> kvs = node.config.keystoreFiles
+        if (kvs.isEmpty()) {
+            return setup
+        }
+        Task parentTask = setup
+        /*
+         * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to getting
+         * the short name requiring the path to already exist.
+         */
+        final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
+        for (Map.Entry<String, Object> entry in kvs) {
+            String key = entry.getKey()
+            String name = taskName(parent, node, 'addToKeystore#' + key)
+            String srcFileName = entry.getValue()
+            Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName)
+            t.doFirst {
+                File srcFile = project.file(srcFileName)
+                if (srcFile.isDirectory()) {
+                    throw new GradleException("Source for keystoreFile must be a file: ${srcFile}")
+                }
+                if (srcFile.exists() == false) {
+                    throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}")
+                }
+            }
+            parentTask = t
+        }
+        return parentTask
+    }
+
     static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
         if (node.config.extraConfigFiles.isEmpty()) {
             return setup
@@ -771,7 +806,7 @@ class ClusterFormationTasks {
         return retVal
     }
 
-    static void verifyProjectHasBuildPlugin(String name, String version, Project project, Project pluginProject) {
+    static void verifyProjectHasBuildPlugin(String name, Version version, Project project, Project pluginProject) {
         if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false && pluginProject.plugins.hasPlugin(MetaPluginBuildPlugin) == false) {
             throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " +
                     "[${project.path}] dependencies: the plugin is not an esplugin or es_meta_plugin")
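A detail worth noting in the keystore-file tasks above is the `"${-> ...}"` form: a GString whose placeholder is a closure is re-evaluated each time it is turned into a string, which is what lets the `elasticsearch-keystore` path be computed at execution time rather than at configuration time. A tiny standalone Groovy illustration (values invented):

    def home = null
    def lazyPath = "${-> home + '/bin/elasticsearch-keystore'}" // nothing evaluated yet
    home = '/tmp/es-home'
    assert lazyPath.toString() == '/tmp/es-home/bin/elasticsearch-keystore'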
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
index 40a8ec230ac4e..1fc944eeec6eb 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.gradle.test
 
 import com.sun.jna.Native
@@ -29,6 +30,8 @@ import java.nio.file.Files
 import java.nio.file.Path
 import java.nio.file.Paths
 
+import static org.elasticsearch.gradle.BuildPlugin.getJavaHome
+
 /**
  * A container for the files and configuration associated with a single node in a test cluster.
  */
@@ -100,10 +103,10 @@ class NodeInfo {
     ByteArrayOutputStream buffer = new ByteArrayOutputStream()
 
     /** the version of elasticsearch that this node runs */
-    String nodeVersion
+    Version nodeVersion
 
     /** Holds node configuration for part of a test cluster. */
-    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) {
+    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, Version nodeVersion, File sharedDir) {
         this.config = config
         this.nodeNum = nodeNum
         this.sharedDir = sharedDir
@@ -162,12 +165,22 @@ class NodeInfo {
             args.add("${esScript}")
         }
 
-        env = ['JAVA_HOME': project.runtimeJavaHome]
+        if (nodeVersion.before("6.2.0")) {
+            env = ['JAVA_HOME': "${-> getJavaHome(project, 8, "JAVA8_HOME must be set to run BWC tests against [" + nodeVersion + "]")}"]
+        } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) {
+            env = ['JAVA_HOME': "${-> getJavaHome(project, 9, "JAVA9_HOME must be set to run BWC tests against [" + nodeVersion + "]")}"]
+        } else {
+            env = ['JAVA_HOME': (String) project.runtimeJavaHome]
+        }
+
         args.addAll("-E", "node.portsfile=true")
         String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
         String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
         if (Boolean.parseBoolean(System.getProperty('tests.asserts', 'true'))) {
-            esJavaOpts += " -ea -esa"
+            // put the enable assertions options before other options to allow
+            // flexibility to disable assertions for specific packages or classes
+            // in the cluster-specific options
+            esJavaOpts = String.join(" ", "-ea", "-esa", esJavaOpts)
         }
         env.put('ES_JAVA_OPTS', esJavaOpts)
         for (Map.Entry property : System.properties.entrySet()) {
@@ -281,7 +294,7 @@ class NodeInfo {
     }
 
     /** Returns the directory elasticsearch home is contained in for the given distribution */
-    static File homeDir(File baseDir, String distro, String nodeVersion) {
+    static File homeDir(File baseDir, String distro, Version nodeVersion) {
         String path
         switch (distro) {
             case 'integ-test-zip':
@@ -299,7 +312,7 @@ class NodeInfo {
         return new File(baseDir, path)
     }
 
-    static File pathConf(File baseDir, String distro, String nodeVersion) {
+    static File pathConf(File baseDir, String distro, Version nodeVersion) {
         switch (distro) {
             case 'integ-test-zip':
             case 'zip':
diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 11f19f683e557..2aa72f0fa7a1c 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
[hunks removing <suppress .../> entries only; the removed entries themselves did not survive extraction]
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index e064b2f223cb6..0f3e6c62c9b75 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.3.0-snapshot-98a6b3d
+lucene = 7.3.0
 
 # optional dependencies
 spatial4j = 0.7
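The assertion flags are prepended precisely so that later, cluster-specific options can override them, since the JVM lets the last matching flag win. A small Groovy illustration of the resulting option string (property values invented):

    String collected = '-Dtests.seed=ABC'        // hypothetical collected system properties
    String jvmArgs = '-da:org.elasticsearch.Foo' // hypothetical cluster-specific override
    String esJavaOpts = String.join(' ', '-ea', '-esa', collected + ' ' + jvmArgs)
    assert esJavaOpts == '-ea -esa -Dtests.seed=ABC -da:org.elasticsearch.Foo'
    // assertions stay enabled everywhere except org.elasticsearch.Foo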
diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
index ca5f32205674c..9c632afe19192 100644
--- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
+++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/bulk/RestNoopBulkAction.java
@@ -27,7 +27,6 @@
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.shard.ShardId;
@@ -68,9 +67,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         String defaultIndex = request.param("index");
         String defaultType = request.param("type");
         String defaultRouting = request.param("routing");
-        String fieldsParam = request.param("fields");
         String defaultPipeline = request.param("pipeline");
-        String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;
 
         String waitForActiveShards = request.param("wait_for_active_shards");
         if (waitForActiveShards != null) {
@@ -78,7 +75,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
         }
         bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
         bulkRequest.setRefreshPolicy(request.param("refresh"));
-        bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, defaultFields,
+        bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, null,
                 defaultPipeline, null, true, request.getXContentType());
 
         // short circuit the call to the transport layer
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
index f5b46a6a53192..ff9c612e1d475 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java
@@ -45,6 +45,8 @@
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
 import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
@@ -406,4 +408,28 @@ public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener<RolloverResponse> listener, Header... headers) {
         restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent,
                 listener, emptySet(), headers);
     }
+
+    /**
+     * Updates specific index level settings using the Update Indices Settings API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html"> Update Indices Settings
+     * API on elastic.co</a>
+     */
+    public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, Request::indexPutSettings,
+                UpdateSettingsResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously updates specific index level settings using the Update Indices Settings API
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html"> Update Indices Settings
+     * API on elastic.co</a>
+     */
+    public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener<UpdateSettingsResponse> listener,
+            Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, Request::indexPutSettings,
+                UpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
+    }
+
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
index 802b1492be092..4e6fcdbb8dd4a 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java
@@ -43,6 +43,7 @@
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -136,7 +137,6 @@ static Request delete(DeleteRequest deleteRequest) {
 
         Params parameters = Params.builder();
         parameters.withRouting(deleteRequest.routing());
-        parameters.withParent(deleteRequest.parent());
         parameters.withTimeout(deleteRequest.timeout());
         parameters.withVersion(deleteRequest.version());
         parameters.withVersionType(deleteRequest.versionType());
@@ -315,9 +315,6 @@ static Request bulk(BulkRequest bulkRequest) throws IOException {
                 if (Strings.hasLength(request.routing())) {
                     metadata.field("routing", request.routing());
                 }
-                if (Strings.hasLength(request.parent())) {
-                    metadata.field("parent", request.parent());
-                }
                 if (request.version() != Versions.MATCH_ANY) {
                     metadata.field("version", request.version());
                 }
@@ -394,7 +391,6 @@ static Request get(GetRequest getRequest) {
         Params parameters = Params.builder();
         parameters.withPreference(getRequest.preference());
         parameters.withRouting(getRequest.routing());
-        parameters.withParent(getRequest.parent());
         parameters.withRefresh(getRequest.refresh());
         parameters.withRealtime(getRequest.realtime());
         parameters.withStoredFields(getRequest.storedFields());
@@ -422,7 +418,6 @@ static Request index(IndexRequest indexRequest) {
 
         Params parameters = Params.builder();
         parameters.withRouting(indexRequest.routing());
-        parameters.withParent(indexRequest.parent());
         parameters.withTimeout(indexRequest.timeout());
         parameters.withVersion(indexRequest.version());
         parameters.withVersionType(indexRequest.versionType());
@@ -446,7 +441,6 @@ static Request update(UpdateRequest updateRequest) throws IOException {
 
         Params parameters = Params.builder();
         parameters.withRouting(updateRequest.routing());
-        parameters.withParent(updateRequest.parent());
         parameters.withTimeout(updateRequest.timeout());
         parameters.withRefreshPolicy(updateRequest.getRefreshPolicy());
         parameters.withWaitForActiveShards(updateRequest.waitForActiveShards());
@@ -544,8 +538,10 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) {
 
     static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
         String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval");
+        Params params = Params.builder();
+        params.withIndicesOptions(rankEvalRequest.indicesOptions());
         HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpGet.METHOD_NAME, endpoint, Collections.emptyMap(), entity);
+        return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity);
     }
 
     static Request split(ResizeRequest resizeRequest) throws IOException {
@@ -598,7 +594,7 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException {
     }
 
     static Request indicesExist(GetIndexRequest request) {
-        //this can be called with no indices as argument by transport client, not via REST though
+        // this can be called with no indices as argument by transport client, not via REST though
         if (request.indices() == null || request.indices().length == 0) {
             throw new IllegalArgumentException("indices are mandatory");
         }
@@ -612,6 +608,20 @@ static Request indicesExist(GetIndexRequest request) {
         return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
     }
 
+    static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException {
+        Params parameters = Params.builder();
+        parameters.withTimeout(updateSettingsRequest.timeout());
+        parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
+        parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
+        parameters.withFlatSettings(updateSettingsRequest.flatSettings());
+        parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());
+
+        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
+        String endpoint = endpoint(indices, "_settings");
+        HttpEntity entity = createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
+        return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
+    }
+
     private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
         BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
         return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType));
@@ -709,10 +719,6 @@ Params withMasterTimeout(TimeValue masterTimeout) {
             return putParam("master_timeout", masterTimeout);
         }
 
-        Params withParent(String parent) {
-            return putParam("parent", parent);
-        }
-
         Params withPipeline(String pipeline) {
             return putParam("pipeline", pipeline);
         }
@@ -833,6 +839,13 @@ Params withIncludeDefaults(boolean includeDefaults) {
             return this;
         }
 
+        Params withPreserveExisting(boolean preserveExisting) {
+            if (preserveExisting) {
+                return putParam("preserve_existing", Boolean.TRUE.toString());
+            }
+            return this;
+        }
+
         Map<String, String> getParams() {
             return Collections.unmodifiableMap(params);
         }
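Putting the client pieces together, updating a dynamic setting through the new API looks roughly like this (a minimal sketch; `client` is an already-constructed RestHighLevelClient and the index name is illustrative):

    UpdateSettingsRequest request = new UpdateSettingsRequest("my-index");
    request.settings(Settings.builder()
            .put("index.number_of_replicas", 0) // dynamic, so it applies to an open index
            .build());
    UpdateSettingsResponse response = client.indices().putSettings(request);
    assert response.isAcknowledged();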
"test").endObject()); - indexRequest.parent("missing"); - - execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); - }); - - assertEquals(RestStatus.BAD_REQUEST, exception.status()); - assertEquals("Elasticsearch exception [type=illegal_argument_exception, " + - "reason=can't specify parent if no parent field has been configured]", exception.getMessage()); - } { ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { IndexRequest indexRequest = new IndexRequest("index", "type", "missing_pipeline"); @@ -456,22 +443,6 @@ public void testUpdate() throws IOException { assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " + "current version [2] is different than the one provided [1]]", exception.getMessage()); } - { - ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { - UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); - updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); - if (randomBoolean()) { - updateRequest.parent("missing"); - } else { - updateRequest.routing("missing"); - } - execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); - }); - - assertEquals(RestStatus.NOT_FOUND, exception.status()); - assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][id]: document missing]", - exception.getMessage()); - } { IndexRequest indexRequest = new IndexRequest("index", "type", "with_script"); indexRequest.source(singletonMap("counter", 12)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 7a29a35d20ab1..0feb78d66b2dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -49,6 +49,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; @@ -56,6 +58,8 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,6 +67,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -72,6 +77,7 @@ import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.Matchers.equalTo; 
import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class IndicesClientIT extends ESRestHighLevelClientTestCase { @@ -609,4 +615,97 @@ public void testRollover() throws IOException { assertEquals("test_new", rolloverResponse.getNewIndex()); } } + + public void testIndexPutSettings() throws IOException { + + final Setting dynamicSetting = IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING; + final String dynamicSettingKey = IndexMetaData.SETTING_NUMBER_OF_REPLICAS; + final int dynamicSettingValue = 0; + + final Setting staticSetting = IndexSettings.INDEX_CHECK_ON_STARTUP; + final String staticSettingKey = IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(); + final String staticSettingValue = "true"; + + final Setting unmodifiableSetting = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING; + final String unmodifiableSettingKey = IndexMetaData.SETTING_NUMBER_OF_SHARDS; + final int unmodifiableSettingValue = 3; + + String index = "index"; + createIndex(index, Settings.EMPTY); + + assertThat(dynamicSetting.getDefault(Settings.EMPTY), not(dynamicSettingValue)); + UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); + dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); + UpdateSettingsResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync); + + assertTrue(response.isAcknowledged()); + Map indexSettingsAsMap = getIndexSettingsAsMap(index); + assertThat(indexSettingsAsMap.get(dynamicSettingKey), equalTo(String.valueOf(dynamicSettingValue))); + + assertThat(staticSetting.getDefault(Settings.EMPTY), not(staticSettingValue)); + UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); + staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.getMessage(), + startsWith("Elasticsearch exception [type=illegal_argument_exception, " + + "reason=Can't update non dynamic settings [[index.shard.check_on_startup]] for open indices [[index/")); + + indexSettingsAsMap = getIndexSettingsAsMap(index); + assertNull(indexSettingsAsMap.get(staticSettingKey)); + + closeIndex(index); + response = execute(staticSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync); + assertTrue(response.isAcknowledged()); + openIndex(index); + indexSettingsAsMap = getIndexSettingsAsMap(index); + assertThat(indexSettingsAsMap.get(staticSettingKey), equalTo(staticSettingValue)); + + assertThat(unmodifiableSetting.getDefault(Settings.EMPTY), not(unmodifiableSettingValue)); + UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); + unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); + exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.getMessage(), startsWith( + "Elasticsearch exception [type=illegal_argument_exception, " + + "reason=Can't update non dynamic settings [[index.number_of_shards]] for open indices [[index/")); + closeIndex(index); + exception = 
expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.getMessage(), startsWith( + "Elasticsearch exception [type=illegal_argument_exception, " + + "reason=final index setting [index.number_of_shards], not updateable")); + } + + @SuppressWarnings("unchecked") + private Map getIndexSettingsAsMap(String index) throws IOException { + Map indexSettings = getIndexSettings(index); + return (Map)((Map) indexSettings.get(index)).get("settings"); + } + + public void testIndexPutSettingNonExistent() throws IOException { + + String index = "index"; + UpdateSettingsRequest indexUpdateSettingsRequest = new UpdateSettingsRequest(index); + String setting = "no_idea_what_you_are_talking_about"; + int value = 10; + indexUpdateSettingsRequest.settings(Settings.builder().put(setting, value).build()); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + + createIndex(index, Settings.EMPTY); + exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(exception.getMessage(), equalTo( + "Elasticsearch exception [type=illegal_argument_exception, " + + "reason=unknown setting [index.no_idea_what_you_are_talking_about] please check that any required plugins are installed, " + + "or check the breaking changes documentation for removed settings]")); + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java index 7e60e5f169f32..9497bdded0549 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java @@ -21,6 +21,8 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.rankeval.EvalQueryQuality; import org.elasticsearch.index.rankeval.PrecisionAtK; @@ -37,8 +39,9 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map.Entry; -import java.util.Set; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments; @@ -55,6 +58,10 @@ public void indexDocuments() throws IOException { client().performRequest("PUT", "/index/doc/5", Collections.emptyMap(), doc); client().performRequest("PUT", "/index/doc/6", Collections.emptyMap(), doc); client().performRequest("POST", "/index/_refresh"); + + // add another index to test basic multi index support + client().performRequest("PUT", "/index2/doc/7", Collections.emptyMap(), doc); + client().performRequest("POST", "/index2/_refresh"); } /** @@ -64,7 +71,9 @@ 
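The test above exercises the three classes of index settings: dynamic settings apply to open indices, static settings only to closed ones, and final settings (like `index.number_of_shards`) never change after creation. A hedged sketch of the close-update-open sequence for a static setting, assuming the open/close methods available on this client (index name illustrative):

    client.indices().close(new CloseIndexRequest("my-index"));
    UpdateSettingsRequest staticUpdate = new UpdateSettingsRequest("my-index");
    staticUpdate.settings(Settings.builder().put("index.shard.check_on_startup", "true").build());
    client.indices().putSettings(staticUpdate); // succeeds now that the index is closed
    client.indices().open(new OpenIndexRequest("my-index"));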
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java
index 7e60e5f169f32..9497bdded0549 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java
@@ -21,6 +21,8 @@
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.index.rankeval.EvalQueryQuality;
 import org.elasticsearch.index.rankeval.PrecisionAtK;
@@ -37,8 +39,9 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.Map.Entry;
-import java.util.Set;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import static org.elasticsearch.index.rankeval.EvaluationMetric.filterUnknownDocuments;
 
@@ -55,6 +58,10 @@ public void indexDocuments() throws IOException {
         client().performRequest("PUT", "/index/doc/5", Collections.emptyMap(), doc);
         client().performRequest("PUT", "/index/doc/6", Collections.emptyMap(), doc);
         client().performRequest("POST", "/index/_refresh");
+
+        // add another index to test basic multi index support
+        client().performRequest("PUT", "/index2/doc/7", Collections.emptyMap(), doc);
+        client().performRequest("POST", "/index2/_refresh");
     }
 
     /**
@@ -64,7 +71,9 @@ public void indexDocuments() throws IOException {
     public void testRankEvalRequest() throws IOException {
         SearchSourceBuilder testQuery = new SearchSourceBuilder();
         testQuery.query(new MatchAllQueryBuilder());
-        RatedRequest amsterdamRequest = new RatedRequest("amsterdam_query", createRelevant("index" , "2", "3", "4", "5"), testQuery);
+        List<RatedDocument> amsterdamRatedDocs = createRelevant("index" , "2", "3", "4", "5");
+        amsterdamRatedDocs.addAll(createRelevant("index2", "7"));
+        RatedRequest amsterdamRequest = new RatedRequest("amsterdam_query", amsterdamRatedDocs, testQuery);
         RatedRequest berlinRequest = new RatedRequest("berlin_query", createRelevant("index", "1"), testQuery);
         List<RatedRequest> specifications = new ArrayList<>();
         specifications.add(amsterdamRequest);
@@ -72,49 +81,46 @@ public void testRankEvalRequest() throws IOException {
         PrecisionAtK metric = new PrecisionAtK(1, false, 10);
         RankEvalSpec spec = new RankEvalSpec(specifications, metric);
 
-        RankEvalResponse response = execute(new RankEvalRequest(spec, new String[] { "index" }), highLevelClient()::rankEval,
+        RankEvalRequest rankEvalRequest = new RankEvalRequest(spec, new String[] { "index", "index2" });
+        RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval,
                 highLevelClient()::rankEvalAsync);
-        // the expected Prec@ for the first query is 4/6 and the expected Prec@ for the second is 1/6, divided by 2 to get the average
-        double expectedPrecision = (1.0 / 6.0 + 4.0 / 6.0) / 2.0;
+        // the expected Prec@ for the first query is 5/7 and the expected Prec@ for the second is 1/7, divided by 2 to get the average
+        double expectedPrecision = (1.0 / 7.0 + 5.0 / 7.0) / 2.0;
         assertEquals(expectedPrecision, response.getEvaluationResult(), Double.MIN_VALUE);
-        Set<Entry<String, EvalQueryQuality>> entrySet = response.getPartialResults().entrySet();
-        assertEquals(2, entrySet.size());
-        for (Entry<String, EvalQueryQuality> entry : entrySet) {
-            EvalQueryQuality quality = entry.getValue();
-            if (entry.getKey() == "amsterdam_query") {
-                assertEquals(2, filterUnknownDocuments(quality.getHitsAndRatings()).size());
-                List<RatedSearchHit> hitsAndRatings = quality.getHitsAndRatings();
-                assertEquals(6, hitsAndRatings.size());
-                for (RatedSearchHit hit : hitsAndRatings) {
-                    String id = hit.getSearchHit().getId();
-                    if (id.equals("1") || id.equals("6")) {
-                        assertFalse(hit.getRating().isPresent());
-                    } else {
-                        assertEquals(1, hit.getRating().get().intValue());
-                    }
-                }
+        Map<String, EvalQueryQuality> partialResults = response.getPartialResults();
+        assertEquals(2, partialResults.size());
+        EvalQueryQuality amsterdamQueryQuality = partialResults.get("amsterdam_query");
+        assertEquals(2, filterUnknownDocuments(amsterdamQueryQuality.getHitsAndRatings()).size());
+        List<RatedSearchHit> hitsAndRatings = amsterdamQueryQuality.getHitsAndRatings();
+        assertEquals(7, hitsAndRatings.size());
+        for (RatedSearchHit hit : hitsAndRatings) {
+            String id = hit.getSearchHit().getId();
+            if (id.equals("1") || id.equals("6")) {
+                assertFalse(hit.getRating().isPresent());
+            } else {
+                assertEquals(1, hit.getRating().get().intValue());
             }
-            if (entry.getKey() == "berlin_query") {
-                assertEquals(5, filterUnknownDocuments(quality.getHitsAndRatings()).size());
-                List<RatedSearchHit> hitsAndRatings = quality.getHitsAndRatings();
-                assertEquals(6, hitsAndRatings.size());
-                for (RatedSearchHit hit : hitsAndRatings) {
-                    String id = hit.getSearchHit().getId();
-                    if (id.equals("1")) {
-                        assertEquals(1, hit.getRating().get().intValue());
-                    } else {
-                        assertFalse(hit.getRating().isPresent());
-                    }
-                }
+        }
+        EvalQueryQuality berlinQueryQuality = partialResults.get("berlin_query");
+        assertEquals(6, filterUnknownDocuments(berlinQueryQuality.getHitsAndRatings()).size());
+        hitsAndRatings = berlinQueryQuality.getHitsAndRatings();
+        assertEquals(7, hitsAndRatings.size());
+        for (RatedSearchHit hit : hitsAndRatings) {
+            String id = hit.getSearchHit().getId();
+            if (id.equals("1")) {
+                assertEquals(1, hit.getRating().get().intValue());
+            } else {
+                assertFalse(hit.getRating().isPresent());
             }
         }
+
+        // now try this when index2 is closed
+        client().performRequest("POST", "index2/_close", Collections.emptyMap());
+        rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS));
+        response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
     }
 
     private static List<RatedDocument> createRelevant(String indexName, String... docs) {
-        List<RatedDocument> relevant = new ArrayList<>();
-        for (String doc : docs) {
-            relevant.add(new RatedDocument(indexName, doc, 1));
-        }
-        return relevant;
+        return Stream.of(docs).map(s -> new RatedDocument(indexName, s, 1)).collect(Collectors.toList());
     }
 }
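The expected precision in the test is plain arithmetic over the two rated queries, each now retrieving seven documents across the two indices:

    double amsterdam = 5.0 / 7.0; // docs 2, 3, 4, 5 in "index" plus doc 7 in "index2" are relevant
    double berlin = 1.0 / 7.0;    // only doc 1 is relevant
    double expected = (amsterdam + berlin) / 2.0; // the averaged Prec@ asserted above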
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
index 75ac543fbb4ce..abce180546dfc 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java
@@ -46,6 +46,7 @@
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
 import org.elasticsearch.action.admin.indices.shrink.ResizeType;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -209,9 +210,6 @@ public void testMultiGet() throws IOException {
             if (randomBoolean()) {
                 item.routing(randomAlphaOfLength(4));
             }
-            if (randomBoolean()) {
-                item.parent(randomAlphaOfLength(4));
-            }
             if (randomBoolean()) {
                 item.storedFields(generateRandomStringArray(16, 8, false));
             }
@@ -253,11 +251,6 @@ public void testDelete() {
                 deleteRequest.routing(routing);
                 expectedParams.put("routing", routing);
             }
-            if (randomBoolean()) {
-                String parent = randomAlphaOfLengthBetween(3, 10);
-                deleteRequest.parent(parent);
-                expectedParams.put("parent", parent);
-            }
         }
 
         Request request = Request.delete(deleteRequest);
@@ -525,11 +518,6 @@ public void testIndex() throws IOException {
                 indexRequest.routing(routing);
                 expectedParams.put("routing", routing);
             }
-            if (randomBoolean()) {
-                String parent = randomAlphaOfLengthBetween(3, 10);
-                indexRequest.parent(parent);
-                expectedParams.put("parent", parent);
-            }
             if (randomBoolean()) {
                 String pipeline = randomAlphaOfLengthBetween(3, 10);
                 indexRequest.setPipeline(pipeline);
@@ -732,11 +720,6 @@ public void testUpdate() throws IOException {
                 updateRequest.routing(routing);
                 expectedParams.put("routing", routing);
             }
-            if (randomBoolean()) {
-                String parent = randomAlphaOfLengthBetween(3, 10);
-                updateRequest.parent(parent);
-                expectedParams.put("parent", parent);
-            }
             if (randomBoolean()) {
                 String timeout = randomTimeValue();
                 updateRequest.timeout(timeout);
@@ -840,15 +823,9 @@ public void testBulk() throws IOException {
                 if (randomBoolean()) {
                     indexRequest.setPipeline(randomAlphaOfLength(5));
                 }
-                if (randomBoolean()) {
-                    indexRequest.parent(randomAlphaOfLength(5));
-                }
             } else if (opType == DocWriteRequest.OpType.CREATE) {
                 IndexRequest createRequest = new IndexRequest(index, type, id).source(source, xContentType).create(true);
                 docWriteRequest = createRequest;
-                if (randomBoolean()) {
-                    createRequest.parent(randomAlphaOfLength(5));
-                }
             } else if (opType == DocWriteRequest.OpType.UPDATE) {
                 final UpdateRequest updateRequest = new UpdateRequest(index, type, id).doc(new IndexRequest().source(source, xContentType));
                 docWriteRequest = updateRequest;
@@ -858,9 +835,6 @@ public void testBulk() throws IOException {
                 if (randomBoolean()) {
                     randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>());
                 }
-                if (randomBoolean()) {
-                    updateRequest.parent(randomAlphaOfLength(5));
-                }
             } else if (opType == DocWriteRequest.OpType.DELETE) {
                 docWriteRequest = new DeleteRequest(index, type, id);
             } else {
@@ -902,7 +876,6 @@ public void testBulk() throws IOException {
             assertEquals(originalRequest.type(), parsedRequest.type());
             assertEquals(originalRequest.id(), parsedRequest.id());
             assertEquals(originalRequest.routing(), parsedRequest.routing());
-            assertEquals(originalRequest.parent(), parsedRequest.parent());
             assertEquals(originalRequest.version(), parsedRequest.version());
             assertEquals(originalRequest.versionType(), parsedRequest.versionType());
 
@@ -1155,7 +1128,7 @@ public void testMultiSearch() throws IOException {
         List<SearchRequest> requests = new ArrayList<>();
         CheckedBiConsumer<SearchRequest, XContentParser> consumer = (searchRequest, p) -> {
-            SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p);
+            SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p, false);
             if (searchSourceBuilder.equals(new SearchSourceBuilder()) == false) {
                 searchRequest.source(searchSourceBuilder);
             }
@@ -1247,6 +1220,8 @@ public void testRankEval() throws Exception {
                 new PrecisionAtK());
         String[] indices = randomIndicesNames(0, 5);
         RankEvalRequest rankEvalRequest = new RankEvalRequest(spec, indices);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomIndicesOptions(rankEvalRequest::indicesOptions, rankEvalRequest::indicesOptions, expectedParams);
 
         Request request = Request.rankEval(rankEvalRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
@@ -1256,8 +1231,10 @@ public void testRankEval() throws Exception {
         }
         endpoint.add(RestRankEvalAction.ENDPOINT);
         assertEquals(endpoint.toString(), request.getEndpoint());
-        assertEquals(Collections.emptyMap(), request.getParameters());
+        assertEquals(3, request.getParameters().size());
+        assertEquals(expectedParams, request.getParameters());
         assertToXContentBody(spec, request.getEntity());
+
     }
 
     public void testSplit() throws IOException {
@@ -1363,6 +1340,33 @@ public void testRollover() throws IOException {
         assertEquals(expectedParams, request.getParameters());
     }
 
+    public void testIndexPutSettings() throws IOException {
+        String[] indices = randomBoolean() ? null : randomIndicesNames(0, 2);
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomFlatSettings(updateSettingsRequest::flatSettings, expectedParams);
+        setRandomMasterTimeout(updateSettingsRequest, expectedParams);
+        setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams);
+        if (randomBoolean()) {
+            updateSettingsRequest.setPreserveExisting(randomBoolean());
+            if (updateSettingsRequest.isPreserveExisting()) {
+                expectedParams.put("preserve_existing", "true");
+            }
+        }
+
+        Request request = Request.indexPutSettings(updateSettingsRequest);
+        StringJoiner endpoint = new StringJoiner("/", "/", "");
+        if (indices != null && indices.length > 0) {
+            endpoint.add(String.join(",", indices));
+        }
+        endpoint.add("_settings");
+        assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        assertEquals(HttpPut.METHOD_NAME, request.getMethod());
+        assertToXContentBody(updateSettingsRequest, request.getEntity());
+        assertEquals(expectedParams, request.getParameters());
+    }
+
     private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException {
         BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false);
         assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue());
Exception { // tag::multi-get-request-item-extras request.add(new MultiGetRequest.Item("index", "type", "with_routing") .routing("some_routing")); // <1> - request.add(new MultiGetRequest.Item("index", "type", "with_parent") - .parent("some_parent")); // <2> request.add(new MultiGetRequest.Item("index", "type", "with_version") - .versionType(VersionType.EXTERNAL) // <3> - .version(10123L)); // <4> + .versionType(VersionType.EXTERNAL) // <2> + .version(10123L)); // <3> // end::multi-get-request-item-extras // tag::multi-get-request-top-level-extras request.preference("some_preference"); // <1> diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index bc6946eb2dc7f..e33d1e4729b0e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -48,6 +48,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; @@ -56,6 +58,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -394,6 +397,7 @@ public void testCreateIndexAsync() throws Exception { // tag::create-index-execute-listener ActionListener listener = new ActionListener() { + @Override public void onResponse(CreateIndexResponse createIndexResponse) { // <1> @@ -1378,4 +1382,110 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testIndexPutSettings() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + // tag::put-settings-request + UpdateSettingsRequest request = new UpdateSettingsRequest("index1"); // <1> + UpdateSettingsRequest requestMultiple = + new UpdateSettingsRequest("index1", "index2"); // <2> + UpdateSettingsRequest requestAll = new UpdateSettingsRequest(); // <3> + // end::put-settings-request + + // tag::put-settings-create-settings + String settingKey = "index.number_of_replicas"; + int settingValue = 0; + Settings settings = + Settings.builder() + .put(settingKey, settingValue) + .build(); // <1> + // end::put-settings-create-settings + // tag::put-settings-request-index-settings + request.settings(settings); + // end::put-settings-request-index-settings + + { + // tag::put-settings-settings-builder + Settings.Builder settingsBuilder = + Settings.builder() + .put(settingKey, 
settingValue); + request.settings(settingsBuilder); // <1> + // end::put-settings-settings-builder + } + { + // tag::put-settings-settings-map + Map map = new HashMap<>(); + map.put(settingKey, settingValue); + request.settings(map); // <1> + // end::put-settings-settings-map + } + { + // tag::put-settings-settings-source + request.settings( + "{\"index.number_of_replicas\": \"2\"}" + , XContentType.JSON); // <1> + // end::put-settings-settings-source + } + + // tag::put-settings-request-flat-settings + request.flatSettings(true); // <1> + // end::put-settings-request-flat-settings + // tag::put-settings-request-preserveExisting + request.setPreserveExisting(false); // <1> + // end::put-settings-request-preserveExisting + // tag::put-settings-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::put-settings-request-timeout + // tag::put-settings-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-settings-request-masterTimeout + // tag::put-settings-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::put-settings-request-indicesOptions + + // tag::put-settings-execute + UpdateSettingsResponse updateSettingsResponse = + client.indices().putSettings(request); + // end::put-settings-execute + + // tag::put-settings-response + boolean acknowledged = updateSettingsResponse.isAcknowledged(); // <1> + // end::put-settings-response + assertTrue(acknowledged); + + // tag::put-settings-execute-listener + ActionListener listener = + new ActionListener() { + + @Override + public void onResponse(UpdateSettingsResponse updateSettingsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-settings-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-settings-execute-async + client.indices().putSettingsAsync(request,listener); // <1> + // end::put-settings-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 96d962c3ac553..bd1cf48f14195 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -131,7 +131,7 @@ public void testSearch() throws Exception { // tag::search-source-sorting sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.DESC)); // <1> - sourceBuilder.sort(new FieldSortBuilder("_uid").order(SortOrder.ASC)); // <2> + sourceBuilder.sort(new FieldSortBuilder("_id").order(SortOrder.ASC)); // <2> // end::search-source-sorting // tag::search-source-filtering-off diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 3d282a642e0da..59aa2baab9672 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ 
-58,6 +58,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; /** * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. @@ -135,8 +136,7 @@ private static RestClient createRestClient(final boolean useAuth, final boolean final RestClientBuilder restClientBuilder = RestClient.builder( new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())).setDefaultHeaders(defaultHeaders); if (pathPrefix.length() > 0) { - // sometimes cut off the leading slash - restClientBuilder.setPathPrefix(randomBoolean() ? pathPrefix.substring(1) : pathPrefix); + restClientBuilder.setPathPrefix(pathPrefix); } if (useAuth) { @@ -281,6 +281,33 @@ public void testPreemptiveAuthDisabled() throws IOException { } } + public void testUrlWithoutLeadingSlash() throws Exception { + if (pathPrefix.length() == 0) { + try { + restClient.performRequest("GET", "200"); + fail("request should have failed"); + } catch(ResponseException e) { + assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); + } + } else { + { + Response response = restClient.performRequest("GET", "200"); + //a trailing slash gets automatically added if a pathPrefix is configured + assertEquals(200, response.getStatusLine().getStatusCode()); + } + { + //pathPrefix is not required to start with '/', will be added automatically + try (RestClient restClient = RestClient.builder( + new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) + .setPathPrefix(pathPrefix.substring(1)).build()) { + Response response = restClient.performRequest("GET", "200"); + //a trailing slash gets automatically added if a pathPrefix is configured + assertEquals(200, response.getStatusLine().getStatusCode()); + } + } + } + } + private Response bodyTest(final String method) throws IOException { return bodyTest(restClient, method); } diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 8d5aa204c487d..48b84b4036240 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -17,11 +17,12 @@ * under the License. 
*/ + import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version -import java.util.regex.Matcher +import static org.elasticsearch.gradle.BuildPlugin.getJavaHome /** * This is a dummy project which does a local checkout of the previous @@ -54,6 +55,16 @@ subprojects { final String remote = System.getProperty("tests.bwc.remote", "elastic") + final boolean gitFetchLatest + final String gitFetchLatestProperty = System.getProperty("tests.bwc.git_fetch_latest", "true") + if ("true".equals(gitFetchLatestProperty)) { + gitFetchLatest = true + } else if ("false".equals(gitFetchLatestProperty)) { + gitFetchLatest = false + } else { + throw new GradleException("tests.bwc.git_fetch_latest must be [true] or [false] but was [" + gitFetchLatestProperty + "]") + } + task createClone(type: LoggedExec) { onlyIf { checkoutDir.exists() == false } commandLine = ['git', 'clone', rootDir, checkoutDir] @@ -83,7 +94,7 @@ subprojects { } task fetchLatest(type: LoggedExec) { - onlyIf { project.gradle.startParameter.isOffline() == false } + onlyIf { project.gradle.startParameter.isOffline() == false && gitFetchLatest } dependsOn addRemote workingDir = checkoutDir commandLine = ['git', 'fetch', '--all'] @@ -134,12 +145,13 @@ subprojects { task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata workingDir = checkoutDir - if (project.rootProject.ext.runtimeJavaVersion == JavaVersion.VERSION_1_8 && ["5.6", "6.0", "6.1"].contains(bwcBranch)) { - /* - * If runtime Java home is set to JDK 8 and we are building branches that are officially built with JDK 8, push this to JAVA_HOME for - * these builds. - */ - environment('JAVA_HOME', System.getenv('RUNTIME_JAVA_HOME')) + if (["5.6", "6.0", "6.1"].contains(bwcBranch)) { + // we are building branches that are officially built with JDK 8, push JAVA8_HOME to JAVA_HOME for these builds + environment('JAVA_HOME', "${-> getJavaHome(project, 8, "JAVA8_HOME is required to build BWC versions for BWC branch [" + bwcBranch + "]")}") + } else if ("6.2".equals(bwcBranch)) { + environment('JAVA_HOME', "${-> getJavaHome(project, 9, "JAVA9_HOME is required to build BWC versions for BWC branch [" + bwcBranch + "]")}") + } else { + environment('JAVA_HOME', project.compilerJavaHome) } if (Os.isFamily(Os.FAMILY_WINDOWS)) { executable 'cmd' diff --git a/distribution/packages/src/deb/copyright b/distribution/packages/src/deb/copyright index 1e647bbac54e0..98a923677c907 100644 --- a/distribution/packages/src/deb/copyright +++ b/distribution/packages/src/deb/copyright @@ -1,4 +1,4 @@ -Copyright 2013-2016 Elasticsearch +Copyright 2013-2018 Elasticsearch License: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 5a14d041c763b..e1733e478b8c2 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -698,10 +698,13 @@ private void installPlugin(Terminal terminal, boolean isBatch, Path tmpRoot, final PluginInfo info = loadPluginInfo(terminal, tmpRoot, isBatch, env); // read optional security policy (extra permissions), if it exists, confirm or warn the user Path policy = tmpRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); + 
final Set permissions; if (Files.exists(policy)) { - Set permissions = PluginSecurity.parsePermissions(policy, env.tmpFile()); - PluginSecurity.confirmPolicyExceptions(terminal, permissions, info.hasNativeController(), isBatch); + permissions = PluginSecurity.parsePermissions(policy, env.tmpFile()); + } else { + permissions = Collections.emptySet(); } + PluginSecurity.confirmPolicyExceptions(terminal, permissions, info.hasNativeController(), isBatch); final Path destination = env.pluginsFile().resolve(info.getName()); deleteOnFailure.add(destination); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index d799cb0407f58..96e009b3462f1 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -1153,6 +1153,59 @@ private Function checksumAndString(final MessageDigest digest, f return bytes -> MessageDigests.toHexString(digest.digest(bytes)) + s; } + // checks the plugin requires a policy confirmation, and does not install when that is rejected by the user + // the plugin is installed after this method completes + private void assertPolicyConfirmation(Tuple env, String pluginZip, String... warnings) throws Exception { + for (int i = 0; i < warnings.length; ++i) { + String warning = warnings[i]; + for (int j = 0; j < i; ++j) { + terminal.addTextInput("y"); // accept warnings we have already tested + } + // default answer, does not install + terminal.addTextInput(""); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + assertEquals("installation aborted by user", e.getMessage()); + + assertThat(terminal.getOutput(), containsString("WARNING: " + warning)); + try (Stream fileStream = Files.list(env.v2().pluginsFile())) { + assertThat(fileStream.collect(Collectors.toList()), empty()); + } + + // explicitly do not install + terminal.reset(); + for (int j = 0; j < i; ++j) { + terminal.addTextInput("y"); // accept warnings we have already tested + } + terminal.addTextInput("n"); + e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); + assertEquals("installation aborted by user", e.getMessage()); + assertThat(terminal.getOutput(), containsString("WARNING: " + warning)); + try (Stream fileStream = Files.list(env.v2().pluginsFile())) { + assertThat(fileStream.collect(Collectors.toList()), empty()); + } + } + + // allow installation + terminal.reset(); + for (int j = 0; j < warnings.length; ++j) { + terminal.addTextInput("y"); + } + installPlugin(pluginZip, env.v1()); + for (String warning : warnings) { + assertThat(terminal.getOutput(), containsString("WARNING: " + warning)); + } + } + + public void testPolicyConfirmation() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + writePluginSecurityPolicy(pluginDir, "setAccessible", "setFactory"); + String pluginZip = createPluginUrl("fake", pluginDir); + + assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions"); + assertPlugin("fake", pluginDir, env.v2()); + } + public void testMetaPluginPolicyConfirmation() throws Exception { Tuple env = createEnv(fs, temp); Path metaDir = createPluginDir(temp); @@ -1166,32 +1219,60 @@ public void testMetaPluginPolicyConfirmation() throws Exception { 
writePlugin("fake2", fake2Dir); String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); - // default answer, does not install - terminal.addTextInput(""); - UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); - assertEquals("installation aborted by user", e.getMessage()); - assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions")); - try (Stream fileStream = Files.list(env.v2().pluginsFile())) { - assertThat(fileStream.collect(Collectors.toList()), empty()); - } + assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions"); + assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2()); + assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); + } - // explicitly do not install - terminal.reset(); - terminal.addTextInput("n"); - e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); - assertEquals("installation aborted by user", e.getMessage()); - assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions")); - try (Stream fileStream = Files.list(env.v2().pluginsFile())) { - assertThat(fileStream.collect(Collectors.toList()), empty()); - } + public void testNativeControllerConfirmation() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true"); - // allow installation - terminal.reset(); - terminal.addTextInput("y"); - installPlugin(pluginZip, env.v1()); - assertThat(terminal.getOutput(), containsString("WARNING: plugin requires additional permissions")); + assertPolicyConfirmation(env, pluginZip, "plugin forks a native controller"); + assertPlugin("fake", pluginDir, env.v2()); + } + + public void testMetaPluginNativeControllerConfirmation() throws Exception { + Tuple env = createEnv(fs, temp); + Path metaDir = createPluginDir(temp); + Path fake1Dir = metaDir.resolve("fake1"); + Files.createDirectory(fake1Dir); + writePlugin("fake1", fake1Dir, "has.native.controller", "true"); + Path fake2Dir = metaDir.resolve("fake2"); + Files.createDirectory(fake2Dir); + writePlugin("fake2", fake2Dir); + String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); + + assertPolicyConfirmation(env, pluginZip, "plugin forks a native controller"); assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2()); assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); } + public void testNativeControllerAndPolicyConfirmation() throws Exception { + Tuple env = createEnv(fs, temp); + Path pluginDir = createPluginDir(temp); + writePluginSecurityPolicy(pluginDir, "setAccessible", "setFactory"); + String pluginZip = createPluginUrl("fake", pluginDir, "has.native.controller", "true"); + + assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions", "plugin forks a native controller"); + assertPlugin("fake", pluginDir, env.v2()); + } + + public void testMetaPluginNativeControllerAndPolicyConfirmation() throws Exception { + Tuple env = createEnv(fs, temp); + Path metaDir = createPluginDir(temp); + Path fake1Dir = metaDir.resolve("fake1"); + Files.createDirectory(fake1Dir); + writePluginSecurityPolicy(fake1Dir, "setAccessible", "setFactory"); + writePlugin("fake1", fake1Dir); + Path fake2Dir = metaDir.resolve("fake2"); + Files.createDirectory(fake2Dir); + writePlugin("fake2", fake2Dir, "has.native.controller", "true"); + String pluginZip = createMetaPluginUrl("meta-plugin", metaDir); + 
+ assertPolicyConfirmation(env, pluginZip, "plugin requires additional permissions", "plugin forks a native controller"); + assertMetaPlugin("meta-plugin", "fake1", metaDir, env.v2()); + assertMetaPlugin("meta-plugin", "fake2", metaDir, env.v2()); + } } diff --git a/docs/java-rest/high-level/cluster/put_settings.asciidoc b/docs/java-rest/high-level/cluster/put_settings.asciidoc index 2d9f55c1e9419..74b479faa0501 100644 --- a/docs/java-rest/high-level/cluster/put_settings.asciidoc +++ b/docs/java-rest/high-level/cluster/put_settings.asciidoc @@ -58,7 +58,7 @@ The following arguments can optionally be provided: -------------------------------------------------- include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings] -------------------------------------------------- -<1> Wether the updated settings returned in the `ClusterUpdateSettings` should +<1> Whether the updated settings returned in the `ClusterUpdateSettings` should be in a flat format ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/document/delete.asciidoc b/docs/java-rest/high-level/document/delete.asciidoc index da252fa224f8d..5d263c894c663 100644 --- a/docs/java-rest/high-level/document/delete.asciidoc +++ b/docs/java-rest/high-level/document/delete.asciidoc @@ -23,12 +23,6 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-request-routing] -------------------------------------------------- <1> Routing value -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-request-parent] --------------------------------------------------- -<1> Parent value - ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/CRUDDocumentationIT.java[delete-request-timeout] diff --git a/docs/java-rest/high-level/document/get.asciidoc b/docs/java-rest/high-level/document/get.asciidoc index 9d04e138eea1e..504b22a8e6dd7 100644 --- a/docs/java-rest/high-level/document/get.asciidoc +++ b/docs/java-rest/high-level/document/get.asciidoc @@ -51,12 +51,6 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[get-request-routing] -------------------------------------------------- <1> Routing value -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[get-request-parent] --------------------------------------------------- -<1> Parent value - ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/CRUDDocumentationIT.java[get-request-preference] diff --git a/docs/java-rest/high-level/document/index.asciidoc b/docs/java-rest/high-level/document/index.asciidoc index f673a1799094f..b64e19d1d33eb 100644 --- a/docs/java-rest/high-level/document/index.asciidoc +++ b/docs/java-rest/high-level/document/index.asciidoc @@ -49,12 +49,6 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-request-routing] -------------------------------------------------- <1> Routing value -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-request-parent] --------------------------------------------------- -<1> Parent value - ["source","java",subs="attributes,callouts,macros"] 
-------------------------------------------------- include-tagged::{doc-tests}/CRUDDocumentationIT.java[index-request-timeout] diff --git a/docs/java-rest/high-level/document/multi-get.asciidoc b/docs/java-rest/high-level/document/multi-get.asciidoc index 1f4628e149c96..723cd0fba8645 100644 --- a/docs/java-rest/high-level/document/multi-get.asciidoc +++ b/docs/java-rest/high-level/document/multi-get.asciidoc @@ -57,9 +57,8 @@ separately in the mappings) include-tagged::{doc-tests}/CRUDDocumentationIT.java[multi-get-request-item-extras] -------------------------------------------------- <1> Routing value -<2> Parent value -<3> Version -<4> Version type +<2> Version +<3> Version type {ref}/search-request-preference.html[`preference`], {ref}/docs-get.html#realtime[`realtime`] diff --git a/docs/java-rest/high-level/document/update.asciidoc b/docs/java-rest/high-level/document/update.asciidoc index 70483454e0043..1c780093115d8 100644 --- a/docs/java-rest/high-level/document/update.asciidoc +++ b/docs/java-rest/high-level/document/update.asciidoc @@ -93,12 +93,6 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-request-routing] -------------------------------------------------- <1> Routing value -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-request-parent] --------------------------------------------------- -<1> Parent value - ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-request-timeout] diff --git a/docs/java-rest/high-level/indices/put_settings.asciidoc b/docs/java-rest/high-level/indices/put_settings.asciidoc new file mode 100644 index 0000000000000..49312da82a400 --- /dev/null +++ b/docs/java-rest/high-level/indices/put_settings.asciidoc @@ -0,0 +1,142 @@ +[[java-rest-high-indices-put-settings]] +=== Update Indices Settings API + +The Update Indices Settings API allows changing specific index-level settings.
+ +[[java-rest-high-indices-put-settings-request]] +==== Update Indices Settings Request + +An `UpdateSettingsRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request] +-------------------------------------------------- +<1> Update settings for one index +<2> Update settings for multiple indices +<3> Update settings for all indices + +==== Indices Settings +At least one setting to be updated must be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-create-settings] +-------------------------------------------------- +<1> Sets the index settings to be applied + +==== Providing the Settings +The settings to be applied can be provided in different ways: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-create-settings] +-------------------------------------------------- +<1> Creates a setting as `Settings` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-builder] +-------------------------------------------------- +<1> Settings provided as `Settings.Builder` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-source] +-------------------------------------------------- +<1> Settings provided as `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-map] +-------------------------------------------------- +<1> Settings provided as a `Map` + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-flat-settings] +-------------------------------------------------- +<1> Whether the updated settings returned in the `UpdateSettings` should +be in a flat format + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-preserveExisting] +-------------------------------------------------- +<1> Whether to update existing settings. 
If set to `true`, existing settings +on an index remain unchanged; the default is `false` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the new setting +as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the new setting +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +[[java-rest-high-indices-put-settings-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute] +-------------------------------------------------- + +[[java-rest-high-indices-put-settings-async]] +==== Asynchronous Execution + +The asynchronous execution of an indices update settings request requires both the +`UpdateSettingsRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute-async] +-------------------------------------------------- +<1> The `UpdateSettingsRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed, the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `UpdateSettingsResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +[[java-rest-high-indices-put-settings-response]] +==== Update Indices Settings Response + +The returned `UpdateSettingsResponse` allows you to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request \ No newline at end of file diff --git a/docs/java-rest/high-level/search/multi-search.asciidoc b/docs/java-rest/high-level/search/multi-search.asciidoc index 1b76f8976666a..5d5910be30084 100644 --- a/docs/java-rest/high-level/search/multi-search.asciidoc +++ b/docs/java-rest/high-level/search/multi-search.asciidoc @@ -72,7 +72,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute-list ==== MultiSearchResponse -The `MultiSearchResponse` that is returned by executing the `multiSearch` +The `MultiSearchResponse` that is returned by executing the `multiSearch` method contains a `MultiSearchResponse.Item` for each `SearchRequest` in the `MultiSearchRequest`. Each `MultiSearchResponse.Item` contains an exception in `getFailure` if the request failed or a diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index af81775a90072..3e9472ff2cb58 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -275,7 +275,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener] The `SearchResponse` that is returned by executing the search provides details about the search execution itself as well as access to the documents returned. First, there is useful information about the request execution itself, like the -HTTP status code, execution time or wether the request terminated early or timed +HTTP status code, execution time or whether the request terminated early or timed out: ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 0330b1903c5bf..29052171cddc6 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -64,6 +64,7 @@ Index Management:: * <> * <> * <> +* <> Mapping Management:: * <> @@ -87,6 +88,7 @@ include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] include::indices/exists_alias.asciidoc[] +include::indices/put_settings.asciidoc[] == Cluster APIs diff --git a/docs/java-rest/license.asciidoc b/docs/java-rest/license.asciidoc index b097fe853b64e..687974868274e 100644 --- a/docs/java-rest/license.asciidoc +++ b/docs/java-rest/license.asciidoc @@ -1,6 +1,6 @@ == License -Copyright 2013-2017 Elasticsearch +Copyright 2013-2018 Elasticsearch Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
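The new Update Indices Settings API documented in the file above can be exercised end to end with a short client-side sketch. This is a minimal illustration, not part of the change itself: it assumes a node reachable on localhost:9200 and a hypothetical index named `my-index`, and it uses the `UpdateSettingsRequest`/`putSettings` additions from this diff.

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

public class PutSettingsExample {
    public static void main(String[] args) throws Exception {
        // assumes a node listening on localhost:9200 (hypothetical setup)
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // drop the replica count for a hypothetical index "my-index"
            UpdateSettingsRequest request = new UpdateSettingsRequest("my-index");
            request.settings(Settings.builder()
                    .put("index.number_of_replicas", 0)
                    .build());
            // synchronous execution; putSettingsAsync takes the same request plus a listener
            UpdateSettingsResponse response = client.indices().putSettings(request);
            System.out.println("acknowledged: " + response.isAcknowledged());
        }
    }
}
--------------------------------------------------

The same request object works for the asynchronous variant; only the listener plumbing shown in the documentation snippets above differs.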
diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index e82e14b043840..8cf163d55d7b9 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -239,7 +239,7 @@ their last name: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -260,7 +260,7 @@ names start with a consonant and end with a vowel: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -281,7 +281,7 @@ remove all of the vowels in all of their last names: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -297,7 +297,7 @@ method so it supports `$1` and `\1` for replacements: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -319,7 +319,7 @@ This will make all of the vowels in the hockey player's last names upper case: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -337,7 +337,7 @@ last names upper case: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index dbad00931af20..6544b0ad26495 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -58,7 +58,7 @@ characters from the opening `/*` to the closing `*/` are ignored. ==== Keywords Painless reserves the following keywords for built-in language features. -These keywords cannot be used used in other contexts, such as identifiers. +These keywords cannot be used in other contexts, such as identifiers. [cols="^1,^1,^1,^1,^1"] |==== diff --git a/docs/perl/index.asciidoc b/docs/perl/index.asciidoc index 734447d0dd103..fc487c735ebd6 100644 --- a/docs/perl/index.asciidoc +++ b/docs/perl/index.asciidoc @@ -115,7 +115,7 @@ https://github.com/elastic/elasticsearch-perl/blob/master/CONTRIBUTING.asciidoc[ == Copyright and License -This software is Copyright (c) 2013-2016 by Elasticsearch BV. +This software is Copyright (c) 2013-2018 by Elasticsearch BV. This is free software, licensed under: https://github.com/elastic/elasticsearch-perl/blob/master/LICENSE.txt[The Apache License Version 2.0]. diff --git a/docs/python/index.asciidoc b/docs/python/index.asciidoc index 3dc718229961a..ea1b6a837b58f 100644 --- a/docs/python/index.asciidoc +++ b/docs/python/index.asciidoc @@ -120,7 +120,7 @@ some of the more engaging tasks like bulk indexing and reindexing. === License -Copyright 2013-2017 Elasticsearch +Copyright 2013-2018 Elasticsearch Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
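The Painless examples above also show the switch to typeless endpoints (`POST hockey/_update_by_query` instead of `POST hockey/player/_update_by_query`). The following is a minimal sketch of the same call through the low-level REST client; it assumes a local node and the `hockey` index from the examples, and the script and parameter values are purely illustrative.

["source","java"]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class TypelessUpdateByQuery {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // note the typeless endpoint: no "player" segment in the path
            String body = "{ \"script\": { \"lang\": \"painless\", "
                    + "\"source\": \"ctx._source.last = params.last\", "
                    + "\"params\": { \"last\": \"gaudreau\" } } }";
            Response response = client.performRequest(
                    "POST", "/hockey/_update_by_query",
                    Collections.<String, String>emptyMap(),
                    new NStringEntity(body, ContentType.APPLICATION_JSON));
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------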
diff --git a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc index 11dd0ea7b99f7..fa4d94c232784 100644 --- a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc @@ -38,7 +38,7 @@ Example: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query" : { "match" : {"content" : "Bird flu"} @@ -153,7 +153,7 @@ We can drill down into examples of these documents to see why pozmantier is conn [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query": { "simple_query_string": { @@ -221,7 +221,7 @@ with the `filter_duplicate_text` setting turned on: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query": { "match": { @@ -424,7 +424,7 @@ context: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query" : { "match" : { @@ -463,7 +463,7 @@ will be analyzed using the `source_fields` parameter: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query" : { "match" : { diff --git a/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc index da5595adfbc43..3fd0d21ae4e69 100644 --- a/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc @@ -217,7 +217,7 @@ had a value. [source,js] -------------------------------------------------- -GET latency/data/_search +GET latency/_search { "size": 0, "aggs" : { diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index 087dc4cc41280..bd1b0284a84fb 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -114,7 +114,7 @@ POST /_search === Special Paths Instead of pathing to a metric, `buckets_path` can use a special `"_count"` path. This instructs -the pipeline aggregation to use the document count as it's input. For example, a moving average can be calculated on the document count of each bucket, instead of a specific metric: +the pipeline aggregation to use the document count as its input. 
For example, a moving average can be calculated on the document count of each bucket, instead of a specific metric: [source,js] -------------------------------------------------- diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index db2ff3eeb6e3c..be015a811e9b3 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -75,7 +75,7 @@ Back to the API format, this will delete tweets from the `twitter` index: [source,js] -------------------------------------------------- -POST twitter/_doc/_delete_by_query?conflicts=proceed +POST twitter/_delete_by_query?conflicts=proceed { "query": { "match_all": {} @@ -85,12 +85,12 @@ POST twitter/_doc/_delete_by_query?conflicts=proceed // CONSOLE // TEST[setup:twitter] -It's also possible to delete documents of multiple indexes and multiple -types at once, just like the search API: +It's also possible to delete documents of multiple indexes at once, just like +the search API: [source,js] -------------------------------------------------- -POST twitter,blog/_docs,post/_delete_by_query +POST twitter,blog/_delete_by_query { "query": { "match_all": {} @@ -492,7 +492,7 @@ Which results in a sensible `total` like this one: ==== Automatic slicing You can also let delete-by-query automatically parallelize using -<> to slice on `_uid`. Use `slices` to specify the number of +<> to slice on `_id`. Use `slices` to specify the number of slices to use: [source,js] diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index fe1ebf4739632..01eb0ad6adf90 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -229,14 +229,14 @@ The result of the above index operation is: }, "_index" : "twitter", "_type" : "_doc", - "_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32", + "_id" : "W0tpsmIBdwcYyG50zbta", "_version" : 1, "_seq_no" : 0, "_primary_term" : 1, "result": "created" } -------------------------------------------------- -// TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/] +// TESTRESPONSE[s/W0tpsmIBdwcYyG50zbta/$body._id/ s/"successful" : 2/"successful" : 1/] [float] [[index-routing]] diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 527a007c5f1a9..482f3d62f5d5d 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -67,7 +67,7 @@ Back to the API format, this will update tweets from the `twitter` index: [source,js] -------------------------------------------------- -POST twitter/_doc/_update_by_query?conflicts=proceed +POST twitter/_update_by_query?conflicts=proceed -------------------------------------------------- // CONSOLE // TEST[setup:twitter] @@ -145,12 +145,12 @@ This API doesn't allow you to move the documents it touches, just modify their source. This is intentional! We've made no provisions for removing the document from its original location. 
-It's also possible to do this whole thing on multiple indexes and multiple -types at once, just like the search API: +It's also possible to do this whole thing on multiple indexes at once, just +like the search API: [source,js] -------------------------------------------------- -POST twitter,blog/_doc,post/_update_by_query +POST twitter,blog/_update_by_query -------------------------------------------------- // CONSOLE // TEST[s/^/PUT twitter\nPUT blog\n/] @@ -531,7 +531,7 @@ Which results in a sensible `total` like this one: ==== Automatic slicing You can also let update-by-query automatically parallelize using -<> to slice on `_uid`. Use `slices` to specify the number of +<> to slice on `_id`. Use `slices` to specify the number of slices to use: [source,js] diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index 0012beebdca98..53164d366cd93 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -61,6 +61,15 @@ `object`. The mapping also allows you to define (amongst other things) how the value for a field should be analyzed. +[[glossary-filter]] filter :: + + A filter is a non-scoring <>, meaning that it does not score documents. + It is only concerned about answering the question - "Does this document match?". + The answer is always a simple, binary yes or no. This kind of query is said to be made + in a <>, + hence it is called a filter. Filters are simple checks for set inclusion or exclusion. + In most cases, the goal of filtering is to reduce the number of documents that have to be examined. + [[glossary-index]] index :: An index is like a _table_ in a relational database. It has a @@ -105,6 +114,16 @@ + See also <> +[[glossary-query]] query :: + + A query is the basic component of a search. A search can be defined by one or more queries + which can be mixed and matched in endless combinations. While <> are + queries that only determine if a document matches, those queries that also calculate how well + the document matches are known as "scoring queries". Those queries assign it a score, which is + later used to sort matched documents. Scoring queries take more resources than <> + and their query results are not cacheable. As a general rule, use query clauses for full-text + search or for any condition that requires scoring, and use filters for everything else. + [[glossary-replica-shard]] replica shard :: Each <> can have zero or more @@ -161,8 +180,9 @@ A term is an exact value that is indexed in Elasticsearch. The terms `foo`, `Foo`, `FOO` are NOT equivalent. Terms (i.e. exact values) can - be searched for using _term_ queries. + - See also <> and <>. + be searched for using _term_ queries. + + + See also <> and <>. [[glossary-text]] text :: diff --git a/docs/reference/how-to/recipes/scoring.asciidoc b/docs/reference/how-to/recipes/scoring.asciidoc index f9973385c7163..09c0e585765d6 100644 --- a/docs/reference/how-to/recipes/scoring.asciidoc +++ b/docs/reference/how-to/recipes/scoring.asciidoc @@ -35,9 +35,9 @@ consistent across queries. This work around has another benefit: when two documents have the same score, they will be sorted by their internal Lucene doc id (which is unrelated to the -`_id` or `_uid`) by default. However these doc ids could be different across -copies of the same shard. So by always hitting the same shard, we would get -more consistent ordering of documents that have the same scores. +`_id`) by default. However these doc ids could be different across copies of +the same shard. 
So by always hitting the same shard, we would get more +consistent ordering of documents that have the same scores. [float] ==== Relevancy looks wrong diff --git a/docs/reference/indices/create-index.asciidoc b/docs/reference/indices/create-index.asciidoc index 8089745844d8b..f2882e6fb60d4 100644 --- a/docs/reference/indices/create-index.asciidoc +++ b/docs/reference/indices/create-index.asciidoc @@ -173,3 +173,28 @@ PUT test?wait_for_active_shards=2 A detailed explanation of `wait_for_active_shards` and its possible values can be found <>. + +[float] +=== Skipping types + +Types are scheduled to be fully removed in Elasticsearch 8.0 and will not appear +in requests or responses anymore. You can opt in for this future behaviour by +setting `include_type_name=false` and putting mappings directly under `mappings` +in the index creation call. + +Here is an example: + +[source,js] +-------------------------------------------------- +PUT test?include_type_name=false +{ + "mappings": { + "properties": { + "foo": { + "type": "keyword" + } + } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc index 953f9522a4128..4bca1a9d09d35 100644 --- a/docs/reference/indices/get-mapping.asciidoc +++ b/docs/reference/indices/get-mapping.asciidoc @@ -41,3 +41,48 @@ GET /_mapping -------------------------------------------------- // CONSOLE // TEST[setup:twitter] + +[float] +=== Skipping types + +Types are scheduled to be fully removed in Elasticsearch 8.0 and will not appear +in requests or responses anymore. You can opt in for this future behaviour by +setting `include_type_name=false` in the request, which will return mappings +directly under `mappings` without keying by the type name. + +Here is an example: + +[source,js] +-------------------------------------------------- +PUT test?include_type_name=false +{ + "mappings": { + "properties": { + "foo": { + "type": "keyword" + } + } + } +} + +GET test/_mappings?include_type_name=false +-------------------------------------------------- +// CONSOLE + +which returns + +[source,js] +-------------------------------------------------- +{ + "test": { + "mappings": { + "properties": { + "foo": { + "type": "keyword" + } + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc index 74a05aa554f9d..80dc52cd47d06 100644 --- a/docs/reference/indices/put-mapping.asciidoc +++ b/docs/reference/indices/put-mapping.asciidoc @@ -109,3 +109,54 @@ PUT my_index/_mapping/_doc Each <> specifies whether or not its setting can be updated on an existing field. + +[float] +=== Skipping types + +Types are scheduled to be fully removed in Elasticsearch 8.0 and will not appear +in requests or responses anymore. You can opt in for this future behaviour by +setting `include_type_name=false`. + +NOTE: This should only be done on indices that have been created with +`include_type_name=false` or that used `_doc` as a type name. 
+ +The Console script from the above section is equivalent to the below invocation: + +[source,js] +----------------------------------- +PUT my_index?include_type_name=false <1> +{ + "mappings": { + "properties": { + "name": { + "properties": { + "first": { + "type": "text" + } + } + }, + "user_id": { + "type": "keyword" + } + } + } +} + +PUT my_index/_mapping?include_type_name=false +{ + "properties": { + "name": { + "properties": { + "last": { <2> + "type": "text" + } + } + }, + "user_id": { + "type": "keyword", + "ignore_above": 100 <3> + } + } +} +----------------------------------- +// CONSOLE diff --git a/docs/reference/mapping/fields.asciidoc b/docs/reference/mapping/fields.asciidoc index dd88910607269..155e23c9e544a 100644 --- a/docs/reference/mapping/fields.asciidoc +++ b/docs/reference/mapping/fields.asciidoc @@ -13,10 +13,6 @@ can be customised when a mapping type is created. The index to which the document belongs. -<>:: - - A composite field consisting of the `_type` and the `_id`. - <>:: The document's <>. @@ -73,5 +69,3 @@ include::fields/source-field.asciidoc[] include::fields/type-field.asciidoc[] -include::fields/uid-field.asciidoc[] - diff --git a/docs/reference/mapping/fields/id-field.asciidoc b/docs/reference/mapping/fields/id-field.asciidoc index c46ca28af0602..0f4ed15196962 100644 --- a/docs/reference/mapping/fields/id-field.asciidoc +++ b/docs/reference/mapping/fields/id-field.asciidoc @@ -5,10 +5,6 @@ Each document has an `_id` that uniquely identifies it, which is indexed so that documents can be looked up either with the <> or the <>. -NOTE: This was not the case with pre-6.0 indices due to the fact that they -supported multiple types, so the `_type` and `_id` were merged into a composite -primary key called `_uid`. - The value of the `_id` field is accessible in certain queries (`term`, `terms`, `match`, `query_string`, `simple_query_string`). diff --git a/docs/reference/mapping/fields/uid-field.asciidoc b/docs/reference/mapping/fields/uid-field.asciidoc deleted file mode 100644 index 2ca3b69a5ae61..0000000000000 --- a/docs/reference/mapping/fields/uid-field.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -[[mapping-uid-field]] -=== `_uid` field - -deprecated[6.0.0, Now that types have been removed, documents are uniquely identified by their `_id` and the `_uid` field has only been kept as a view over the `_id` field for backward compatibility.] - -Each document indexed is associated with a <> (see -<>) and an <>. These values are -combined as `{type}#{id}` and indexed as the `_uid` field. 
- -The value of the `_uid` field is accessible in queries, aggregations, scripts, -and when sorting: - -[source,js] --------------------------- -# Example documents -PUT my_index/_doc/1 -{ - "text": "Document with ID 1" -} - -PUT my_index/_doc/2?refresh=true -{ - "text": "Document with ID 2" -} --------------------------- -// CONSOLE - -[source,js] --------------------------- -GET my_index/_search -{ - "query": { - "terms": { - "_uid": [ "_doc#1", "_doc#2" ] <1> - } - }, - "aggs": { - "UIDs": { - "terms": { - "field": "_uid", <2> - "size": 10 - } - } - }, - "sort": [ - { - "_uid": { <3> - "order": "desc" - } - } - ], - "script_fields": { - "UID": { - "script": { - "lang": "painless", - "source": "doc['_uid']" <4> - } - } - } -} --------------------------- -// CONSOLE -// TEST[continued] -// TEST[warning:Fielddata access on the _uid field is deprecated, use _id instead] - -<1> Querying on the `_uid` field (also see the <>) -<2> Aggregating on the `_uid` field -<3> Sorting on the `_uid` field -<4> Accessing the `_uid` field in scripts diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index ae81773e6a0a2..57faef2dbd7db 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -92,6 +92,16 @@ format was changed early on to conform to the format used by GeoJSON. ================================================== +[NOTE] +A point can be expressed as a http://en.wikipedia.org/wiki/Geohash[geohash]. +Geohashes are https://en.wikipedia.org/wiki/Base32[base32] encoded strings of +the bits of the latitude and longitude interleaved. Each character in a geohash +adds an additional 5 bits to the precision. So the longer the hash, the more +precise it is. For indexing purposes, geohashes are translated into +latitude-longitude pairs. During this process only the first 12 characters are +used, so specifying more than 12 characters in a geohash doesn't increase the +precision. The 12 characters provide 60 bits, which should reduce a possible +error to less than 2cm. [[geo-point-params]] ==== Parameters for `geo_point` fields diff --git a/docs/reference/migration/migrate_7_0/analysis.asciidoc b/docs/reference/migration/migrate_7_0/analysis.asciidoc index 560cc68818ad4..db617d3301fd7 100644 --- a/docs/reference/migration/migrate_7_0/analysis.asciidoc +++ b/docs/reference/migration/migrate_7_0/analysis.asciidoc @@ -1,20 +1,12 @@ [[breaking_70_analysis_changes]] === Analysis changes -==== The `delimited_payload_filter` is renamed - -The `delimited_payload_filter` is renamed to `delimited_payload`, the old name is -deprecated and will be removed at some point, so it should be replaced by -`delimited_payload`. - - ==== Limiting the number of tokens produced by _analyze To safeguard against out of memory errors, the number of tokens that can be produced using the `_analyze` endpoint has been limited to 10000. This default limit can be changed for a particular index with the index setting `index.analyze.max_token_count`. - ==== Limiting the length of an analyzed text during highlighting Highlighting a text that was indexed without offsets or term vectors, @@ -22,4 +14,11 @@ requires analysis of this text in memory real time during the search request. For large texts this analysis may take substantial amount of time and memory. To protect against this, the maximum number of characters that will be analyzed has been limited to 1000000. 
This default limit can be changed -for a particular index with the index setting `index.highlight.max_analyzed_offset`. \ No newline at end of file +for a particular index with the index setting `index.highlight.max_analyzed_offset`. + +==== `delimited_payload_filter` renaming + +The `delimited_payload_filter` was deprecated and renamed to `delimited_payload` in 6.2. +Using the old name in indices created before 7.0 will issue deprecation warnings. Using the old +name in new indices created in 7.0 will throw an error. Use the new name `delimited_payload` +instead. diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 0b5bdede0afef..f8b8f9670c7fa 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -2,7 +2,7 @@ === Breaking API changes in 7.0 ==== Camel case and underscore parameters deprecated in 6.x have been removed -A number of duplicate parameters deprecated in 6.x have been removed from +A number of duplicate parameters deprecated in 6.x have been removed from Bulk request, Multi Get request, Term Vectors request, and More Like This Query requests. @@ -47,3 +47,8 @@ has been reused to report the configured number of threads in the pool. This aligns the output of the API with the configuration values for thread pools. Note that `core` and `max` will be populated for scaling thread pools, and `size` will be populated for fixed thread pools. + +==== The parameter `fields` deprecated in 6.x has been removed from Bulk and Update requests + +The Update API now returns `400 - Bad request` if the request contains +unknown parameters (instead of ignoring them, as in previous versions). + diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index b0ab90546c3a8..c56a0ae9b6422 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -5,6 +5,12 @@ The `_all` field deprecated in 6 have now been removed. +==== The `_uid` meta field is removed + +This field used to index a composite key formed of the `_type` and the `_id`. +Now that indices cannot have multiple types, this has been removed in favour +of `_id`. + ==== The `_default_` mapping is no longer allowed The `_default_` mapping has been deprecated in 6.0 and is now no longer allowed diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 0d3770993b2ff..529bd1fa5995b 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -9,6 +9,9 @@ `all_fields`, `locale`, `auto_generate_phrase_query` and `lowercase_expanded_terms` deprecated in 6.x have been removed. +* Purely negative queries (only MUST_NOT clauses) now return a score of `0` + rather than `1`. + ==== Adaptive replica selection enabled by default Adaptive replica selection has been enabled by default. If you wish to return to @@ -70,3 +73,8 @@ Executing a Regexp Query with a long regex string may degrade search performance To safeguard against this, the maximum length of regex that can be used in a Regexp Query request has been limited to 1000. This default maximum can be changed for a particular index with the index setting `index.max_regex_length`.
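Both of the limits discussed above (`index.highlight.max_analyzed_offset` and `index.max_regex_length`) are per-index settings. As a sketch of raising them with the low-level Java REST client of this era, assuming a node on `localhost:9200`, a hypothetical index named `my_index`, and that both settings are dynamically updatable (otherwise set them at index creation):

[source,java]
--------------------------
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

public class RaiseIndexLimits {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // raise the regex-length and highlighting-analysis limits for one index
            String body = "{ \"index.max_regex_length\": 2000, "
                    + "\"index.highlight.max_analyzed_offset\": 2000000 }";
            Response response = client.performRequest("PUT", "/my_index/_settings",
                    Collections.emptyMap(), new NStringEntity(body, ContentType.APPLICATION_JSON));
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------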
+ +==== Invalid `_search` request body + +Search requests with extra content after the main object will no longer be accepted +by the `_search` endpoint. A parsing exception will be thrown instead. diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 837cfcc43ebf7..9686749486826 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -82,4 +82,4 @@ Enable or disable allocation for persistent tasks: This setting does not affect the persistent tasks that are already being executed. Only newly created persistent tasks, or tasks that must be reassigned (after a node left the cluster, for example), are impacted by this setting. --- +-- \ No newline at end of file diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 920f62043cfe2..c69d4991583be 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -20,7 +20,7 @@ http://en.wikipedia.org/wiki/Chunked_transfer_encoding[HTTP chunking]. The settings in the table below can be configured for HTTP. Note that none of them are dynamically updatable so for them to take effect they should be set in -`elasticsearch.yml`. +the Elasticsearch <>. [cols="<,<",options="header",] |======================================================================= @@ -100,6 +100,12 @@ simple message will be returned. Defaults to `true` |`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`. +|`http.max_warning_header_count` |The maximum number of warning headers in + client HTTP responses, defaults to unbounded. + +|`http.max_warning_header_size` |The maximum total size of warning headers in +client HTTP responses, defaults to unbounded. + |======================================================================= It also uses the common diff --git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index 5eb52a5dda5a7..55adcb8f94cf8 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -2,7 +2,7 @@ === Ids Query Filters documents that only have the provided ids. Note, this query -uses the <> field. +uses the <> field. [source,js] -------------------------------------------------- diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 2e2967d4efab9..a17027fb3c335 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -60,6 +60,11 @@ directory. Instead, mappings should be created using the API with: The `_parent` field has been removed in favour of the <>. +[role="exclude",id="mapping-uid-field"] +=== `_uid` field + +The `_uid` field has been removed in favour of the <>. + [role="exclude",id="modules-memcached"] === memcached diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index e2998086c8917..571a488699168 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -5,7 +5,7 @@ experimental[The ranking evaluation API is experimental and may be changed or re The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. 
Given this set of queries and a -list or manually rated documents, the `_rank_eval` endpoint calculates and +list of manually rated documents, the `_rank_eval` endpoint calculates and returns typical information retrieval metrics like _mean reciprocal rank_, _precision_ or _discounted cumulative gain_. diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index b1166eae9f9e6..be725aaf362f5 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -225,9 +225,9 @@ GET /twitter/_search?scroll=1m The result from the first request returned documents that belong to the first slice (id: 0) and the result from the second request returned documents that belong to the second slice. Since the maximum number of slices is set to 2 the union of the results of the two requests is equivalent to the results of a scroll query without slicing. -By default the splitting is done on the shards first and then locally on each shard using the _uid field +By default the splitting is done on the shards first and then locally on each shard using the _id field with the following formula: -`slice(doc) = floorMod(hashCode(doc._uid), max)` +`slice(doc) = floorMod(hashCode(doc._id), max)` For instance if the number of shards is equal to 2 and the user requested 4 slices then the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard. diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index d507d5001f63e..b8e3da976d616 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -12,8 +12,7 @@ that match the query. The query can either be provided using a simple All search APIs can be applied across multiple types within an index, and across multiple indices with support for the <>. For -example, we can search on all documents across all types within the -twitter index: +example, we can search on all documents within the twitter index: [source,js] -------------------------------------------------- @@ -22,15 +21,6 @@ GET /twitter/_search?q=user:kimchy // CONSOLE // TEST[setup:twitter] -We can also search within specific types: - -[source,js] --------------------------------------------------- -GET /twitter/tweet,user/_search?q=user:kimchy --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] - We can also search all tweets with a certain tag across several indices (for example, when each user has his own index): diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 27b09ad407ab6..2c0c8821355a7 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -180,7 +180,7 @@ Response: { "index": "twitter", "valid": true, - "explanation": "((user:terminator^3.71334 plot:future^2.763601 plot:human^2.8415773 plot:sarah^3.4193945 plot:kyle^3.8244398 plot:cyborg^3.9177752 plot:connor^4.040236 plot:reese^4.7133346 ... )~6) -ConstantScore(_uid:tweet#2)) #(ConstantScore(_type:tweet))^0.0" + "explanation": "((user:terminator^3.71334 plot:future^2.763601 plot:human^2.8415773 plot:sarah^3.4193945 plot:kyle^3.8244398 plot:cyborg^3.9177752 plot:connor^4.040236 plot:reese^4.7133346 ... 
)~6) -ConstantScore(_id:2)) #(ConstantScore(_type:tweet))^0.0" } ] } diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index cbeb10c8c8571..4670bb875802b 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -114,7 +114,7 @@ that the Elasticsearch process has the rights to create enough threads under normal use. This check is enforced only on Linux. If you are on Linux, to pass the maximum number of threads check, you must configure your system to allow the Elasticsearch process the ability to create at -least 2048 threads. This can be done via `/etc/security/limits.conf` +least 4096 threads. This can be done via `/etc/security/limits.conf` using the `nproc` setting (note that you might have to increase the limits for the `root` user too). diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index da043772252d4..d055f1251e19d 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -128,6 +128,15 @@ sudo dpkg -i elasticsearch-{version}.deb endif::[] +ifdef::include-xpack[] +[[deb-enable-indices]] +==== Enable automatic creation of {xpack} indices + +{xpack} will try to automatically create a number of indices within Elasticsearch. +include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] + +endif::include-xpack[] + include::init-systemd.asciidoc[] [[deb-running-init]] diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index b820a4f71b6d0..730f043341773 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -115,6 +115,15 @@ endif::[] include::skip-set-kernel-parameters.asciidoc[] +ifdef::include-xpack[] +[[rpm-enable-indices]] +==== Enable automatic creation of {xpack} indices + +{xpack} will try to automatically create a number of indices within {es}. +include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] + +endif::include-xpack[] + include::init-systemd.asciidoc[] [[rpm-running-init]] diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 049db8b0c7437..5d79e9669f9f0 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -37,7 +37,7 @@ endif::[] [[install-msi-gui]] ==== Install using the graphical user interface (GUI) -Double-click the downloaded `.msi` package to launch a GUI wizard that will guide you through the +Double-click the downloaded `.msi` package to launch a GUI wizard that will guide you through the installation process. You can view help on any step by clicking the `?` button, which reveals an aside panel with additional information for each input: @@ -52,7 +52,7 @@ image::images/msi_installer/msi_installer_locations.png[] Then select whether to install as a service or start Elasticsearch manually as needed. 
When installing as a service, you can also decide which account to run the service under as well -as whether the service should be started after installation and when Windows is started or +as whether the service should be started after installation and when Windows is started or restarted: [[msi-installer-service]] @@ -73,14 +73,14 @@ part of the installation, with the option to configure a HTTPS proxy through whi [[msi-installer-selected-plugins]] image::images/msi_installer/msi_installer_selected_plugins.png[] -Upon choosing to install X-Pack plugin, an additional step allows a choice of the type of X-Pack +Upon choosing to install X-Pack plugin, an additional step allows a choice of the type of X-Pack license to install, in addition to X-Pack Security configuration and built-in user configuration: [[msi-installer-xpack]] image::images/msi_installer/msi_installer_xpack.png[] -NOTE: X-Pack includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the -https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. +NOTE: X-Pack includes a choice of a Trial or Basic license for 30 days. After that, you can obtain one of the +https://www.elastic.co/subscriptions[available subscriptions] or {ref}/security-settings.html[disable Security]. The Basic license is free and includes the https://www.elastic.co/products/x-pack/monitoring[Monitoring] extension. After clicking the install button, the installer will begin installation: @@ -105,7 +105,7 @@ then running: msiexec.exe /i elasticsearch-{version}.msi /qn -------------------------------------------- -By default, msiexec does not wait for the installation process to complete, since it runs in the +By default, msiexec does not wait for the installation process to complete, since it runs in the Windows subsystem. To wait on the process to finish and ensure that `%ERRORLEVEL%` is set accordingly, it is recommended to use `start /wait` to create a process and wait for it to exit @@ -114,8 +114,8 @@ accordingly, it is recommended to use `start /wait` to create a process and wait start /wait msiexec.exe /i elasticsearch-{version}.msi /qn -------------------------------------------- -As with any MSI installation package, a log file for the installation process can be found -within the `%TEMP%` directory, with a randomly generated name adhering to the format +As with any MSI installation package, a log file for the installation process can be found +within the `%TEMP%` directory, with a randomly generated name adhering to the format `MSI.LOG`. The path to a log file can be supplied using the `/l` command line argument ["source","sh",subs="attributes,callouts"] @@ -139,126 +139,126 @@ All settings exposed within the GUI are also available as command line arguments as _properties_ within Windows Installer documentation) that can be passed to msiexec: [horizontal] -`INSTALLDIR`:: +`INSTALLDIR`:: - The installation directory. The final directory in the path **must** + The installation directory. The final directory in the path **must** be the version of Elasticsearch. Defaults to ++%ProgramW6432%\Elastic\Elasticsearch{backslash}{version}++. -`DATADIRECTORY`:: +`DATADIRECTORY`:: - The directory in which to store your data. + The directory in which to store your data. Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\data` -`CONFIGDIRECTORY`:: +`CONFIGDIRECTORY`:: - The directory in which to store your configuration. + The directory in which to store your configuration. 
Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\config` -`LOGSDIRECTORY`:: +`LOGSDIRECTORY`:: - The directory in which to store your logs. + The directory in which to store your logs. Defaults to `%ALLUSERSPROFILE%\Elastic\Elasticsearch\logs` -`PLACEWRITABLELOCATIONSINSAMEPATH`:: +`PLACEWRITABLELOCATIONSINSAMEPATH`:: Whether the data, configuration and logs directories should be created under the installation directory. Defaults to `false` -`INSTALLASSERVICE`:: +`INSTALLASSERVICE`:: - Whether Elasticsearch is installed and configured as a Windows Service. + Whether Elasticsearch is installed and configured as a Windows Service. Defaults to `true` -`STARTAFTERINSTALL`:: +`STARTAFTERINSTALL`:: - Whether the Windows Service is started after installation finishes. + Whether the Windows Service is started after installation finishes. Defaults to `true` -`STARTWHENWINDOWSSTARTS`:: +`STARTWHENWINDOWSSTARTS`:: - Whether the Windows Service is started when Windows is started. + Whether the Windows Service is started when Windows is started. Defaults to `true` -`USELOCALSYSTEM`:: +`USELOCALSYSTEM`:: - Whether the Windows service runs under the LocalSystem Account. + Whether the Windows service runs under the LocalSystem Account. Defaults to `true` -`USENETWORKSERVICE`:: +`USENETWORKSERVICE`:: Whether the Windows service runs under the NetworkService Account. Defaults to `false` -`USEEXISTINGUSER`:: +`USEEXISTINGUSER`:: Whether the Windows service runs under a specified existing account. Defaults to `false` -`USER`:: +`USER`:: The username for the account under which the Windows service runs. Defaults to `""` -`PASSWORD`:: +`PASSWORD`:: The password for the account under which the Windows service runs. Defaults to `""` -`CLUSTERNAME`:: +`CLUSTERNAME`:: The name of the cluster. Defaults to `elasticsearch` -`NODENAME`:: +`NODENAME`:: The name of the node. Defaults to `%COMPUTERNAME%` -`MASTERNODE`:: +`MASTERNODE`:: Whether Elasticsearch is configured as a master node. Defaults to `true` -`DATANODE`:: +`DATANODE`:: Whether Elasticsearch is configured as a data node. Defaults to `true` -`INGESTNODE`:: +`INGESTNODE`:: Whether Elasticsearch is configured as an ingest node. Defaults to `true` -`SELECTEDMEMORY`:: +`SELECTEDMEMORY`:: - The amount of memory to allocate to the JVM heap for Elasticsearch. - Defaults to `2048` unless the target machine has less than 4GB in total, in which case + The amount of memory to allocate to the JVM heap for Elasticsearch. + Defaults to `2048` unless the target machine has less than 4GB in total, in which case it defaults to 50% of total memory. -`LOCKMEMORY`:: +`LOCKMEMORY`:: Whether `bootstrap.memory_lock` should be used to try to lock the process address space into RAM. Defaults to `false` -`UNICASTNODES`:: +`UNICASTNODES`:: A comma separated list of hosts in the form `host:port` or `host` to be used for unicast discovery. Defaults to `""` -`MINIMUMMASTERNODES`:: +`MINIMUMMASTERNODES`:: - The minimum number of master-eligible nodes that must be visible + The minimum number of master-eligible nodes that must be visible in order to form a cluster. Defaults to `""` -`NETWORKHOST`:: +`NETWORKHOST`:: - The hostname or IP address to bind the node to and _publish_ (advertise) this + The hostname or IP address to bind the node to and _publish_ (advertise) this host to other nodes in the cluster. Defaults to `""` -`HTTPPORT`:: +`HTTPPORT`:: The port to use for exposing Elasticsearch APIs over HTTP. 
Defaults to `9200` -`TRANSPORTPORT`:: +`TRANSPORTPORT`:: - The port to use for internal communication between nodes within the cluster. + The port to use for internal communication between nodes within the cluster. Defaults to `9300` -`PLUGINS`:: +`PLUGINS`:: A comma separated list of the plugins to download and install as part of the installation. Defaults to `""` @@ -294,7 +294,7 @@ as _properties_ within Windows Installer documentation) that can be passed to ms used to bootstrap the cluster and persisted as the `bootstrap.password` setting in the keystore. Defaults to a randomized value. -`SKIPSETTINGPASSWORDS`:: +`SKIPSETTINGPASSWORDS`:: When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, whether the installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`. @@ -313,7 +313,7 @@ as _properties_ within Windows Installer documentation) that can be passed to ms `LOGSTASHSYSTEMUSERPASSWORD`:: When installing X-Pack plugin with a `Trial` license and X-Pack Security enabled, the password - to use for the built-in user `logstash_system`. Defaults to `""` + to use for the built-in user `logstash_system`. Defaults to `""` To pass a value, simply append the property name and value using the format `=""` to the installation command. For example, to use a different installation directory to the default one and to install https://www.elastic.co/products/x-pack[X-Pack]: @@ -324,7 +324,16 @@ start /wait msiexec.exe /i elasticsearch-{version}.msi /qn INSTALLDIR="C:\Custom -------------------------------------------- Consult the https://msdn.microsoft.com/en-us/library/windows/desktop/aa367988(v=vs.85).aspx[Windows Installer SDK Command-Line Options] -for additional rules related to values containing quotation marks. +for additional rules related to values containing quotation marks. + +ifdef::include-xpack[] +[[msi-installer-enable-indices]] +==== Enable automatic creation of {xpack} indices + +{xpack} will try to automatically create a number of indices within {es}. +include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] + +endif::include-xpack[] [[msi-installer-command-line-running]] ==== Running Elasticsearch from the command line @@ -374,10 +383,10 @@ include::check-running.asciidoc[] Elasticsearch can be installed as a service to run in the background or start automatically at boot time without any user interaction. This can be achieved upon installation using the following command line options - -* `INSTALLASSERVICE=true` -* `STARTAFTERINSTALL=true` -* `STARTWHENWINDOWSSTARTS=true` + +* `INSTALLASSERVICE=true` +* `STARTAFTERINSTALL=true` +* `STARTWHENWINDOWSSTARTS=true` Once installed, Elasticsearch will appear within the Services control panel: @@ -401,18 +410,18 @@ with PowerShell: Get-Service Elasticsearch | Stop-Service | Start-Service -------------------------------------------- -Changes can be made to jvm.options and elasticsearch.yml configuration files to configure the -service after installation. Most changes (like JVM settings) will require a restart of the +Changes can be made to jvm.options and elasticsearch.yml configuration files to configure the +service after installation. Most changes (like JVM settings) will require a restart of the service in order to take effect. [[upgrade-msi-gui]] ==== Upgrade using the graphical user interface (GUI) -The `.msi` package supports upgrading an installed version of Elasticsearch to a newer -version of Elasticsearch. 
The upgrade process through the GUI handles upgrading all +The `.msi` package supports upgrading an installed version of Elasticsearch to a newer +version of Elasticsearch. The upgrade process through the GUI handles upgrading all installed plugins as well as retaining both your data and configuration. -Downloading and clicking on a newer version of the `.msi` package will launch the GUI wizard. +Downloading and clicking on a newer version of the `.msi` package will launch the GUI wizard. The first step will list the read only properties from the previous installation: [[msi-installer-upgrade-notice]] @@ -423,7 +432,7 @@ The following configuration step allows certain configuration options to be chan [[msi-installer-upgrade-configuration]] image::images/msi_installer/msi_installer_upgrade_configuration.png[] -Finally, the plugins step allows currently installed plugins to be upgraded or removed, and +Finally, the plugins step allows currently installed plugins to be upgraded or removed, and for plugins not currently installed, to be downloaded and installed: [[msi-installer-upgrade-plugins]] @@ -432,25 +441,25 @@ image::images/msi_installer/msi_installer_upgrade_plugins.png[] [[upgrade-msi-command-line]] ==== Upgrade using the command line -The `.msi` can also upgrade Elasticsearch using the command line. +The `.msi` can also upgrade Elasticsearch using the command line. [IMPORTANT] =========================================== A command line upgrade requires passing the **same** command line properties as -used at first install time; the Windows Installer does not remember these properties. +used at first install time; the Windows Installer does not remember these properties. For example, if you originally installed with the command line options `PLUGINS="x-pack"` and `LOCKMEMORY="true"`, then you must pass these same values when performing an upgrade from the command line. -The **exception** to this is `INSTALLDIR` (if originally specified), which must be a different directory to the -current installation. +The **exception** to this is `INSTALLDIR` (if originally specified), which must be a different directory to the +current installation. If setting `INSTALLDIR`, the final directory in the path **must** be the version of Elasticsearch e.g. ++C:\Program Files\Elastic\Elasticsearch{backslash}{version}++ =========================================== -The simplest upgrade, assuming Elasticsearch was installed using all defaults, +The simplest upgrade, assuming Elasticsearch was installed using all defaults, is achieved by first navigating to the download directory, then running: ["source","sh",subs="attributes,callouts"] @@ -471,7 +480,7 @@ start /wait msiexec.exe /i elasticsearch-{version}.msi /qn /l upgrade.log The `.msi` package handles uninstallation of all directories and files added as part of installation. -WARNING: Uninstallation will remove **all** directories and their contents created as part of +WARNING: Uninstallation will remove **all** directories and their contents created as part of installation, **including data within the data directory**. If you wish to retain your data upon uninstallation, it is recommended that you make a copy of the data directory before uninstallation. 
@@ -505,4 +514,4 @@ be passed using the `/l` command line argument start /wait msiexec.exe /x elasticsearch-{version}.msi /qn /l uninstall.log -------------------------------------------- -include::next-steps.asciidoc[] \ No newline at end of file +include::next-steps.asciidoc[] diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index ac7470d12381b..18cf0a6506fe4 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -70,6 +70,15 @@ cd elasticsearch-{version}/ <2> endif::[] +ifdef::include-xpack[] +[[zip-targz-enable-indices]] +==== Enable automatic creation of {xpack} indices + +{xpack} will try to automatically create a number of indices within {es}. +include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] + +endif::include-xpack[] + [[zip-targz-running]] ==== Running Elasticsearch from the command line @@ -197,4 +206,4 @@ directory so that you do not delete important data later on. |======================================================================= -include::next-steps.asciidoc[] \ No newline at end of file +include::next-steps.asciidoc[] diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 0c186bbd80a41..3ebf4f3d77dc3 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -42,6 +42,15 @@ cd c:\elasticsearch-{version} endif::[] +ifdef::include-xpack[] +[[windows-enable-indices]] +==== Enable automatic creation of {xpack} indices + +{xpack} will try to automatically create a number of indices within {es}. +include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] + +endif::include-xpack[] + [[windows-running]] ==== Running Elasticsearch from the command line @@ -268,4 +277,4 @@ directory so that you do not delete important data later on. |======================================================================= -include::next-steps.asciidoc[] \ No newline at end of file +include::next-steps.asciidoc[] diff --git a/docs/ruby/copyright.asciidoc b/docs/ruby/copyright.asciidoc index cf5c19c9fee67..3747cc572e40f 100644 --- a/docs/ruby/copyright.asciidoc +++ b/docs/ruby/copyright.asciidoc @@ -1,5 +1,5 @@ == Copyright and License -This software is Copyright (c) 2013-2016 by Elasticsearch BV. +This software is Copyright (c) 2013-2018 by Elasticsearch BV. This is free software, licensed under The Apache License Version 2.0. diff --git a/libs/build.gradle b/libs/build.gradle index e69de29bb2d1d..78eb93886243d 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +subprojects { + /* + * All subprojects are java projects using Elasticsearch's standard build + * tools. + */ + apply plugin: 'elasticsearch.build' + + /* + * Subprojects may depend on the "core" lib but may not depend on any + * other libs. This keeps our dependencies simpler. + */ + project.afterEvaluate { + configurations.all { Configuration conf -> + dependencies.all { Dependency dep -> + Project depProject = dependencyToProject(dep) + if (depProject != null + && false == depProject.path.equals(':libs:elasticsearch-core') + && depProject.path.startsWith(':libs')) { + throw new InvalidUserDataException("projects in :libs " + + "may not depend on other :libs projects except " + + ":libs:elasticsearch-core but " + + "${project.path} depends on ${depProject.path}") + } + } + } + } +} diff --git a/libs/elasticsearch-core/build.gradle b/libs/elasticsearch-core/build.gradle index dea5664a14fd1..d374e7a8486e7 100644 --- a/libs/elasticsearch-core/build.gradle +++ b/libs/elasticsearch-core/build.gradle @@ -19,13 +19,51 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks * under the License. */ -apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' archivesBaseName = 'elasticsearch-core' +// we want to keep the JDKs in our IDEs set to JDK 8 until the minimum JDK is bumped to 9, so we do not include this source set in our IDEs +if (!isEclipse && !isIdea) { + sourceSets { + java9 { + java { + srcDirs = ['src/main/java9'] + } + } + } + + configurations { + java9Compile.extendsFrom(compile) + } + + dependencies { + java9Compile sourceSets.main.output + } + + compileJava9Java { + sourceCompatibility = 9 + targetCompatibility = 9 + } + + /* Enable this when forbiddenapis is updated to 2.6.
+ * See: https://github.com/elastic/elasticsearch/issues/29292 + forbiddenApisJava9 { + targetCompatibility = 9 + } + */ + + jar { + metaInf { + into 'versions/9' + from sourceSets.java9.output + } + manifest.attributes('Multi-Release': 'true') + } +} + publishing { publications { nebula { @@ -39,6 +77,10 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + if (!isEclipse && !isIdea) { + java9Compile sourceSets.main.output + } + if (isEclipse == false || project.path == ":libs:elasticsearch-core-tests") { testCompile("org.elasticsearch.test:framework:${version}") { exclude group: 'org.elasticsearch', module: 'elasticsearch-core' @@ -66,14 +108,14 @@ if (isEclipse) { } thirdPartyAudit.excludes = [ - // from log4j - 'org/osgi/framework/AdaptPermission', - 'org/osgi/framework/AdminPermission', - 'org/osgi/framework/Bundle', - 'org/osgi/framework/BundleActivator', - 'org/osgi/framework/BundleContext', - 'org/osgi/framework/BundleEvent', - 'org/osgi/framework/SynchronousBundleListener', - 'org/osgi/framework/wiring/BundleWire', - 'org/osgi/framework/wiring/BundleWiring' + // from log4j + 'org/osgi/framework/AdaptPermission', + 'org/osgi/framework/AdminPermission', + 'org/osgi/framework/Bundle', + 'org/osgi/framework/BundleActivator', + 'org/osgi/framework/BundleContext', + 'org/osgi/framework/BundleEvent', + 'org/osgi/framework/SynchronousBundleListener', + 'org/osgi/framework/wiring/BundleWire', + 'org/osgi/framework/wiring/BundleWiring' ] diff --git a/server/src/main/java/org/elasticsearch/common/collect/Tuple.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/collect/Tuple.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/collect/Tuple.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/collect/Tuple.java index 2a0d860e1a3f4..70c7bcbc045b6 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Tuple.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/collect/Tuple.java @@ -46,7 +46,7 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - Tuple tuple = (Tuple) o; + Tuple tuple = (Tuple) o; if (v1 != null ? !v1.equals(tuple.v1) : tuple.v1 != null) return false; if (v2 != null ?
!v2.equals(tuple.v2) : tuple.v2 != null) return false; diff --git a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java similarity index 72% rename from server/src/main/java/org/elasticsearch/common/unit/TimeValue.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index abd62adaa0e3e..56cdc09b34e8c 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -19,63 +19,16 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.joda.time.Period; -import org.joda.time.PeriodType; -import org.joda.time.format.PeriodFormat; -import org.joda.time.format.PeriodFormatter; - import java.io.IOException; -import java.util.Collections; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.HashSet; import java.util.Locale; -import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.concurrent.TimeUnit; -public class TimeValue implements Writeable, Comparable, ToXContentFragment { +public class TimeValue implements Comparable { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); - private static Map TIME_UNIT_BYTE_MAP; - private static Map BYTE_TIME_UNIT_MAP; - - static { - final Map timeUnitByteMap = new EnumMap<>(TimeUnit.class); - timeUnitByteMap.put(TimeUnit.NANOSECONDS, (byte)0); - timeUnitByteMap.put(TimeUnit.MICROSECONDS, (byte)1); - timeUnitByteMap.put(TimeUnit.MILLISECONDS, (byte)2); - timeUnitByteMap.put(TimeUnit.SECONDS, (byte)3); - timeUnitByteMap.put(TimeUnit.MINUTES, (byte)4); - timeUnitByteMap.put(TimeUnit.HOURS, (byte)5); - timeUnitByteMap.put(TimeUnit.DAYS, (byte)6); - - final Set bytes = new HashSet<>(); - for (TimeUnit value : TimeUnit.values()) { - assert timeUnitByteMap.containsKey(value) : value; - assert bytes.add(timeUnitByteMap.get(value)); - } - - final Map byteTimeUnitMap = new HashMap<>(); - for (Map.Entry entry : timeUnitByteMap.entrySet()) { - byteTimeUnitMap.put(entry.getValue(), entry.getKey()); - } - - TIME_UNIT_BYTE_MAP = Collections.unmodifiableMap(timeUnitByteMap); - BYTE_TIME_UNIT_MAP = Collections.unmodifiableMap(byteTimeUnitMap); - } - public static final TimeValue MINUS_ONE = timeValueMillis(-1); public static final TimeValue ZERO = timeValueMillis(0); @@ -101,15 +54,19 @@ public static TimeValue timeValueHours(long hours) { private final long duration; - // visible for testing - long duration() { + /** + * @return the number of {@link #timeUnit()} units this value contains + */ + public long duration() { return duration; } private final TimeUnit timeUnit; - // visible for testing - TimeUnit timeUnit() { + /** + * @return the unit used for this time value, see {@link #duration()} + */ + public TimeUnit timeUnit() { return timeUnit; } @@ -122,20 +79,6 @@ public TimeValue(long duration, TimeUnit timeUnit) { this.timeUnit = timeUnit; } - /** -
* Read from a stream. - */ - public TimeValue(StreamInput in) throws IOException { - duration = in.readZLong(); - timeUnit = BYTE_TIME_UNIT_MAP.get(in.readByte()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeZLong(duration); - out.writeByte(TIME_UNIT_BYTE_MAP.get(timeUnit)); - } - public long nanos() { return timeUnit.toNanos(duration); } @@ -240,19 +183,6 @@ public double getDaysFrac() { return daysFrac(); } - private final PeriodFormatter defaultFormatter = PeriodFormat.getDefault() - .withParseType(PeriodType.standard()); - - public String format() { - Period period = new Period(millis()); - return defaultFormatter.print(period); - } - - public String format(PeriodType type) { - Period period = new Period(millis()); - return PeriodFormat.getDefault().withParseType(type).print(period); - } - /** * Returns a {@link String} representation of the current {@link TimeValue}. * @@ -289,7 +219,27 @@ public String toString() { value = microsFrac(); suffix = "micros"; } - return Strings.format1Decimals(value, suffix); + return formatDecimal(value) + suffix; + } + + private static String formatDecimal(double value) { + String p = String.valueOf(value); + int ix = p.indexOf('.') + 1; + int ex = p.indexOf('E'); + char fraction = p.charAt(ix); + if (fraction == '0') { + if (ex != -1) { + return p.substring(0, ix - 1) + p.substring(ex); + } else { + return p.substring(0, ix - 1); + } + } else { + if (ex != -1) { + return p.substring(0, ix) + fraction + p.substring(ex); + } else { + return p.substring(0, ix) + fraction; + } + } } public String getStringRep() { @@ -349,10 +299,8 @@ public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, St return TimeValue.ZERO; } else { // Missing units: - throw new ElasticsearchParseException( - "failed to parse setting [{}] with value [{}] as a time value: unit is missing or unrecognized", - settingName, - sValue); + throw new IllegalArgumentException("failed to parse setting [" + settingName + "] with value [" + sValue + + "] as a time value: unit is missing or unrecognized"); } } @@ -363,9 +311,9 @@ private static long parse(final String initialInput, final String normalized, fi } catch (final NumberFormatException e) { try { @SuppressWarnings("unused") final double ignored = Double.parseDouble(s); - throw new ElasticsearchParseException("failed to parse [{}], fractional time values are not supported", e, initialInput); + throw new IllegalArgumentException("failed to parse [" + initialInput + "], fractional time values are not supported", e); } catch (final NumberFormatException ignored) { - throw new ElasticsearchParseException("failed to parse [{}]", e, initialInput); + throw new IllegalArgumentException("failed to parse [" + initialInput + "]", e); } } } @@ -401,9 +349,4 @@ public int compareTo(TimeValue timeValue) { double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1); return Double.compare(thisValue, otherValue); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.value(toString()); - } } diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java index eaa4df768cd71..4108992fb1f59 100644 --- a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java +++ 
b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java @@ -41,45 +41,73 @@ private IOUtils() { } /** - * Closes all given Closeables. Some of the Closeables may be null; they are ignored. After everything is closed, the - * method either throws the first exception it hit while closing, or completes normally if there were no exceptions. + * Closes all given Closeables. Some of the Closeables may be null; they are + * ignored. After everything is closed, the method either throws the first exception it hit + * while closing with other exceptions added as suppressed, or completes normally if there were + * no exceptions. * * @param objects objects to close */ public static void close(final Closeable... objects) throws IOException { - close(Arrays.asList(objects)); + close(null, Arrays.asList(objects)); } /** - * Closes all given {@link Closeable}s. + * Closes all given Closeables. Some of the Closeables may be null; they are + * ignored. After everything is closed, the method adds any exceptions as suppressed to the + * original exception, or throws the first exception it hit if the passed-in exception is null. If + * no exceptions are encountered and the passed-in exception is null, it completes normally. * * @param objects objects to close + */ + public static void close(final Exception e, final Closeable... objects) throws IOException { + close(e, Arrays.asList(objects)); + } + + /** + * Closes all given Closeables. Some of the Closeables may be null; they are + * ignored. After everything is closed, the method either throws the first exception it hit + * while closing with other exceptions added as suppressed, or completes normally if there were + * no exceptions. * - * @see #close(Closeable...) + * @param objects objects to close */ public static void close(final Iterable objects) throws IOException { - Exception ex = null; + close(null, objects); + } + /** + * Closes all given {@link Closeable}s. If a non-null exception is passed in, or closing a + * stream causes an exception, throws the exception with other {@link RuntimeException} or + * {@link IOException} exceptions added as suppressed. + * + * @param ex existing Exception to add exceptions occurring during close to + * @param objects objects to close + * + * @see #close(Closeable...) 
+ */ + public static void close(final Exception ex, final Iterable objects) throws IOException { + Exception firstException = ex; for (final Closeable object : objects) { try { if (object != null) { object.close(); } } catch (final IOException | RuntimeException e) { - if (ex == null) { - ex = e; + if (firstException == null) { + firstException = e; } else { - ex.addSuppressed(e); + firstException.addSuppressed(e); } } } - if (ex != null) { - if (ex instanceof IOException) { - throw (IOException) ex; + if (firstException != null) { + if (firstException instanceof IOException) { + throw (IOException) firstException; } else { // since we only assigned an IOException or a RuntimeException to firstException above, in this case it must be a RuntimeException - throw (RuntimeException) ex; + throw (RuntimeException) firstException; } } } diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/Streams.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/Streams.java new file mode 100644 index 0000000000000..a006028b90556 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/Streams.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Objects; + +/** + * Simple utility methods for file and stream copying. + * All copy methods use a block size of 8192 bytes, + * and close all affected streams when done. + * <p>
+ * Mainly for use within the framework, + * but also useful for application code. + */ +public class Streams { + + /** + * Copy the contents of the given InputStream to the given OutputStream. + * Closes both streams when done. + * + * @param in the stream to copy from + * @param out the stream to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + public static long copy(final InputStream in, final OutputStream out) throws IOException { + Objects.requireNonNull(in, "No InputStream specified"); + Objects.requireNonNull(out, "No OutputStream specified"); + final byte[] buffer = new byte[8192]; + Exception err = null; + try { + long byteCount = 0; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + byteCount += bytesRead; + } + out.flush(); + return byteCount; + } catch (IOException | RuntimeException e) { + err = e; + throw e; + } finally { + IOUtils.close(err, in, out); + } + } +} diff --git a/libs/elasticsearch-core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java b/libs/elasticsearch-core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java new file mode 100644 index 0000000000000..34b3785765d87 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.core.internal.io; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * Simple utility methods for file and stream copying. + * All copy methods close all affected streams when done. + * <p>
+ * Mainly for use within the framework, + * but also useful for application code. + */ +public abstract class Streams { + + /** + * Copy the contents of the given InputStream to the given OutputStream. + * Closes both streams when done. + * + * @param in the stream to copy from + * @param out the stream to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + public static long copy(final InputStream in, final OutputStream out) throws IOException { + Exception err = null; + try { + final long byteCount = in.transferTo(out); + out.flush(); + return byteCount; + } catch (IOException | RuntimeException e) { + err = e; + throw e; + } finally { + IOUtils.close(err, in, out); + } + } +} diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/collect/TupleTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/collect/TupleTests.java new file mode 100644 index 0000000000000..79a9969ad0510 --- /dev/null +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/collect/TupleTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.collect; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class TupleTests extends ESTestCase { + + public void testTuple() { + Tuple t1 = new Tuple<>(2L, "foo"); + Tuple t2 = new Tuple<>(2L, "foo"); + Tuple t3 = new Tuple<>(3L, "foo"); + Tuple t4 = new Tuple<>(2L, "bar"); + Tuple t5 = new Tuple<>(2, "foo"); + + assertThat(t1.v1(), equalTo(Long.valueOf(2L))); + assertThat(t1.v2(), equalTo("foo")); + + assertThat(t1, equalTo(t2)); + assertNotEquals(t1, t3); + assertNotEquals(t2, t3); + assertNotEquals(t2, t4); + assertNotEquals(t3, t4); + assertNotEquals(t1, t5); + + assertThat(t1.toString(), equalTo("Tuple [v1=2, v2=foo]")); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java similarity index 79% rename from server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java rename to libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index 02394df3867e8..af6b89be5fffe 100644 --- a/server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -19,17 +19,10 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; -import org.joda.time.PeriodType; -import java.io.IOException; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.common.unit.TimeValue.timeValueNanos; -import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.containsString; @@ -57,13 +50,6 @@ public void testToString() { assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).toString())); } - public void testFormat() { - assertThat(new TimeValue(1025, TimeUnit.MILLISECONDS).format(PeriodType.dayTime()), equalTo("1 second and 25 milliseconds")); - assertThat(new TimeValue(1, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 minute")); - assertThat(new TimeValue(65, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 hour and 5 minutes")); - assertThat(new TimeValue(24 * 600 + 85, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("241 hours and 25 minutes")); - } - public void testMinusOne() { assertThat(new TimeValue(-1).nanos(), lessThan(0L)); } @@ -115,10 +101,10 @@ public void testParseTimeValue() { // supported. 
Note that this is the only unit that is not case sensitive // as `m` is the only character that is overloaded in terms of which // time unit is expected between the upper and lower case versions - expectThrows(ElasticsearchParseException.class, () -> { + expectThrows(IllegalArgumentException.class, () -> { TimeValue.parseTimeValue("10 M", null, "test"); }); - expectThrows(ElasticsearchParseException.class, () -> { + expectThrows(IllegalArgumentException.class, () -> { TimeValue.parseTimeValue("10M", null, "test"); }); @@ -139,8 +125,8 @@ public void testRoundTrip() { public void testNonFractionalTimeValues() { final String s = randomAlphaOfLength(10) + randomTimeUnit(); - final ElasticsearchParseException e = - expectThrows(ElasticsearchParseException.class, () -> TimeValue.parseTimeValue(s, null, "test")); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> TimeValue.parseTimeValue(s, null, "test")); assertThat(e, hasToString(containsString("failed to parse [" + s + "]"))); assertThat(e, not(hasToString(containsString(FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED)))); assertThat(e.getCause(), instanceOf(NumberFormatException.class)); @@ -152,8 +138,8 @@ public void testFractionalTimeValues() { value = randomDouble(); } while (value == 0); final String s = Double.toString(randomIntBetween(0, 128) + value) + randomTimeUnit(); - final ElasticsearchParseException e = - expectThrows(ElasticsearchParseException.class, () -> TimeValue.parseTimeValue(s, null, "test")); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> TimeValue.parseTimeValue(s, null, "test")); assertThat(e, hasToString(containsString("failed to parse [" + s + "]"))); assertThat(e, hasToString(containsString(FRACTIONAL_TIME_VALUES_ARE_NOT_SUPPORTED))); assertThat(e.getCause(), instanceOf(NumberFormatException.class)); @@ -163,36 +149,11 @@ private String randomTimeUnit() { return randomFrom("nanos", "micros", "ms", "s", "m", "h", "d"); } - private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException { - BytesStreamOutput out = new BytesStreamOutput(); - value.writeTo(out); - assertEquals(expectedSize, out.size()); - - StreamInput in = out.bytes().streamInput(); - TimeValue inValue = new TimeValue(in); - - assertThat(inValue, equalTo(value)); - assertThat(inValue.duration(), equalTo(value.duration())); - assertThat(inValue.timeUnit(), equalTo(value.timeUnit())); - } - - public void testSerialize() throws Exception { - assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3); - assertEqualityAfterSerialize(timeValueNanos(-1), 2); - assertEqualityAfterSerialize(timeValueNanos(1), 2); - assertEqualityAfterSerialize(timeValueSeconds(30), 2); - - final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values())); - BytesStreamOutput out = new BytesStreamOutput(); - out.writeZLong(timeValue.duration()); - assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length()); - } - public void testFailOnUnknownUnits() { try { TimeValue.parseTimeValue("23tw", null, "test"); fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("failed to parse")); } } @@ -201,7 +162,7 @@ public void testFailOnMissingUnits() { try { TimeValue.parseTimeValue("42", null, "test"); fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { + } catch 
(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("failed to parse")); } } @@ -210,7 +171,7 @@ public void testNoDotsAllowed() { try { TimeValue.parseTimeValue("42ms.", null, "test"); fail("Expected ElasticsearchParseException"); - } catch (ElasticsearchParseException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("failed to parse")); } } diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java new file mode 100644 index 0000000000000..3908ef83500c4 --- /dev/null +++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.core.internal.io; + +import org.elasticsearch.test.ESTestCase; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import static org.hamcrest.Matchers.equalTo; + +public class StreamsTests extends ESTestCase { + public void testCopyFromInputStream() throws IOException { + byte[] content = "content".getBytes(StandardCharsets.UTF_8); + ByteArrayInputStream in = new ByteArrayInputStream(content); + ByteArrayOutputStream out = new ByteArrayOutputStream(content.length); + long count = Streams.copy(in, out); + + assertThat(count, equalTo((long) content.length)); + assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true)); + } +} diff --git a/libs/elasticsearch-nio/build.gradle b/libs/elasticsearch-nio/build.gradle index 247edeeed3543..a32a860a62848 100644 --- a/libs/elasticsearch-nio/build.gradle +++ b/libs/elasticsearch-nio/build.gradle @@ -19,7 +19,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks -apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' @@ -39,7 +38,7 @@ dependencies { testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" - + if (isEclipse == false || project.path == ":libs:elasticsearch-nio-tests") { testCompile("org.elasticsearch.test:framework:${version}") { exclude group: 'org.elasticsearch', module: 'elasticsearch-nio' diff --git a/libs/grok/build.gradle b/libs/grok/build.gradle index c91312cf9e8af..61437be6aff13 100644 --- a/libs/grok/build.gradle +++ b/libs/grok/build.gradle @@ -19,8 +19,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks * under the License. 
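TimeValueTests moves into libs/elasticsearch-core along with the class it tests, and every expected ElasticsearchParseException becomes IllegalArgumentException: the core library cannot depend on the server's exception hierarchy, so TimeValue.parseTimeValue now signals bad input with a plain JDK exception. A usage sketch of the caller-visible change, using the same three-argument parseTimeValue overload the tests exercise (the setting name "demo.timeout" is illustrative):

import org.elasticsearch.common.unit.TimeValue;

public class TimeValueParseDemo {
    public static void main(String[] args) {
        // Well-formed values parse as before.
        TimeValue tenSeconds = TimeValue.parseTimeValue("10s", null, "demo.timeout");
        System.out.println(tenSeconds.seconds()); // 10

        try {
            // Upper-case "M" is rejected: minutes are lower-case "m" and months are unsupported.
            TimeValue.parseTimeValue("10M", null, "demo.timeout");
        } catch (IllegalArgumentException e) {
            // Callers that previously caught ElasticsearchParseException
            // must catch IllegalArgumentException instead.
            System.out.println(e.getMessage());
        }
    }
}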
*/ -apply plugin: 'elasticsearch.build' - archivesBaseName = 'elasticsearch-grok' dependencies { diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index 4cbeb84806089..3800c7711a2fd 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -34,8 +34,10 @@ import java.io.InputStreamReader; import java.io.UncheckedIOException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Collections; @@ -74,8 +76,6 @@ public final class Grok { private final Map<String, String> patternBank; private final boolean namedCaptures; private final Regex compiledExpression; - private final String expression; - public Grok(Map<String, String> patternBank, String grokPattern) { this(patternBank, grokPattern, true); @@ -86,11 +86,59 @@ public Grok(Map<String, String> patternBank, String grokPattern) { this.patternBank = patternBank; this.namedCaptures = namedCaptures; - this.expression = toRegex(grokPattern); + for (Map.Entry<String, String> entry : patternBank.entrySet()) { + String name = entry.getKey(); + String pattern = entry.getValue(); + forbidCircularReferences(name, new ArrayList<>(), pattern); + } + + String expression = toRegex(grokPattern); byte[] expressionBytes = expression.getBytes(StandardCharsets.UTF_8); this.compiledExpression = new Regex(expressionBytes, 0, expressionBytes.length, Option.DEFAULT, UTF8Encoding.INSTANCE); } + /** + * Checks whether patterns reference each other in a circular manner and, if so, fails with an exception. + * + * In a pattern, anything between %{ and } or : is considered + * a reference to another named pattern. This method navigates to all these named patterns and + * checks for a circular reference.
+ */ + private void forbidCircularReferences(String patternName, List<String> path, String pattern) { + if (pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":")) { + String message; + if (path.isEmpty()) { + message = "circular reference in pattern [" + patternName + "][" + pattern + "]"; + } else { + message = "circular reference in pattern [" + path.remove(path.size() - 1) + "][" + pattern + + "] back to pattern [" + patternName + "]"; + // add the rest of the path: + if (path.isEmpty() == false) { + message += " via patterns [" + String.join("=>", path) + "]"; + } + } + throw new IllegalArgumentException(message); + } + + for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) { + int begin = i + 2; + int bracketIndex = pattern.indexOf('}', begin); + int colonIndex = pattern.indexOf(':', begin); + int end; + if (bracketIndex != -1 && colonIndex == -1) { + end = bracketIndex; + } else if (colonIndex != -1 && bracketIndex == -1) { + end = colonIndex; + } else if (bracketIndex != -1 && colonIndex != -1) { + end = Math.min(bracketIndex, colonIndex); + } else { + throw new IllegalArgumentException("pattern [" + pattern + "] has an unterminated %{...} pattern reference"); + } + String otherPatternName = pattern.substring(begin, end); + path.add(otherPatternName); + forbidCircularReferences(patternName, path, patternBank.get(otherPatternName)); + } + } public String groupMatch(String name, Region region, String pattern) { try { @@ -125,10 +173,12 @@ public String toRegex(String grokPattern) { String patternName = groupMatch(PATTERN_GROUP, region, grokPattern); String pattern = patternBank.get(patternName); - if (pattern == null) { throw new IllegalArgumentException("Unable to find pattern [" + patternName + "] in Grok's pattern dictionary"); } + if (pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":")) { + throw new IllegalArgumentException("circular reference in pattern [" + patternName + "]"); + } String grokPart; if (namedCaptures && subName != null) { diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java index 931842d9f247f..eb8d0e9548753 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.TreeMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -205,6 +206,65 @@ public void testNoNamedCaptures() { assertEquals(expected, actual); } + public void testCircularReference() { + Exception e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new HashMap<>(); + bank.put("NAME", "!!!%{NAME}!!!"); + String pattern = "%{NAME}"; + new Grok(bank, pattern, false); + }); + assertEquals("circular reference in pattern [NAME][!!!%{NAME}!!!]", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new HashMap<>(); + bank.put("NAME", "!!!%{NAME:name}!!!"); + String pattern = "%{NAME}"; + new Grok(bank, pattern, false); + }); + assertEquals("circular reference in pattern [NAME][!!!%{NAME:name}!!!]", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new HashMap<>(); + bank.put("NAME", "!!!%{NAME:name:int}!!!"); + String pattern = "%{NAME}"; + new Grok(bank, pattern, false); + }); + 
assertEquals("circular reference in pattern [NAME][!!!%{NAME:name:int}!!!]", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new TreeMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME1}!!!"); + String pattern = "%{NAME1}"; + new Grok(bank, pattern, false); + }); + assertEquals("circular reference in pattern [NAME2][!!!%{NAME1}!!!] back to pattern [NAME1]", e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new TreeMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!%{NAME1}!!!"); + String pattern = "%{NAME1}"; + new Grok(bank, pattern, false); + }); + assertEquals("circular reference in pattern [NAME3][!!!%{NAME1}!!!] back to pattern [NAME1] via patterns [NAME2]", + e.getMessage()); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new TreeMap<>(); + bank.put("NAME1", "!!!%{NAME2}!!!"); + bank.put("NAME2", "!!!%{NAME3}!!!"); + bank.put("NAME3", "!!!%{NAME4}!!!"); + bank.put("NAME4", "!!!%{NAME5}!!!"); + bank.put("NAME5", "!!!%{NAME1}!!!"); + String pattern = "%{NAME1}"; + new Grok(bank, pattern, false); + }); + assertEquals("circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] " + + "via patterns [NAME2=>NAME3=>NAME4]", e.getMessage()); + } + public void testBooleanCaptures() { Map<String, String> bank = new HashMap<>(); diff --git a/libs/plugin-classloader/build.gradle b/libs/plugin-classloader/build.gradle index 1bc7fc9027237..d6af6600d3463 100644 --- a/libs/plugin-classloader/build.gradle +++ b/libs/plugin-classloader/build.gradle @@ -17,8 +17,6 @@ * under the License. */ -apply plugin: 'elasticsearch.build' - test.enabled = false // tests depend on ES core...
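The Grok constructor above now validates the entire pattern bank eagerly: every %{NAME} or %{NAME:alias} reference is chased by forbidCircularReferences, so a cycle fails fast with a descriptive IllegalArgumentException instead of recursing without bound when the pattern is expanded in toRegex. A small sketch of what a caller observes (the demo class is hypothetical; the three-argument constructor is the one the new tests use):

import org.elasticsearch.grok.Grok;

import java.util.HashMap;
import java.util.Map;

public class GrokCycleDemo {
    public static void main(String[] args) {
        Map<String, String> bank = new HashMap<>();
        bank.put("NAME1", "!!!%{NAME2}!!!");
        bank.put("NAME2", "!!!%{NAME1}!!!");
        try {
            new Grok(bank, "%{NAME1}", false); // validation happens here, not at match time
        } catch (IllegalArgumentException e) {
            // e.g. "circular reference in pattern [NAME2][!!!%{NAME1}!!!] back to pattern [NAME1]"
            System.out.println(e.getMessage());
        }
    }
}

Because the path list is threaded through the recursion, longer cycles report the whole chain, as the five-pattern test above asserts with "via patterns [NAME2=>NAME3=>NAME4]".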
diff --git a/libs/secure-sm/build.gradle b/libs/secure-sm/build.gradle index 607230753b52b..93fdfd01c8f0c 100644 --- a/libs/secure-sm/build.gradle +++ b/libs/secure-sm/build.gradle @@ -19,7 +19,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks -apply plugin: 'elasticsearch.build' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java similarity index 93% rename from server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java index aeb4e53690a69..d2a0e16318060 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/AbstractObjectParser.java @@ -21,10 +21,8 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; -import org.elasticsearch.common.xcontent.json.JsonXContent; import java.io.IOException; import java.util.ArrayList; @@ -214,17 +212,6 @@ public void declareFieldArray(BiConsumer> consumer, ContextPa declareField(consumer, (p, c) -> parseArray(p, () -> itemParser.parse(p, c)), field, type); } - public void declareRawObject(BiConsumer consumer, ParseField field) { - CheckedFunction bytesParser = p -> { - try (XContentBuilder builder = JsonXContent.contentBuilder()) { - builder.prettyPrint(); - builder.copyCurrentStructure(p); - return BytesReference.bytes(builder); - } - }; - declareField(consumer, bytesParser, field, ValueType.OBJECT); - } - private interface IOSupplier { T get() throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java index 03f6b14f525ec..d61bd8a5dbbdb 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ConstructingObjectParser.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; @@ -161,7 +160,7 @@ public Value apply(XContentParser parser, Context context) { try { return parse(parser, context); } catch (IOException e) { - throw new ParsingException(parser.getTokenLocation(), "[" + objectParser.getName() + "] failed to parse object", e); + throw new XContentParseException(parser.getTokenLocation(), "[" + objectParser.getName() + "] failed to parse object", e); } } @@ -335,7 +334,7 @@ private BiConsumer queueingConsumer(BiConsumer consumer try { consumer.accept(targetObject, v); } catch (Exception e) { - throw new ParsingException(location, + throw new 
XContentParseException(location, "[" + objectParser.getName() + "] failed to parse field [" + parseField.getPreferredName() + "]", e); } }); @@ -413,7 +412,7 @@ private void constructorArg(int position, ParseField parseField, Object value) { private void queue(Consumer queueMe) { assert targetObject == null: "Don't queue after the targetObject has been built! Just apply the consumer directly."; if (queuedFields == null) { - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) Consumer[] queuedFields = new Consumer[numberOfFields]; this.queuedFields = queuedFields; } @@ -471,11 +470,12 @@ private void buildTarget() { queuedFieldsCount -= 1; queuedFields[queuedFieldsCount].accept(targetObject); } - } catch (ParsingException e) { - throw new ParsingException(e.getLineNumber(), e.getColumnNumber(), - "failed to build [" + objectParser.getName() + "] after last required field arrived", e); + } catch (XContentParseException e) { + throw new XContentParseException(e.getLocation(), + "failed to build [" + objectParser.getName() + "] after last required field arrived", e); } catch (Exception e) { - throw new ParsingException(null, "Failed to build [" + objectParser.getName() + "] after last required field arrived", e); + throw new XContentParseException(null, + "Failed to build [" + objectParser.getName() + "] after last required field arrived", e); } } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java similarity index 93% rename from server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java index 1a3be1a5a7bdd..71b888bf44acb 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectParser.java @@ -20,7 +20,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import java.io.IOException; import java.lang.reflect.Array; @@ -147,7 +146,7 @@ public Value parse(XContentParser parser, Value value, Context context) throws I } else { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ParsingException(parser.getTokenLocation(), "[" + name + "] Expected START_OBJECT but was: " + token); + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] Expected START_OBJECT but was: " + token); } } @@ -159,7 +158,7 @@ public Value parse(XContentParser parser, Value value, Context context) throws I fieldParser = getParser(currentFieldName); } else { if (currentFieldName == null) { - throw new ParsingException(parser.getTokenLocation(), "[" + name + "] no field found"); + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found"); } if (fieldParser == null) { assert ignoreUnknownFields : "this should only be possible if configured to ignore known fields"; @@ -182,7 +181,7 @@ public Value apply(XContentParser parser, Context context) { try { return parse(parser, valueSupplier.get(), context); } catch (IOException e) { - throw new ParsingException(parser.getTokenLocation(), "[" + name + "] failed to parse object", e); + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] failed to parse object", e); } } @@ -233,7 +232,7 @@ public void declareNamedObjects(BiConsumer> 
consumer, NamedOb // This creates and parses the named object BiFunction objectParser = (XContentParser p, Context c) -> { if (p.currentToken() != XContentParser.Token.FIELD_NAME) { - throw new ParsingException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + throw new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + "fields or an array where each entry is an object with a single field"); } // This messy exception nesting has the nice side effect of telling the use which field failed to parse @@ -242,10 +241,10 @@ public void declareNamedObjects(BiConsumer> consumer, NamedOb try { return namedObjectParser.parse(p, c, name); } catch (Exception e) { - throw new ParsingException(p.getTokenLocation(), "[" + field + "] failed to parse field [" + name + "]", e); + throw new XContentParseException(p.getTokenLocation(), "[" + field + "] failed to parse field [" + name + "]", e); } } catch (IOException e) { - throw new ParsingException(p.getTokenLocation(), "[" + field + "] error while parsing", e); + throw new XContentParseException(p.getTokenLocation(), "[" + field + "] error while parsing", e); } }; declareField((XContentParser p, Value v, Context c) -> { @@ -261,14 +260,14 @@ public void declareNamedObjects(BiConsumer> consumer, NamedOb orderedModeCallback.accept(v); while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) { if (token != XContentParser.Token.START_OBJECT) { - throw new ParsingException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + throw new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + "fields or an array where each entry is an object with a single field"); } p.nextToken(); // Move to the first field in the object fields.add(objectParser.apply(p, c)); p.nextToken(); // Move past the object, should be back to into the array if (p.currentToken() != XContentParser.Token.END_OBJECT) { - throw new ParsingException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + throw new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + "fields or an array where each entry is an object with a single field"); } } @@ -314,7 +313,8 @@ private void parseValue(XContentParser parser, FieldParser fieldParser, String c try { fieldParser.parser.parse(parser, value, context); } catch (Exception ex) { - throw new ParsingException(parser.getTokenLocation(), "[" + name + "] failed to parse field [" + currentFieldName + "]", ex); + throw new XContentParseException(parser.getTokenLocation(), + "[" + name + "] failed to parse field [" + currentFieldName + "]", ex); } } @@ -331,7 +331,7 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur case END_OBJECT: case END_ARRAY: case FIELD_NAME: - throw new ParsingException(parser.getTokenLocation(), "[" + name + "]" + token + " is unexpected"); + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "]" + token + " is unexpected"); case VALUE_STRING: case VALUE_NUMBER: case VALUE_BOOLEAN: @@ -364,11 +364,11 @@ private class FieldParser { void assertSupports(String parserName, XContentParser parser, String currentFieldName) { if (parseField.match(currentFieldName, parser.getDeprecationHandler()) == false) { - throw new ParsingException(parser.getTokenLocation(), + throw new XContentParseException(parser.getTokenLocation(), "[" + 
parserName + "] parsefield doesn't accept: " + currentFieldName); } if (supportedTokens.contains(parser.currentToken()) == false) { - throw new ParsingException(parser.getTokenLocation(), + throw new XContentParseException(parser.getTokenLocation(), "[" + parserName + "] " + currentFieldName + " doesn't support values of type: " + parser.currentToken()); } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java index cd2e3dbb59baa..69c345d20c2a6 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Nullable; + import java.util.Optional; /** @@ -37,6 +39,11 @@ public XContentParseException(XContentLocation location, String message) { this.location = Optional.ofNullable(location); } + public XContentParseException(XContentLocation location, String message, Exception cause) { + super(message, cause); + this.location = Optional.ofNullable(location); + } + public int getLineNumber() { return location.map(l -> l.lineNumber).orElse(-1); } @@ -45,8 +52,14 @@ public int getColumnNumber() { return location.map(l -> l.columnNumber).orElse(-1); } + @Nullable + public XContentLocation getLocation() { + return location.orElse(null); + } + @Override public String getMessage() { return location.map(l -> "[" + l.toString() + "] ").orElse("") + super.getMessage(); } + } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java similarity index 84% rename from server/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java index 9f24861fdaa0e..7488cfd7e9c55 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ConstructingObjectParserTests.java @@ -22,14 +22,12 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ObjectParserTests.NamedObject; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.List; @@ -38,6 +36,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -79,7 +78,8 @@ public void testRandomOrder() throws Exception { XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); 
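The XContentParseException changes above (a constructor that takes a cause plus a nullable getLocation() accessor) are what let ObjectParser and ConstructingObjectParser wrap failures while keeping track of where in the document they happened. Since getMessage() now prefixes the location as "[line:column] ", the migrated tests match messages with containsString(...) instead of assertEquals(...). A sketch of the rethrow idiom (the field name, values, and demo class are hypothetical):

import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParseException;

public class WrapWithLocationDemo {
    static int parsePort(String raw, XContentLocation location) {
        try {
            return Integer.parseInt(raw);
        } catch (NumberFormatException e) {
            // Chain the cause and keep the location for the caller.
            throw new XContentParseException(location, "[server] failed to parse field [port]", e);
        }
    }

    public static void main(String[] args) {
        try {
            parsePort("eighty", new XContentLocation(4, 13));
        } catch (XContentParseException e) {
            System.out.println(e.getMessage());  // "[4:13] [server] failed to parse field [port]"
            System.out.println(e.getLocation()); // preserved, e.g. for rebuilding an outer message
        }
    }
}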
expected.toXContent(builder, ToXContent.EMPTY_PARAMS); builder = shuffleXContent(builder); - BytesReference bytes = BytesReference.bytes(builder); + builder.flush(); + byte[] bytes = ((ByteArrayOutputStream) builder.getOutputStream()).toByteArray(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) { HasCtorArguments parsed = randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null); assertEquals(expected.animal, parsed.animal); @@ -90,9 +90,6 @@ public void testRandomOrder() throws Exception { assertEquals(expected.b, parsed.b); assertEquals(expected.c, parsed.c); assertEquals(expected.d, parsed.d); - } catch (Exception e) { - // It is convenient to decorate the error message with the json - throw new Exception("Error parsing: [" + Strings.toString(builder) + "]", e); } } @@ -175,7 +172,7 @@ public void testRepeatedConstructorParam() throws IOException { + " \"vegetable\": 1,\n" + " \"vegetable\": 2\n" + "}"); - Throwable e = expectThrows(ParsingException.class, () -> randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null)); + Throwable e = expectThrows(XContentParseException.class, () -> randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null)); assertEquals("[has_required_arguments] failed to parse field [vegetable]", e.getMessage()); e = e.getCause(); assertThat(e, instanceOf(IllegalArgumentException.class)); @@ -189,8 +186,9 @@ public void testBadParam() throws IOException { + " \"vegetable\": 2,\n" + " \"a\": \"supercalifragilisticexpialidocious\"\n" + "}"); - ParsingException e = expectThrows(ParsingException.class, () -> randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null)); - assertEquals("[has_required_arguments] failed to parse field [a]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, + () -> randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null)); + assertThat(e.getMessage(), containsString("[has_required_arguments] failed to parse field [a]")); assertEquals(4, e.getLineNumber()); assertEquals("[a] must be less than 10 characters in length but was [supercalifragilisticexpialidocious]", e.getCause().getMessage()); @@ -203,14 +201,15 @@ public void testBadParamBeforeObjectBuilt() throws IOException { + " \"animal\": \"cat\"\n," + " \"vegetable\": 2\n" + "}"); - ParsingException e = expectThrows(ParsingException.class, () -> randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null)); - assertEquals("[has_required_arguments] failed to parse field [vegetable]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, + () -> randomFrom(HasCtorArguments.ALL_PARSERS).apply(parser, null)); + assertThat(e.getMessage(), containsString("[has_required_arguments] failed to parse field [vegetable]")); assertEquals(4, e.getLineNumber()); - e = (ParsingException) e.getCause(); - assertEquals("failed to build [has_required_arguments] after last required field arrived", e.getMessage()); + e = (XContentParseException) e.getCause(); + assertThat(e.getMessage(), containsString("failed to build [has_required_arguments] after last required field arrived")); assertEquals(2, e.getLineNumber()); - e = (ParsingException) e.getCause(); - assertEquals("[has_required_arguments] failed to parse field [a]", e.getMessage()); + e = (XContentParseException) e.getCause(); + assertThat(e.getMessage(), containsString("[has_required_arguments] failed to parse field [a]")); assertEquals(2, e.getLineNumber()); assertEquals("[a] must be less than 10 characters in length but was 
[supercalifragilisticexpialidocious]", e.getCause().getMessage()); @@ -465,11 +464,11 @@ public void testParseNamedObjectTwoFieldsInArray() throws IOException { + "],\"named_in_constructor\": [\n" + " {\"c\": {}}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); - assertEquals( - "[named] can be a single object with any number of fields or an array where each entry is an object with a single field", - e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); + assertThat(e.getCause().getMessage(), + containsString("[named] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectTwoFieldsInArrayConstructorArg() throws IOException { @@ -479,11 +478,11 @@ public void testParseNamedObjectTwoFieldsInArrayConstructorArg() throws IOExcept + "],\"named_in_constructor\": [\n" + " {\"c\": {}, \"d\": {}}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage()); - assertEquals( - "[named_in_constructor] can be a single object with any number of fields or an array where each entry is an object with a " - + "single field", e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named_in_constructor]")); + assertThat(e.getCause().getMessage(), + containsString("[named_in_constructor] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectNoFieldsInArray() throws IOException { @@ -493,11 +492,11 @@ public void testParseNamedObjectNoFieldsInArray() throws IOException { + "],\"named_in_constructor\": [\n" + " {\"a\": {}}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); - assertEquals( - "[named] can be a single object with any number of fields or an array where each entry is an object with a single field", - e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); + assertThat(e.getCause().getMessage(), + containsString("[named] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectNoFieldsInArrayConstructorArg() throws IOException { @@ -507,11 +506,11 @@ public void testParseNamedObjectNoFieldsInArrayConstructorArg() throws IOExcepti + "],\"named_in_constructor\": [\n" + " {}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field 
[named_in_constructor]", e.getMessage()); - assertEquals( - "[named_in_constructor] can be a single object with any number of fields or an array where each entry is an object with a " - + "single field", e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named_in_constructor]")); + assertThat(e.getCause().getMessage(), + containsString("[named_in_constructor] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectJunkInArray() throws IOException { @@ -521,11 +520,11 @@ public void testParseNamedObjectJunkInArray() throws IOException { + "],\"named_in_constructor\": [\n" + " {\"a\": {}}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); - assertEquals( - "[named] can be a single object with any number of fields or an array where each entry is an object with a single field", - e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); + assertThat(e.getCause().getMessage(), + containsString("[named] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectJunkInArrayConstructorArg() throws IOException { @@ -535,11 +534,11 @@ public void testParseNamedObjectJunkInArrayConstructorArg() throws IOException { + "],\"named_in_constructor\": [\n" + " \"junk\"" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage()); - assertEquals( - "[named_in_constructor] can be a single object with any number of fields or an array where each entry is an object with a " - + "single field", e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named_in_constructor]")); + assertThat(e.getCause().getMessage(), + containsString("[named_in_constructor] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectInOrderNotSupported() throws IOException { @@ -558,8 +557,8 @@ public void testParseNamedObjectInOrderNotSupported() throws IOException { objectParser.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER, new ParseField("named")); // Now firing the xml through it fails - ParsingException e = expectThrows(ParsingException.class, () -> objectParser.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> objectParser.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); assertEquals("[named] doesn't support arrays. 
Use a single object with multiple fields.", e.getCause().getMessage()); } @@ -579,9 +578,10 @@ public void testParseNamedObjectInOrderNotSupportedConstructorArg() throws IOExc objectParser.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER, new ParseField("named")); // Now firing the xml through it fails - ParsingException e = expectThrows(ParsingException.class, () -> objectParser.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named_in_constructor]", e.getMessage()); - assertEquals("[named_in_constructor] doesn't support arrays. Use a single object with multiple fields.", e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> objectParser.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named_in_constructor]")); + assertThat(e.getCause().getMessage(), + containsString("[named_in_constructor] doesn't support arrays. Use a single object with multiple fields.")); } static class NamedObjectHolder { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java similarity index 92% rename from server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java index 6f0c0208b9c75..3dd33e997b2ea 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectParserTests.java @@ -20,14 +20,13 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.UncheckedIOException; import java.net.URI; @@ -199,8 +198,8 @@ public void setTest(int test) { try { objectParser.parse(parser, s, null); fail("numeric value expected"); - } catch (ParsingException ex) { - assertEquals(ex.getMessage(), "[the_parser] failed to parse field [test]"); + } catch (XContentParseException ex) { + assertThat(ex.getMessage(), containsString("[the_parser] failed to parse field [test]")); assertTrue(ex.getCause() instanceof NumberFormatException); } @@ -235,7 +234,7 @@ class TestStruct { TestStruct s = new TestStruct(); objectParser.declareField((i, c, x) -> c.test = i.text(), new ParseField("numeric_value"), ObjectParser.ValueType.FLOAT); - Exception e = expectThrows(ParsingException.class, () -> objectParser.parse(parser, s, null)); + Exception e = expectThrows(XContentParseException.class, () -> objectParser.parse(parser, s, null)); assertThat(e.getMessage(), containsString("[foo] numeric_value doesn't support values of type: VALUE_BOOLEAN")); } @@ -478,11 +477,11 @@ public void testParseNamedObjectTwoFieldsInArray() throws IOException { "{\"named\": [\n" + " {\"a\": {}, \"b\": {}}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed 
to parse field [named]", e.getMessage()); - assertEquals( - "[named] can be a single object with any number of fields or an array where each entry is an object with a single field", - e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); + assertThat(e.getCause().getMessage(), + containsString("[named] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectNoFieldsInArray() throws IOException { @@ -490,11 +489,11 @@ public void testParseNamedObjectNoFieldsInArray() throws IOException { "{\"named\": [\n" + " {}" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); - assertEquals( - "[named] can be a single object with any number of fields or an array where each entry is an object with a single field", - e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); + assertThat(e.getCause().getMessage(), + containsString("[named] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectJunkInArray() throws IOException { @@ -502,11 +501,11 @@ public void testParseNamedObjectJunkInArray() throws IOException { "{\"named\": [\n" + " \"junk\"" + "]}"); - ParsingException e = expectThrows(ParsingException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); - assertEquals( - "[named] can be a single object with any number of fields or an array where each entry is an object with a single field", - e.getCause().getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> NamedObjectHolder.PARSER.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); + assertThat(e.getCause().getMessage(), + containsString("[named] can be a single object with any number of fields " + + "or an array where each entry is an object with a single field")); } public void testParseNamedObjectInOrderNotSupported() throws IOException { @@ -521,8 +520,8 @@ public void testParseNamedObjectInOrderNotSupported() throws IOException { objectParser.declareNamedObjects(NamedObjectHolder::setNamed, NamedObject.PARSER, new ParseField("named")); // Now firing the xml through it fails - ParsingException e = expectThrows(ParsingException.class, () -> objectParser.apply(parser, null)); - assertEquals("[named_object_holder] failed to parse field [named]", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> objectParser.apply(parser, null)); + assertThat(e.getMessage(), containsString("[named_object_holder] failed to parse field [named]")); assertEquals("[named] doesn't support arrays. 
Use a single object with multiple fields.", e.getCause().getMessage()); } @@ -535,7 +534,9 @@ public void testIgnoreUnknownFields() throws IOException { } b.endObject(); b = shuffleXContent(b); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(b)); + b.flush(); + byte[] bytes = ((ByteArrayOutputStream) b.getOutputStream()).toByteArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); class TestStruct { public String test; @@ -559,7 +560,9 @@ public void testIgnoreUnknownObjects() throws IOException { } b.endObject(); b = shuffleXContent(b); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(b)); + b.flush(); + byte[] bytes = ((ByteArrayOutputStream) b.getOutputStream()).toByteArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); class TestStruct { public String test; @@ -587,7 +590,9 @@ public void testIgnoreUnknownArrays() throws IOException { } b.endObject(); b = shuffleXContent(b); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(b)); + b.flush(); + byte[] bytes = ((ByteArrayOutputStream) b.getOutputStream()).toByteArray(); + XContentParser parser = createParser(JsonXContent.jsonXContent, bytes); class TestStruct { public String test; } @@ -646,8 +651,8 @@ public void setArray(List testArray) { // Make sure that we didn't break the null handling in arrays that shouldn't support nulls XContentParser parser2 = createParser(JsonXContent.jsonXContent, "{\"int_array\": [1, null, 3]}"); TestStruct s2 = new TestStruct(); - ParsingException ex = expectThrows(ParsingException.class, () -> objectParser.parse(parser2, s2, null)); - assertThat(ex.getMessage(), startsWith("[foo] failed to parse field [int_array]")); + XContentParseException ex = expectThrows(XContentParseException.class, () -> objectParser.parse(parser2, s2, null)); + assertThat(ex.getMessage(), containsString("[foo] failed to parse field [int_array]")); } static class NamedObjectHolder { diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml index 5ed2fe542ee24..ad29728a6bd6f 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml @@ -35,14 +35,12 @@ - do: search: index: empty_bucket_idx - type: test - match: {hits.total: 2} - do: search: index: empty_bucket_idx - type: test body: {"aggs": {"histo": {"histogram": {"field": "val1", "interval": 1, "min_doc_count": 0}, "aggs": { "mfs" : { "matrix_stats": {"fields": ["value", "val1"]} } } } } } - match: {hits.total: 2} diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml index fbc73974f8ac9..abbc22a4f25bf 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml @@ -130,7 +130,6 @@ setup: - do: search: index: unmapped - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"]} } } } - match: {hits.total: 0} @@ -142,7 +141,6 @@ setup: - do: search: index: test - type: test body: {"aggs": { "mfs" : { 
"matrix_stats": {"fields": ["val1", "val3"]} } } } - match: {hits.total: 15} @@ -155,7 +153,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"]} } } } - match: {hits.total: 15} @@ -169,7 +166,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"], "missing" : {"val2" : 10} } } } } - match: {hits.total: 15} @@ -184,7 +180,6 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } } --- @@ -194,5 +189,4 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } } diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml index dff452d43cfd0..978c35f8c0b9f 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml @@ -130,7 +130,6 @@ setup: - do: search: index: unmapped - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } - match: {hits.total: 0} @@ -142,7 +141,6 @@ setup: - do: search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "max"} } } } - match: {hits.total: 15} @@ -156,7 +154,6 @@ setup: - do: search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "min"} } } } - match: {hits.total: 15} @@ -170,7 +167,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } - match: {hits.total: 15} @@ -184,7 +180,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"], "missing" : {"val2" : 10, "vals" : 5 } } } } } - match: {hits.total: 15} @@ -199,7 +194,6 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["vals", "val3"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } } --- @@ -209,5 +203,4 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3", "vals"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java index 484c9d9b1280b..06c179d95f7af 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java @@ -32,6 +32,10 @@ 
public class LegacyDelimitedPayloadTokenFilterFactory extends DelimitedPayloadTo LegacyDelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException( + "[delimited_payload_filter] is not supported for new indices, use [delimited_payload] instead"); + } if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_2_0)) { DEPRECATION_LOGGER.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"); } diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index bfb6c97c24f6d..3dca3bfd7770c 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -1026,15 +1026,13 @@ - match: { tokens.10.token: ちた } --- -"delimited_payload_filter": +"delimited_payload_filter_error": - skip: - version: " - 6.1.99" - reason: delimited_payload_filter deprecated in 6.2, replaced by delimited_payload - features: "warnings" + version: " - 6.99.99" + reason: using delimited_payload_filter throws error from 7.0 on - do: - warnings: - - "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]" + catch: /\[delimited_payload_filter\] is not supported for new indices, use \[delimited_payload\] instead/ indices.create: index: test body: @@ -1045,29 +1043,15 @@ type: delimited_payload_filter delimiter: ^ encoding: identity - - do: - warnings: - - "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]" - indices.analyze: - index: test - body: - text: foo^bar - tokenizer: keyword - filter: [my_delimited_payload_filter] - - length: { tokens: 1 } - - match: { tokens.0.token: foo } # Test pre-configured token filter too: - do: - warnings: - - "Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]" + catch: /\[delimited_payload_filter\] is not supported for new indices, use \[delimited_payload\] instead/ indices.analyze: body: text: foo|5 tokenizer: keyword filter: [delimited_payload_filter] - - length: { tokens: 1 } - - match: { tokens.0.token: foo } --- "delimited_payload": diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java index 66cddd43e6583..5dedd72be72bc 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java @@ -127,7 +127,7 @@ public void testConvertScalarToList() throws Exception { public void testAppendMetadataExceptVersion() throws Exception { // here any metadata field value becomes a list, which won't make sense in most of the cases, // but support for append is streamlined like for set so we test it - MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING, MetaData.PARENT); + MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING); List values = new ArrayList<>(); Processor appendProcessor; if (randomBoolean()) 
{ diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java index d052ce0cd44c3..eba37dc742169 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java @@ -38,7 +38,7 @@ public void testJodaPattern() throws Exception { "events-", "y", "yyyyMMdd" ); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z")); processor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -48,7 +48,7 @@ public void testTAI64N()throws Exception { Function function = DateFormat.Tai64n.getFunction(null, DateTimeZone.UTC, null); DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -58,12 +58,12 @@ public void testUnixMs()throws Exception { Function function = DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null); DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "1000500")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); - document = new IngestDocument("_index", "_type", "_id", null, null, null, null, + document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", 1000500L)); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -73,7 +73,7 @@ public void testUnix()throws Exception { Function function = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null); DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC, "events-", "m", "yyyyMMdd"); - IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null, + IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, Collections.singletonMap("_field", "1000.5")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java index 95c25bedb6280..07573a780a17a 100644 --- 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java @@ -45,7 +45,7 @@ public void testExecute() throws Exception { values.add("bar"); values.add("baz"); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, Collections.singletonMap("values", values) ); ForEachProcessor processor = new ForEachProcessor( @@ -61,7 +61,7 @@ public void testExecute() throws Exception { public void testExecuteWithFailure() throws Exception { IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", Arrays.asList("a", "b", "c")) + "_index", "_type", "_id", null, null, null, Collections.singletonMap("values", Arrays.asList("a", "b", "c")) ); TestProcessor testProcessor = new TestProcessor(id -> { @@ -101,7 +101,7 @@ public void testMetaDataAvailable() throws Exception { values.add(new HashMap<>()); values.add(new HashMap<>()); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, Collections.singletonMap("values", values) ); TestProcessor innerProcessor = new TestProcessor(id -> { @@ -132,7 +132,7 @@ public void testRestOfTheDocumentIsAvailable() throws Exception { document.put("values", values); document.put("flat_values", new ArrayList<>()); document.put("other", "value"); - IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("_index", "_type", "_id", null, null, null, document); ForEachProcessor processor = new ForEachProcessor( "_tag", "values", new SetProcessor("_tag", @@ -171,7 +171,7 @@ public String getTag() { values.add(""); } IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, Collections.singletonMap("values", values) ); ForEachProcessor processor = new ForEachProcessor("_tag", "values", innerProcessor); @@ -190,7 +190,7 @@ public void testModifyFieldsOutsideArray() throws Exception { values.add(1); values.add(null); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values", values) + "_index", "_type", "_id", null, null, null, Collections.singletonMap("values", values) ); TemplateScript.Factory template = new TestTemplateService.MockTemplateScript.Factory("errors"); @@ -220,7 +220,7 @@ public void testScalarValueAllowsUnderscoreValueFieldToRemainAccessible() throws source.put("_value", "new_value"); source.put("values", values); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, source + "_index", "_type", "_id", null, null, null, source ); TestProcessor processor = new TestProcessor(doc -> doc.setFieldValue("_ingest._value", @@ -251,7 +251,7 @@ public void testNestedForEach() throws Exception { values.add(value); IngestDocument ingestDocument = new IngestDocument( - "_index", "_type", "_id", null, null, null, null, Collections.singletonMap("values1", values) + "_index", "_type", "_id", null, null, null, 
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java
index f1a7add303f1b..d7f8e8838bb8c 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.ingest.common;
 
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.xcontent.XContentParseException;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptException;
 import org.elasticsearch.script.ScriptService;
@@ -30,6 +31,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.mockito.Matchers.any;
@@ -80,9 +82,9 @@ public void testFactoryValidationForMultipleScriptingTypes() throws Exception {
         configMap.put("source", "bar");
         configMap.put("lang", "mockscript");
 
-        ElasticsearchException exception = expectThrows(ElasticsearchException.class,
+        XContentParseException exception = expectThrows(XContentParseException.class,
             () -> factory.create(null, randomAlphaOfLength(10), configMap));
-        assertThat(exception.getMessage(), is("[script] failed to parse field [source]"));
+        assertThat(exception.getMessage(), containsString("[script] failed to parse field [source]"));
     }
 
     public void testFactoryValidationAtLeastOneScriptingType() throws Exception {
diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java
index 6fec977e6c268..5212ea2172c63 100644
--- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java
+++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java
@@ -102,7 +102,7 @@ public void testSetExistingNullFieldWithOverrideDisabled() throws Exception {
     }
 
     public void testSetMetadataExceptVersion() throws Exception {
-        MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING, MetaData.PARENT);
+        MetaData randomMetaData = randomFrom(MetaData.INDEX, MetaData.TYPE, MetaData.ID, MetaData.ROUTING);
         Processor processor = createSetProcessor(randomMetaData.getFieldName(), "_value", true);
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
         processor.execute(ingestDocument);
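Both test fixes above follow one pattern: object parsing failures now surface as XContentParseException, whose message is prefixed with location information, so assertions switch from an exact is(...) match to containsString(...). Sketch:

    XContentParseException e = expectThrows(XContentParseException.class,
        () -> factory.create(null, randomAlphaOfLength(10), configMap));
    // The message now carries a line/column prefix, so match only the stable suffix.
    assertThat(e.getMessage(), containsString("[script] failed to parse field [source]"));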
diff --git a/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1
deleted file mode 100644
index a92cbe3045071..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-38ff5a1f4bcbfb6e1ffacd3263175c2a1ba23e9f
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-7.3.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.3.0.jar.sha1
new file mode 100644
index 0000000000000..62a094a8b0feb
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-7.3.0.jar.sha1
@@ -0,0 +1 @@
+cb82d9db3043bbd25b4d0eb5022ed1e529c936d3
\ No newline at end of file
diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java
index 9a91fccf4ad30..f4095b3f68ada 100644
--- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java
+++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/MoreExpressionTests.java
@@ -82,7 +82,7 @@ private SearchRequestBuilder buildRequest(String script, Object... params) {
         SearchRequestBuilder req = client().prepareSearch().setIndices("test");
         req.setQuery(QueryBuilders.matchAllQuery())
-            .addSort(SortBuilders.fieldSort("_uid")
+            .addSort(SortBuilders.fieldSort("_id")
                 .order(SortOrder.ASC))
             .addScriptField("foo", new Script(ScriptType.INLINE, "expression", script, paramsMap));
         return req;
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
index 50f63841231f8..7d4dddc73165c 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java
@@ -112,7 +112,7 @@ static SearchRequest convert(SearchTemplateRequest searchTemplateRequest, Search
         try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
                 .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source)) {
             SearchSourceBuilder builder = SearchSourceBuilder.searchSource();
-            builder.parseXContent(parser);
+            builder.parseXContent(parser, false);
             builder.explain(searchTemplateRequest.isExplain());
             builder.profile(searchTemplateRequest.isProfile());
             searchRequest.source(builder);
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java
index 51b7df3fc2cae..9cdca70f0e1a6 100644
--- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java
+++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.script.mustache;
 
-import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.xcontent.XContentParseException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.script.ScriptType;
@@ -122,7 +122,7 @@ public void testParseStoredTemplateWithParams() throws Exception {
 
     public void testParseWrongTemplate() {
         // Unclosed template id
-        expectThrows(ParsingException.class, () -> RestSearchTemplateAction.parse(newParser("{'id' : 'another_temp }")));
+        expectThrows(XContentParseException.class, () -> RestSearchTemplateAction.parse(newParser("{'id' : 'another_temp }")));
     }
 
     /**
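The MoreExpressionTests change reflects the removal of _uid on single-type indices: deterministic test ordering now comes from sorting on _id instead. A minimal request sketch (index name illustrative):

    SearchRequestBuilder req = client().prepareSearch("test")
        .setQuery(QueryBuilders.matchAllQuery())
        // _uid no longer exists for single-type indices; _id gives the same stable order
        .addSort(SortBuilders.fieldSort("_id").order(SortOrder.ASC));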
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
index 5770c91cfdb7e..07ee5b5dc6243 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java
@@ -39,6 +39,7 @@
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
@@ -157,10 +158,10 @@ public Mapper.Builder parse(String name, Map node,
                     builder.nullValue(ScaledFloatFieldMapper.parse(propNode));
                     iterator.remove();
                 } else if (propName.equals("ignore_malformed")) {
-                    builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, "ignore_malformed", propNode, parserContext));
+                    builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(propNode, name + ".ignore_malformed"));
                     iterator.remove();
                 } else if (propName.equals("coerce")) {
-                    builder.coerce(TypeParsers.nodeBooleanValue(name, "coerce", propNode, parserContext));
+                    builder.coerce(XContentMapValues.nodeBooleanValue(propNode, name + ".coerce"));
                     iterator.remove();
                 } else if (propName.equals("scaling_factor")) {
                     builder.scalingFactor(ScaledFloatFieldMapper.parse(propNode));
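The mapper now parses boolean flags through XContentMapValues.nodeBooleanValue, which takes the raw node plus a descriptive name that is used only in the error message on bad input. A sketch under that assumption:

    Object propNode = node.get("ignore_malformed");
    // The second argument only labels the exception thrown for non-boolean input.
    boolean ignoreMalformed = XContentMapValues.nodeBooleanValue(propNode, name + ".ignore_malformed");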
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java
index a14a7a35c85ec..7f22acff2fc2f 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java
@@ -28,7 +28,6 @@
 import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.join.mapper.ParentIdFieldMapper;
 import org.elasticsearch.join.mapper.ParentJoinFieldMapper;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
@@ -109,11 +108,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException {
 
     @Override
     protected ValuesSourceConfig resolveConfig(SearchContext context) {
         ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);
-        if (context.mapperService().getIndexSettings().isSingleType()) {
-            joinFieldResolveConfig(context, config);
-        } else {
-            parentFieldResolveConfig(context, config);
-        }
+        joinFieldResolveConfig(context, config);
         return config;
     }
 
@@ -131,30 +126,6 @@ private void joinFieldResolveConfig(SearchContext context, ValuesSourceConfig config) {
-        DocumentMapper childDocMapper = context.mapperService().documentMapper(childType);
-        if (childDocMapper != null) {
-            ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
-            if (!parentFieldMapper.active()) {
-                throw new IllegalArgumentException("[children] no [_parent] field not configured that points to a parent type");
-            }
-            String parentType = parentFieldMapper.type();
-            DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType);
-            if (parentDocMapper != null) {
-                parentFilter = parentDocMapper.typeFilter(context.getQueryShardContext());
-                childFilter = childDocMapper.typeFilter(context.getQueryShardContext());
-                MappedFieldType parentFieldType = parentDocMapper.parentFieldMapper().getParentJoinFieldType();
-                final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(parentFieldType);
-                config.fieldContext(new FieldContext(parentFieldType.name(), fieldData,
-                    parentFieldType));
-            } else {
-                config.unmapped(true);
-            }
-        } else {
-            config.unmapped(true);
-        }
-    }
-
     @Override
     protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         builder.field(ParentToChildrenAggregator.TYPE_FIELD.getPreferredName(), childType);
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java
index d3164ae6a12da..3e6f8eac8148b 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/mapper/ParentJoinFieldMapper.java
@@ -96,10 +96,6 @@ private static void checkIndexCompatibility(IndexSettings settings, String name)
             throw new IllegalStateException("cannot create join field [" + name + "] " +
                 "for the partitioned index " + "[" + settings.getIndex().getName() + "]");
         }
-        if (settings.isSingleType() == false) {
-            throw new IllegalStateException("cannot create join field [" + name + "] " +
-                "on multi-types index [" + settings.getIndex().getName() + "]");
-        }
     }
 
     private static void checkObjectOrNested(ContentPath path, String name) {
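With the multi-type (_parent) code paths deleted, the join field is the only parent/child representation these classes handle, so the single-type guard in ParentJoinFieldMapper is no longer needed. For reference, a minimal join-field mapping like the ones the test helpers below build (field and relation names illustrative):

    XContentBuilder mapping = jsonBuilder().startObject().startObject("doc").startObject("properties")
        .startObject("join_field")
            .field("type", "join")
            .startObject("relations")
                .field("parent", "child")   // a String[] value declares several child types
            .endObject()
        .endObject()
        .endObject().endObject().endObject();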
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java
index 0577aa01ebd8f..ebfeb5ab01931 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java
@@ -37,9 +37,7 @@
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
 import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;
-import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
 import org.elasticsearch.index.query.InnerHitBuilder;
 import org.elasticsearch.index.query.InnerHitContextBuilder;
@@ -306,14 +304,6 @@ public String getWriteableName() {
 
     @Override
     protected Query doToQuery(QueryShardContext context) throws IOException {
-        if (context.getIndexSettings().isSingleType()) {
-            return joinFieldDoToQuery(context);
-        } else {
-            return parentFieldDoToQuery(context);
-        }
-    }
-
-    private Query joinFieldDoToQuery(QueryShardContext context) throws IOException {
         ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.getMapperService());
         if (joinFieldMapper == null) {
             if (ignoreUnmapped) {
@@ -342,44 +332,6 @@ private Query joinFieldDoToQuery(QueryShardContext context) throws IOException {
         }
     }
 
-    private Query parentFieldDoToQuery(QueryShardContext context) throws IOException {
-        Query innerQuery;
-        final String[] previousTypes = context.getTypes();
-        context.setTypes(type);
-        try {
-            innerQuery = query.toQuery(context);
-        } finally {
-            context.setTypes(previousTypes);
-        }
-        DocumentMapper childDocMapper = context.getMapperService().documentMapper(type);
-        if (childDocMapper == null) {
-            if (ignoreUnmapped) {
-                return new MatchNoDocsQuery();
-            } else {
-                throw new QueryShardException(context, "[" + NAME + "] no mapping found for type [" + type + "]");
-            }
-        }
-        ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
-        if (parentFieldMapper.active() == false) {
-            throw new QueryShardException(context, "[" + NAME + "] _parent field has no parent type configured");
-        }
-        String parentType = parentFieldMapper.type();
-        DocumentMapper parentDocMapper = context.getMapperService().documentMapper(parentType);
-        if (parentDocMapper == null) {
-            throw new QueryShardException(context,
-                "[" + NAME + "] Type [" + type + "] points to a non existent parent type [" + parentType + "]");
-        }
-
-        // wrap the query with type query
-        innerQuery = Queries.filtered(innerQuery, childDocMapper.typeFilter(context));
-
-        String joinField = ParentFieldMapper.joinField(parentType);
-        final MappedFieldType parentFieldType = parentDocMapper.parentFieldMapper().getParentJoinFieldType();
-        final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(parentFieldType);
-        return new LateParsingQuery(parentDocMapper.typeFilter(context), innerQuery, minChildren(), maxChildren(),
-            joinField, scoreMode, fieldData, context.getSearchSimilarity());
-    }
-
     /**
      * A query that rewrites into another query using
     * {@link JoinUtil#createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, OrdinalMap, int, int)}
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java
index 5e2dd4206f2f7..aca5f4a56d393 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java
@@ -18,8 +18,6 @@
  */
 package org.elasticsearch.join.query;
 
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.join.ScoreMode;
@@ -32,9 +30,7 @@
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData;
-import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
 import org.elasticsearch.index.query.InnerHitBuilder;
 import org.elasticsearch.index.query.InnerHitContextBuilder;
@@ -47,10 +43,8 @@
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Objects;
-import java.util.Set;
 
 /**
  * Builder for the 'has_parent' query.
@@ -173,14 +167,6 @@ public boolean ignoreUnmapped() {
 
     @Override
     protected Query doToQuery(QueryShardContext context) throws IOException {
-        if (context.getIndexSettings().isSingleType()) {
-            return joinFieldDoToQuery(context);
-        } else {
-            return parentFieldDoToQuery(context);
-        }
-    }
-
-    private Query joinFieldDoToQuery(QueryShardContext context) throws IOException {
         ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.getMapperService());
         if (joinFieldMapper == null) {
             if (ignoreUnmapped) {
@@ -210,65 +196,6 @@ private Query joinFieldDoToQuery(QueryShardContext context) throws IOException {
         }
     }
 
-    private Query parentFieldDoToQuery(QueryShardContext context) throws IOException {
-        Query innerQuery;
-        String[] previousTypes = context.getTypes();
-        context.setTypes(type);
-        try {
-            innerQuery = query.toQuery(context);
-        } finally {
-            context.setTypes(previousTypes);
-        }
-
-        DocumentMapper parentDocMapper = context.documentMapper(type);
-        if (parentDocMapper == null) {
-            if (ignoreUnmapped) {
-                return new MatchNoDocsQuery();
-            } else {
-                throw new QueryShardException(context,
-                    "[" + NAME + "] query configured 'parent_type' [" + type + "] is not a valid type");
-            }
-        }
-
-        Set childTypes = new HashSet<>();
-        for (DocumentMapper documentMapper : context.getMapperService().docMappers(false)) {
-            ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
-            if (parentFieldMapper.active() && type.equals(parentFieldMapper.type())) {
-                childTypes.add(documentMapper.type());
-            }
-        }
-        if (childTypes.isEmpty()) {
-            throw new QueryShardException(context, "[" + NAME + "] no child types found for type [" + type + "]");
-        }
-
-        Query childrenQuery;
-        if (childTypes.size() == 1) {
-            DocumentMapper documentMapper = context.getMapperService().documentMapper(childTypes.iterator().next());
-            childrenQuery = documentMapper.typeFilter(context);
-        } else {
-            BooleanQuery.Builder childrenFilter = new BooleanQuery.Builder();
-            for (String childrenTypeStr : childTypes) {
-                DocumentMapper documentMapper = context.getMapperService().documentMapper(childrenTypeStr);
-                childrenFilter.add(documentMapper.typeFilter(context), BooleanClause.Occur.SHOULD);
-            }
-            childrenQuery = childrenFilter.build();
-        }
-
-        // wrap the query with type query
-        innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter(context));
-
-        final MappedFieldType parentType = parentDocMapper.parentFieldMapper().getParentJoinFieldType();
-        final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(parentType);
-        return new HasChildQueryBuilder.LateParsingQuery(childrenQuery,
-            innerQuery,
-            HasChildQueryBuilder.DEFAULT_MIN_CHILDREN,
-            HasChildQueryBuilder.DEFAULT_MAX_CHILDREN,
-            ParentFieldMapper.joinField(type),
-            score ? ScoreMode.Max : ScoreMode.None,
-            fieldData,
-            context.getSearchSimilarity());
-    }
-
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(NAME);
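After these deletions, has_child and has_parent resolve exclusively through ParentJoinFieldMapper; client-side usage is unchanged. A sketch with the module's builders (relation and field values illustrative):

    QueryBuilder parentsOfYellowChildren =
        JoinQueryBuilders.hasChildQuery("child", termQuery("c_field", "yellow"), ScoreMode.None);
    QueryBuilder childrenOfMatchingParents =
        JoinQueryBuilders.hasParentQuery("parent", termQuery("p_field", "p_value1"), false); // false = no scoring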
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java
index b2c09b5cbdbe4..6593c7efb9fab 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java
@@ -21,13 +21,9 @@
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.DocValuesTermsQuery;
-import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.search.TopFieldCollector;
@@ -36,12 +32,8 @@
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.IdFieldMapper;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.index.query.InnerHitBuilder;
 import org.elasticsearch.index.query.InnerHitContextBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
@@ -70,15 +62,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder {
     }
 
     @Override
-    protected void doBuild(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException {
-        if (parentSearchContext.mapperService().getIndexSettings().isSingleType()) {
-            handleJoinFieldInnerHits(parentSearchContext, innerHitsContext);
-        } else {
-            handleParentFieldInnerHits(parentSearchContext, innerHitsContext);
-        }
-    }
-
-    private void handleJoinFieldInnerHits(SearchContext context, InnerHitsContext innerHitsContext) throws IOException {
+    protected void doBuild(SearchContext context, InnerHitsContext innerHitsContext) throws IOException {
         QueryShardContext queryShardContext = context.getQueryShardContext();
         ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.mapperService());
         if (joinFieldMapper != null) {
@@ -94,24 +78,6 @@ private void handleJoinFieldInnerHits(SearchContext context, InnerHitsContext in
         }
     }
 
-    private void handleParentFieldInnerHits(SearchContext context, InnerHitsContext innerHitsContext) throws IOException {
-        QueryShardContext queryShardContext = context.getQueryShardContext();
-        DocumentMapper documentMapper = queryShardContext.documentMapper(typeName);
-        if (documentMapper == null) {
-            if (innerHitBuilder.isIgnoreUnmapped() == false) {
-                throw new IllegalStateException("[" + query.getName() + "] no mapping found for type [" + typeName + "]");
-            } else {
-                return;
-            }
-        }
-        String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : documentMapper.type();
-        ParentChildInnerHitSubContext parentChildInnerHits = new ParentChildInnerHitSubContext(
-            name, context, queryShardContext.getMapperService(), documentMapper
-        );
-        setupInnerHitsContext(queryShardContext, parentChildInnerHits);
-        innerHitsContext.addInnerHitDefinition(parentChildInnerHits);
-    }
-
     static final class JoinFieldInnerHitSubContext extends InnerHitsContext.InnerHitSubContext {
         private final String typeName;
         private final boolean fetchChildInnerHits;
@@ -206,85 +172,4 @@ private String getSortedDocValue(String field, SearchContext context, int docId)
 
     }
 
-    static final class ParentChildInnerHitSubContext extends InnerHitsContext.InnerHitSubContext {
-        private final MapperService mapperService;
-        private final DocumentMapper documentMapper;
-
-        ParentChildInnerHitSubContext(String name, SearchContext context, MapperService mapperService, DocumentMapper documentMapper) {
-            super(name, context);
-            this.mapperService = mapperService;
-            this.documentMapper = documentMapper;
-        }
-
-        @Override
-        public TopDocs[] topDocs(SearchHit[] hits) throws IOException {
-            Weight innerHitQueryWeight = createInnerHitQueryWeight();
-            TopDocs[] result = new TopDocs[hits.length];
-            for (int i = 0; i < hits.length; i++) {
-                SearchHit hit = hits[i];
-                final Query hitQuery;
-                if (isParentHit(hit)) {
-                    String field = ParentFieldMapper.joinField(hit.getType());
-                    hitQuery = new DocValuesTermsQuery(field, hit.getId());
-                } else if (isChildHit(hit)) {
-                    DocumentMapper hitDocumentMapper = mapperService.documentMapper(hit.getType());
-                    final String parentType = hitDocumentMapper.parentFieldMapper().type();
-                    DocumentField parentField = hit.field(ParentFieldMapper.NAME);
-                    if (parentField == null) {
-                        throw new IllegalStateException("All children must have a _parent");
-                    }
-                    Term uidTerm = context.mapperService().createUidTerm(parentType, parentField.getValue());
-                    if (uidTerm == null) {
-                        hitQuery = new MatchNoDocsQuery("Missing type: " + parentType);
-                    } else {
-                        hitQuery = new TermQuery(uidTerm);
-                    }
-                } else {
-                    result[i] = Lucene.EMPTY_TOP_DOCS;
-                    continue;
-                }
-
-                BooleanQuery q = new BooleanQuery.Builder()
-                    // Only include docs that have the current hit as parent
-                    .add(hitQuery, BooleanClause.Occur.FILTER)
-                    // Only include docs that have this inner hits type
-                    .add(documentMapper.typeFilter(context.getQueryShardContext()), BooleanClause.Occur.FILTER)
-                    .build();
-                Weight weight = context.searcher().createNormalizedWeight(q, false);
-                if (size() == 0) {
-                    TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
-                    for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
-                        intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx);
-                    }
-                    result[i] = new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
-                } else {
-                    int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc());
-                    TopDocsCollector topDocsCollector;
-                    if (sort() != null) {
-                        topDocsCollector = TopFieldCollector.create(sort().sort, topN, true, trackScores(), trackScores(), true);
-                    } else {
-                        topDocsCollector = TopScoreDocCollector.create(topN);
-                    }
-                    try {
-                        for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) {
-                            intersect(weight, innerHitQueryWeight, topDocsCollector, ctx);
-                        }
-                    } finally {
-                        clearReleasables(Lifetime.COLLECTION);
-                    }
-                    result[i] = topDocsCollector.topDocs(from(), size());
-                }
-            }
-            return result;
-        }
-
-        private boolean isParentHit(SearchHit hit) {
-            return hit.getType().equals(documentMapper.parentFieldMapper().type());
-        }
-
-        private boolean isChildHit(SearchHit hit) {
-            DocumentMapper hitDocumentMapper = mapperService.documentMapper(hit.getType());
-            return documentMapper.type().equals(hitDocumentMapper.parentFieldMapper().type());
-        }
-    }
 }
diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java
index 25a380e566a42..bc8820c597790 100644
--- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java
+++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java
@@ -19,22 +19,16 @@
 
 package org.elasticsearch.join.query;
 
-import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.DocValuesTermsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
-import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.QueryShardException;
@@ -159,11 +153,6 @@ public static ParentIdQueryBuilder fromXContent(XContentParser parser) throws IO
 
     @Override
     protected Query doToQuery(QueryShardContext context) throws IOException {
-        if (context.getIndexSettings().isSingleType() == false) {
-            // BWC for indices with multiple types
-            return doToQueryBWC(context);
-        }
-
         ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.getMapperService());
         if (joinFieldMapper == null) {
             if (ignoreUnmapped) {
@@ -188,32 +177,6 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
             .build();
     }
 
-    /**
-     * Creates parent_id query from a {@link ParentFieldMapper}
-     * Only used for BWC with multi-types indices
-     */
-    private Query doToQueryBWC(QueryShardContext context) throws IOException {
-        DocumentMapper childDocMapper = context.getMapperService().documentMapper(type);
-        if (childDocMapper == null) {
-            if (ignoreUnmapped) {
-                return new MatchNoDocsQuery();
-            } else {
-                throw new QueryShardException(context, "[" + NAME + "] no mapping found for type [" + type + "]");
-            }
-        }
-        ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
-        if (parentFieldMapper.active() == false) {
-            throw new QueryShardException(context, "[" + NAME + "] _parent field has no parent type configured");
-        }
-        String fieldName = ParentFieldMapper.joinField(parentFieldMapper.type());
-
-        return new BooleanQuery.Builder()
-            .add(new DocValuesTermsQuery(fieldName, id), BooleanClause.Occur.MUST)
-            // Need to take child type into account, otherwise a child doc of different type with the same id could match
-            .add(new TermQuery(new Term(TypeFieldMapper.NAME, type)), BooleanClause.Occur.FILTER)
-            .build();
-    }
-
     @Override
     protected boolean doEquals(ParentIdQueryBuilder that) {
         return Objects.equals(type, that.type)
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java
index 12bb2f700e394..869019ac0ffce 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.join.aggregations;
 
 import org.apache.lucene.search.join.ScoreMode;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.update.UpdateResponse;
@@ -68,20 +67,12 @@ public class ChildrenIT extends ParentChildTestCase {
     @Before
     public void setupCluster() throws Exception {
         categoryToControl.clear();
-        if (legacy()) {
-            assertAcked(
-                prepareCreate("test")
-                    .addMapping("article", "category", "type=keyword")
-                    .addMapping("comment", "_parent", "type=article", "commenter", "type=keyword")
-            );
-        } else {
-            assertAcked(
-                prepareCreate("test")
-                    .addMapping("doc",
-                        addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"),
-                            "commenter", "keyword", "category", "keyword"))
-            );
-        }
+        assertAcked(
+            prepareCreate("test")
+                .addMapping("doc",
+                    addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"),
+                        "commenter", "keyword", "category", "keyword"))
+        );
 
         List requests = new ArrayList<>();
         String[] uniqueCategories = new String[randomIntBetween(1, 25)];
@@ -189,7 +180,7 @@ public void testParentWithMultipleBuckets() throws Exception {
                 .setQuery(matchQuery("randomized", false))
                 .addAggregation(
                     terms("category").field("category").size(10000).subAggregation(
-                        children("to_comment", "comment").subAggregation(topHits("top_comments").sort("_uid", SortOrder.ASC))
+                        children("to_comment", "comment").subAggregation(topHits("top_comments").sort("_id", SortOrder.ASC))
                     )
                 ).get();
         assertSearchResponse(searchResponse);
@@ -244,20 +235,12 @@ public void testParentWithMultipleBuckets() throws Exception {
 
     public void testWithDeletes() throws Exception {
         String indexName = "xyz";
-        if (legacy()) {
-            assertAcked(
-                prepareCreate(indexName)
-                    .addMapping("parent")
-                    .addMapping("child", "_parent", "type=parent", "count", "type=long")
-            );
-        } else {
-            assertAcked(
-                prepareCreate(indexName)
-                    .addMapping("doc",
-                        addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"),
-                            "name", "keyword"))
-            );
-        }
+        assertAcked(
+            prepareCreate(indexName)
+                .addMapping("doc",
+                    addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"),
+                        "name", "keyword"))
+        );
 
         List requests = new ArrayList<>();
         requests.add(createIndexRequest(indexName, "parent", "1", null));
@@ -286,19 +269,11 @@ public void testWithDeletes() throws Exception {
              * the updates cause that.
             */
            UpdateResponse updateResponse;
-            if (legacy()) {
-                updateResponse = client().prepareUpdate(indexName, "child", idToUpdate)
-                    .setParent("1")
-                    .setDoc(Requests.INDEX_CONTENT_TYPE, "count", 1)
-                    .setDetectNoop(false)
-                    .get();
-            } else {
-                updateResponse = client().prepareUpdate(indexName, "doc", idToUpdate)
-                    .setRouting("1")
-                    .setDoc(Requests.INDEX_CONTENT_TYPE, "count", 1)
-                    .setDetectNoop(false)
-                    .get();
-            }
+            updateResponse = client().prepareUpdate(indexName, "doc", idToUpdate)
+                .setRouting("1")
+                .setDoc(Requests.INDEX_CONTENT_TYPE, "count", 1)
+                .setDetectNoop(false)
+                .get();
             assertThat(updateResponse.getVersion(), greaterThan(1L));
             refresh();
         }
@@ -320,26 +295,15 @@ public void testPostCollection() throws Exception {
         String indexName = "prodcatalog";
         String masterType = "masterprod";
         String childType = "variantsku";
-        if (legacy()) {
-            assertAcked(
-                prepareCreate(indexName)
-                    .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
-                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
-                        .put("index.version.created", Version.V_5_6_0)) // multi type
-                    .addMapping(masterType, "brand", "type=text", "name", "type=keyword", "material", "type=text")
-                    .addMapping(childType, "_parent", "type=masterprod", "color", "type=keyword", "size", "type=keyword")
-            );
-        } else {
-            assertAcked(
-                prepareCreate(indexName)
-                    .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
-                        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
-                    .addMapping("doc",
-                        addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
-                            masterType, childType),
-                            "brand", "text", "name", "keyword", "material", "text", "color", "keyword", "size", "keyword"))
-            );
-        }
+        assertAcked(
+            prepareCreate(indexName)
+                .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
+                .addMapping("doc",
+                    addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
+                        masterType, childType),
+                        "brand", "text", "name", "keyword", "material", "text", "color", "keyword", "size", "keyword"))
+        );
 
         List requests = new ArrayList<>();
         requests.add(createIndexRequest(indexName, masterType, "1", null, "brand", "Levis", "name",
@@ -396,25 +360,13 @@ public void testHierarchicalChildrenAggs() {
         String grandParentType = "continent";
         String parentType = "country";
         String childType = "city";
-        if (legacy()) {
-            assertAcked(
-                prepareCreate(indexName)
-                    .setSettings(Settings.builder()
-                        .put("index.version.created", Version.V_5_6_0) // multi type
-                    ).addMapping(grandParentType, "name", "type=keyword")
-                    .addMapping(parentType, "_parent", "type=" + grandParentType)
-                    .addMapping(childType, "_parent", "type=" + parentType)
-
-            );
-        } else {
-            assertAcked(
-                prepareCreate(indexName)
-                    .addMapping("doc",
-                        addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
-                            grandParentType, parentType, parentType, childType),
-                            "name", "keyword"))
-            );
-        }
+        assertAcked(
+            prepareCreate(indexName)
+                .addMapping("doc",
+                    addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
+                        grandParentType, parentType, parentType, childType),
+                        "name", "keyword"))
+        );
 
         createIndexRequest(indexName, grandParentType, "1", null, "name", "europe").get();
         createIndexRequest(indexName, parentType, "2", "1", "name", "belgium").get();
@@ -451,21 +403,13 @@ public void testPostCollectAllLeafReaders() throws Exception {
         // Before we only evaluated segments that yielded matches in 'towns' and 'parent_names' aggs, which caused
         // us to miss to evaluate child docs in segments we didn't have parent matches for.
-        if (legacy()) {
-            assertAcked(
-                prepareCreate("index")
-                    .addMapping("parentType", "name", "type=keyword", "town", "type=keyword")
-                    .addMapping("childType", "_parent", "type=parentType", "name", "type=keyword", "age", "type=integer")
-            );
-        } else {
-            assertAcked(
-                prepareCreate("index")
-                    .addMapping("doc",
-                        addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
-                            "parentType", "childType"),
-                            "name", "keyword", "town", "keyword", "age", "integer"))
-            );
-        }
+        assertAcked(
+            prepareCreate("index")
+                .addMapping("doc",
+                    addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
+                        "parentType", "childType"),
+                        "name", "keyword", "town", "keyword", "age", "integer"))
+        );
 
         List requests = new ArrayList<>();
         requests.add(createIndexRequest("index", "parentType", "1", null, "name", "Bob", "town", "Memphis"));
         requests.add(createIndexRequest("index", "parentType", "2", null, "name", "Alice", "town", "Chicago"));
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/LegacyChildrenIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/LegacyChildrenIT.java
deleted file mode 100644
index ecf4d06f325e8..0000000000000
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/LegacyChildrenIT.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.join.aggregations;
-
-public class LegacyChildrenIT extends ChildrenIT {
-
-    @Override
-    protected boolean legacy() {
-        return true;
-    }
-}
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java
index e60c1a58bc617..d6557256ce002 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java
@@ -39,12 +39,12 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
 import org.elasticsearch.index.mapper.Uid;
-import org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.join.mapper.MetaJoinFieldMapper;
 import org.elasticsearch.join.mapper.ParentJoinFieldMapper;
@@ -109,7 +109,7 @@ public void testParentChild() throws IOException {
         });
 
         for (String parent : expectedParentChildRelations.keySet()) {
-            testCase(new TermInSetQuery(UidFieldMapper.NAME, new BytesRef(Uid.createUid(PARENT_TYPE, parent))), indexSearcher, child -> {
+            testCase(new TermInSetQuery(IdFieldMapper.NAME, Uid.encodeId(parent)), indexSearcher, child -> {
                 assertEquals((long) expectedParentChildRelations.get(parent).v1(), child.getDocCount());
                 assertEquals(expectedParentChildRelations.get(parent).v2(),
                     ((InternalMin) child.getAggregations().get("in_child")).getValue(), Double.MIN_VALUE);
@@ -139,7 +139,7 @@ private static Map> setupIndex(RandomIndexWriter
 
     private static List createParentDocument(String id) {
         return Arrays.asList(
-            new StringField(UidFieldMapper.NAME, Uid.createUid(PARENT_TYPE, id), Field.Store.NO),
+            new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.NO),
             new StringField("join_field", PARENT_TYPE, Field.Store.NO),
             createJoinField(PARENT_TYPE, id)
         );
@@ -147,7 +147,7 @@ private static List createParentDocument(String id) {
 
     private static List createChildDocument(String childId, String parentId, int value) {
         return Arrays.asList(
-            new StringField(UidFieldMapper.NAME, Uid.createUid(CHILD_TYPE, childId), Field.Store.NO),
+            new StringField(IdFieldMapper.NAME, Uid.encodeId(childId), Field.Store.NO),
             new StringField("join_field", CHILD_TYPE, Field.Store.NO),
             createJoinField(PARENT_TYPE, parentId),
             new SortedNumericDocValuesField("number", value)
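The aggregator test now addresses parents by _id instead of the removed _uid, and the id is passed through Uid.encodeId because _id is indexed in binary-encoded form. Sketch (the literal id is illustrative):

    // Before: new TermInSetQuery("_uid", new BytesRef(Uid.createUid("parent_type", "1")))
    Query parentQuery = new TermInSetQuery(IdFieldMapper.NAME, Uid.encodeId("1"));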
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java
index 180c1ec9b1388..247d8aa7b2480 100644
--- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java
+++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ChildQuerySearchIT.java
@@ -69,7 +69,6 @@
 import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
 import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
-import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
 import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction;
 import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.weightFactorFunction;
 import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery;
@@ -88,23 +87,10 @@ public class ChildQuerySearchIT extends ParentChildTestCase {
 
-    public void testSelfReferentialIsForbidden() {
-        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
-            prepareCreate("test").addMapping("type", "_parent", "type=type").get());
-        assertThat(e.getMessage(), equalTo("The [_parent.type] option can't point to the same type"));
-    }
-
     public void testMultiLevelChild() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent")
-                .addMapping("grandchild", "_parent", "type=child"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
-                    "parent", "child", "child", "grandchild")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
+                "parent", "child", "child", "grandchild")));
         ensureGreen();
 
         createIndexRequest("test", "parent", "p1", null, "p_field", "p_value1").get();
@@ -159,14 +145,8 @@ public void testMultiLevelChild() throws Exception {
 
     // see #2744
     public void test2744() throws IOException {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("foo")
-                .addMapping("test", "_parent", "type=foo"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "foo", "test")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "foo", "test")));
         ensureGreen();
 
         // index simple data
@@ -183,14 +163,8 @@ public void test2744() throws IOException {
     }
 
     public void testSimpleChildQuery() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         // index simple data
@@ -204,57 +178,26 @@ public void testSimpleChildQuery() throws Exception {
 
         // TEST FETCHING _parent from child
         SearchResponse searchResponse;
-        if (legacy()) {
-            searchResponse = client().prepareSearch("test")
-                .setQuery(idsQuery("child").addIds("c1")).storedFields("_parent").get();
-            assertNoFailures(searchResponse);
-            assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
-            assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1"));
-            assertThat(searchResponse.getHits().getAt(0).field("_parent").getValue(), equalTo("p1"));
-        } else {
-            searchResponse = client().prepareSearch("test")
-                .setQuery(idsQuery("doc").addIds("c1")).get();
-            assertNoFailures(searchResponse);
-            assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
-            assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1"));
-            assertThat(extractValue("join_field.name", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("child"));
-            assertThat(extractValue("join_field.parent", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("p1"));
-        }
+        searchResponse = client().prepareSearch("test")
+            .setQuery(idsQuery("doc").addIds("c1")).get();
+        assertNoFailures(searchResponse);
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(1L));
+        assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("c1"));
+        assertThat(extractValue("join_field.name", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("child"));
+        assertThat(extractValue("join_field.parent", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("p1"));
 
         // TEST matching on parent
-        if (legacy()) {
-            searchResponse = client().prepareSearch("test").setQuery(termQuery("_parent#parent", "p1")).storedFields("_parent").get();
-            assertNoFailures(searchResponse);
-            assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
-            assertThat(searchResponse.getHits().getAt(0).getId(), anyOf(equalTo("c1"), equalTo("c2")));
-            assertThat(searchResponse.getHits().getAt(0).field("_parent").getValue(), equalTo("p1"));
-            assertThat(searchResponse.getHits().getAt(1).getId(), anyOf(equalTo("c1"), equalTo("c2")));
-            assertThat(searchResponse.getHits().getAt(1).field("_parent").getValue(), equalTo("p1"));
-        } else {
-            searchResponse = client().prepareSearch("test")
-                .setQuery(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child")))
-                .get();
-            assertNoFailures(searchResponse);
-            assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
-            assertThat(searchResponse.getHits().getAt(0).getId(), anyOf(equalTo("c1"), equalTo("c2")));
-            assertThat(extractValue("join_field.name", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("child"));
-            assertThat(extractValue("join_field.parent", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("p1"));
-            assertThat(searchResponse.getHits().getAt(1).getId(), anyOf(equalTo("c1"), equalTo("c2")));
-            assertThat(extractValue("join_field.name", searchResponse.getHits().getAt(1).getSourceAsMap()), equalTo("child"));
-            assertThat(extractValue("join_field.parent", searchResponse.getHits().getAt(1).getSourceAsMap()), equalTo("p1"));
-        }
-
-        if (legacy()) {
-            searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("_parent#parent:p1")).storedFields("_parent").get();
-            assertNoFailures(searchResponse);
-            assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
-            assertThat(searchResponse.getHits().getAt(0).getId(), anyOf(equalTo("c1"), equalTo("c2")));
-            assertThat(searchResponse.getHits().getAt(0).field("_parent").getValue(), equalTo("p1"));
-            assertThat(searchResponse.getHits().getAt(1).getId(), anyOf(equalTo("c1"), equalTo("c2")));
-            assertThat(searchResponse.getHits().getAt(1).field("_parent").getValue(), equalTo("p1"));
-        } else {
-            // doesn't make sense for join field, because query string & term query om this field have no special logic.
-        }
+        searchResponse = client().prepareSearch("test")
+            .setQuery(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child")))
+            .get();
+        assertNoFailures(searchResponse);
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L));
+        assertThat(searchResponse.getHits().getAt(0).getId(), anyOf(equalTo("c1"), equalTo("c2")));
+        assertThat(extractValue("join_field.name", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("child"));
+        assertThat(extractValue("join_field.parent", searchResponse.getHits().getAt(0).getSourceAsMap()), equalTo("p1"));
+        assertThat(searchResponse.getHits().getAt(1).getId(), anyOf(equalTo("c1"), equalTo("c2")));
+        assertThat(extractValue("join_field.name", searchResponse.getHits().getAt(1).getSourceAsMap()), equalTo("child"));
+        assertThat(extractValue("join_field.parent", searchResponse.getHits().getAt(1).getSourceAsMap()), equalTo("p1"));
 
         // HAS CHILD
         searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "yellow"))
@@ -290,14 +233,8 @@ public void testSimpleChildQuery() throws Exception {
 
     // Issue #3290
     public void testCachingBugWithFqueryFilter() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
         List builders = new ArrayList<>();
         // index simple data
@@ -334,14 +271,8 @@ public void testCachingBugWithFqueryFilter() throws Exception {
     }
 
     public void testHasParentFilter() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
         Map> parentToChildren = new HashMap<>();
         // Childless parent
@@ -388,14 +319,8 @@ public void testHasParentFilter() throws Exception {
     }
 
     public void testSimpleChildQueryWithFlush() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         // index simple data with flushes, so we have many segments
@@ -462,16 +387,10 @@ public void testSimpleChildQueryWithFlush() throws Exception {
     }
 
     public void testScopedFacet() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent", "c_field", "type=keyword"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc",
-                    addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"),
-                        "c_field", "keyword")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc",
+                addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"),
+                    "c_field", "keyword")));
         ensureGreen();
 
         // index simple data
@@ -508,14 +427,8 @@ public void testScopedFacet() throws Exception {
     }
 
     public void testDeletedParent() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
         // index simple data
         createIndexRequest("test", "parent", "p1", null, "p_field", "p_value1").get();
@@ -548,14 +461,8 @@ public void testDeletedParent() throws Exception {
     }
 
     public void testDfsSearchType() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         // index simple data
@@ -581,20 +488,14 @@ public void testDfsSearchType() throws Exception {
     }
 
     public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrChildDocs() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         createIndexRequest("test", "parent", "1", null, "p_field", 1).get();
         createIndexRequest("test", "child", "2", "1", "c_field", 1).get();
-        client().prepareIndex("test", legacy() ? "type1" : "doc", "3").setSource("p_field", 1).get();
+        client().prepareIndex("test", "doc", "3").setSource("p_field", 1).get();
         refresh();
 
         SearchResponse searchResponse = client().prepareSearch("test")
@@ -609,14 +510,8 @@ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrCh
     }
 
     public void testCountApiUsage() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
        ensureGreen();
 
         String parentId = "p1";
@@ -646,14 +541,8 @@ public void testCountApiUsage() throws Exception {
     }
 
     public void testExplainUsage() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         String parentId = "p1";
@@ -675,7 +564,7 @@ public void testExplainUsage() throws Exception {
         assertHitCount(searchResponse, 1L);
         assertThat(searchResponse.getHits().getAt(0).getExplanation().getDescription(), containsString("join value p1"));
 
-        ExplainResponse explainResponse = client().prepareExplain("test", legacy() ? "parent" : "doc", parentId)
+        ExplainResponse explainResponse = client().prepareExplain("test", "doc", parentId)
             .setQuery(hasChildQuery("child", termQuery("c_field", "1"), ScoreMode.Max))
             .get();
         assertThat(explainResponse.isExists(), equalTo(true));
@@ -716,23 +605,16 @@ List createDocBuilders() {
     }
 
     public void testScoreForParentChildQueriesWithFunctionScore() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent")
-                .addMapping("child1", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties")
-                    .startObject("join_field")
-                        .field("type", "join")
-                        .startObject("relations")
-                            .field("parent", new String[] {"child", "child1"})
-                        .endObject()
-                    .endObject()
-                    .endObject().endObject().endObject()
-                ));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties")
+                .startObject("join_field")
+                    .field("type", "join")
+                    .startObject("relations")
+                        .field("parent", new String[] {"child", "child1"})
+                    .endObject()
+                .endObject()
+                .endObject().endObject().endObject()
+            ));
         ensureGreen();
 
         indexRandom(true, createDocBuilders().toArray(new IndexRequestBuilder[0]));
@@ -816,14 +698,8 @@ public void testScoreForParentChildQueriesWithFunctionScore() throws Exception {
 
     // Issue #2536
     public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         SearchResponse response = client().prepareSearch("test")
@@ -831,13 +707,8 @@ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Excep
         assertNoFailures(response);
         assertThat(response.getHits().getTotalHits(), equalTo(0L));
 
-        if (legacy()) {
-            client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject())
-                .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
-        } else {
-            client().prepareIndex("test", "doc").setSource(jsonBuilder().startObject().field("text", "value").endObject())
-                .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
-        }
+        client().prepareIndex("test", "doc").setSource(jsonBuilder().startObject().field("text", "value").endObject())
+            .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
 
         response = client().prepareSearch("test")
             .setQuery(hasChildQuery("child", matchQuery("text", "value"), ScoreMode.None)).get();
@@ -861,25 +732,15 @@ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Excep
     }
 
     public void testHasChildAndHasParentFilter_withFilter() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         createIndexRequest("test", "parent", "1", null, "p_field", 1).get();
         createIndexRequest("test", "child", "2", "1", "c_field", 1).get();
         client().admin().indices().prepareFlush("test").get();
 
-        if (legacy()) {
-            client().prepareIndex("test", "type1", "3").setSource("p_field", 2).get();
-        } else {
-            client().prepareIndex("test", "doc", "3").setSource("p_field", 2).get();
-        }
+        client().prepareIndex("test", "doc", "3").setSource("p_field", 2).get();
         refresh();
 
         SearchResponse searchResponse = client().prepareSearch("test")
@@ -898,14 +759,8 @@ public void testHasChildAndHasParentFilter_withFilter() throws Exception {
     }
 
     public void testHasChildInnerHitsHighlighting() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         createIndexRequest("test", "parent", "1", null, "p_field", 1).get();
@@ -928,14 +783,8 @@ public void testHasChildInnerHitsHighlighting() throws Exception {
     }
 
     public void testHasChildAndHasParentWrappedInAQueryFilter() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         ensureGreen();
 
         // query filter in case for p/c shouldn't
execute per segment, but rather @@ -966,15 +815,9 @@ public void testHasChildAndHasParentWrappedInAQueryFilter() throws Exception { } public void testSimpleQueryRewrite() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent", "p_field", "type=keyword") - .addMapping("child", "_parent", "type=parent", "c_field", "type=keyword")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), - "c_field", "keyword", "p_field", "keyword"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), + "c_field", "keyword", "p_field", "keyword"))); ensureGreen(); // index simple data @@ -1020,14 +863,8 @@ public void testSimpleQueryRewrite() throws Exception { // Issue #3144 public void testReIndexingParentAndChildDocuments() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); // index simple data @@ -1087,14 +924,8 @@ public void testReIndexingParentAndChildDocuments() throws Exception { // Issue #3203 public void testHasChildQueryWithMinimumScore() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); // index simple data @@ -1117,90 +948,42 @@ public void testHasChildQueryWithMinimumScore() throws Exception { } public void testParentFieldQuery() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("index.refresh_interval", -1) - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put("index.refresh_interval", -1)) - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .setSettings(Settings.builder().put("index.refresh_interval", -1)) + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); - SearchResponse response; - if (legacy()){ - response = client().prepareSearch("test").setQuery(termQuery("_parent#parent:p1", "p1")) - .get(); - } else { - response = client().prepareSearch("test") - .setQuery(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child"))) - .get(); - } + SearchResponse response = client().prepareSearch("test") + .setQuery(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child"))) + .get(); assertHitCount(response, 0L); createIndexRequest("test", "child", "c1", "p1").get(); refresh(); - if 
(legacy()){ - response = client().prepareSearch("test").setQuery(termQuery("_parent#parent", "p1")) - .get(); - } else { - response = client().prepareSearch("test") - .setQuery(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child"))) - .get(); - } + response = client().prepareSearch("test") + .setQuery(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child"))) + .get(); assertHitCount(response, 1L); - if (legacy()) { - response = client().prepareSearch("test").setQuery(queryStringQuery("_parent#parent:p1")).get(); - assertHitCount(response, 1L); - } - createIndexRequest("test", "child", "c2", "p2").get(); refresh(); - if (legacy()) { - response = client().prepareSearch("test").setQuery(termsQuery("_parent#parent", "p1", "p2")).get(); - assertHitCount(response, 2L); - } - if (legacy()) { - response = client().prepareSearch("test") - .setQuery(boolQuery() - .should(termQuery("_parent#parent", "p1")) - .should(termQuery("_parent#parent", "p2")) - ).get(); - } else { - response = client().prepareSearch("test") - .setQuery(boolQuery() - .should(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child"))) - .should(boolQuery().filter(termQuery("join_field#parent", "p2")).filter(termQuery("join_field", "child"))) - ).get(); - } + response = client().prepareSearch("test") + .setQuery(boolQuery() + .should(boolQuery().filter(termQuery("join_field#parent", "p1")).filter(termQuery("join_field", "child"))) + .should(boolQuery().filter(termQuery("join_field#parent", "p2")).filter(termQuery("join_field", "child"))) + ).get(); assertHitCount(response, 2L); } public void testParentIdQuery() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("index.refresh_interval", -1) - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("index.refresh_interval", -1) - ) - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .setSettings(Settings.builder() + .put(indexSettings()) + .put("index.refresh_interval", -1) + ) + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); createIndexRequest("test", "child", "c1", "p1").get(); @@ -1221,14 +1004,8 @@ public void testParentIdQuery() throws Exception { } public void testHasChildNotBeingCached() throws IOException { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); // index simple data @@ -1288,25 +1065,17 @@ private QueryBuilder randomHasParent(String type, String field, String value) { // Issue #3818 public void testHasChildQueryOnlyReturnsSingleChildType() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("grandissue") - .addMapping("grandparent", "name", "type=text") - .addMapping("parent", "_parent", "type=grandparent") - .addMapping("child_type_one", "_parent", 
"type=parent") - .addMapping("child_type_two", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("grandissue") - .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("grandparent", "parent") - .field("parent", new String[] {"child_type_one", "child_type_two"}) - .endObject() + assertAcked(prepareCreate("grandissue") + .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("grandparent", "parent") + .field("parent", new String[] {"child_type_one", "child_type_two"}) .endObject() - .endObject().endObject().endObject() - )); - } + .endObject() + .endObject().endObject().endObject() + )); createIndexRequest("grandissue", "grandparent", "1", null, "name", "Grandpa").get(); createIndexRequest("grandissue", "parent", "2", "1", "name", "Dana").get(); @@ -1350,16 +1119,10 @@ public void testHasChildQueryOnlyReturnsSingleChildType() throws Exception { } public void testHasChildQueryWithNestedInnerObjects() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent", "objects", "type=nested") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", - addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), - "objects", "nested"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"), + "objects", "nested"))); ensureGreen(); createIndexRequest("test", "parent", "p1", null, jsonBuilder().startObject().field("p_field", "1").startArray("objects") @@ -1398,14 +1161,8 @@ public void testHasChildQueryWithNestedInnerObjects() throws Exception { } public void testNamedFilters() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); String parentId = "p1"; @@ -1450,7 +1207,7 @@ public void testParentChildQueriesNoParentType() throws Exception { ensureGreen(); String parentId = "p1"; - client().prepareIndex("test", legacy() ? 
"parent" : "doc", parentId).setSource("p_field", "1").get(); + client().prepareIndex("test", "doc", parentId).setSource("p_field", "1").get(); refresh(); try { @@ -1500,19 +1257,9 @@ public void testParentChildQueriesNoParentType() throws Exception { } public void testParentChildCaching() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder() - .put(indexSettings()) - .put("index.refresh_interval", -1) - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put("index.refresh_interval", -1)) - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .setSettings(Settings.builder().put("index.refresh_interval", -1)) + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); // index simple data @@ -1553,14 +1300,8 @@ public void testParentChildCaching() throws Exception { } public void testParentChildQueriesViaScrollApi() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); for (int i = 0; i < 10; i++) { createIndexRequest("test", "parent", "p" + i, null).get(); @@ -1600,44 +1341,6 @@ public void testParentChildQueriesViaScrollApi() throws Exception { } } - public void testTypeIsAppliedInHasParentInnerQuery() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } - ensureGreen(); - - List indexRequests = new ArrayList<>(); - indexRequests.add(createIndexRequest("test", "parent", "p1", null, "field1", "a")); - indexRequests.add(createIndexRequest("test", "child", "c1", "p1")); - indexRequests.add(createIndexRequest("test", "child", "c2", "p1")); - indexRandom(true, indexRequests); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(hasParentQuery("parent", boolQuery().mustNot(termQuery("field1", "a")), false))) - .get(); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(hasParentQuery("parent", constantScoreQuery(boolQuery().mustNot(termQuery("field1", "a"))), false)) - .get(); - assertHitCount(searchResponse, 0L); - - searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(hasParentQuery("parent", termQuery("field1", "a"), false))) - .get(); - assertHitCount(searchResponse, 2L); - - searchResponse = client().prepareSearch("test") - .setQuery(hasParentQuery("parent", constantScoreQuery(termQuery("field1", "a")), false)) - .get(); - assertHitCount(searchResponse, 2L); - } - private List createMinMaxDocBuilders() { List indexBuilders = new ArrayList<>(); // Parent 1 and its children @@ -1685,14 +1388,8 @@ private SearchResponse minMaxQuery(ScoreMode scoreMode, int minChildren, Integer } public void 
testMinMaxChildren() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent", "id", "type=long") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); ensureGreen(); indexRandom(true, createMinMaxDocBuilders().toArray(new IndexRequestBuilder[0])); @@ -2004,13 +1701,8 @@ public void testMinMaxChildren() throws Exception { } public void testHasParentInnerQueryType() { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent-type").addMapping("child-type", "_parent", "type=parent-type")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent-type", "child-type"))); - } + assertAcked(prepareCreate("test") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent-type", "child-type"))); createIndexRequest("test", "child-type", "child-id", "parent-id").get(); createIndexRequest("test", "parent-type", "parent-id", null).get(); refresh(); @@ -2026,28 +1718,21 @@ public void testHasParentInnerQueryType() { } public void testHighlightersIgnoreParentChild() throws IOException { - if (legacy()) { - assertAcked(prepareCreate("test") - .addMapping("parent-type", "searchText", "type=text,term_vector=with_positions_offsets,index_options=offsets") - .addMapping("child-type", "_parent", "type=parent-type", "searchText", - "type=text,term_vector=with_positions_offsets,index_options=offsets")); - } else { - assertAcked(prepareCreate("test") - .addMapping("doc", jsonBuilder().startObject().startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent-type", "child-type") - .endObject() - .endObject() - .startObject("searchText") - .field("type", "text") - .field("term_vector", "with_positions_offsets") - .field("index_options", "offsets") + assertAcked(prepareCreate("test") + .addMapping("doc", jsonBuilder().startObject().startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent-type", "child-type") .endObject() - .endObject().endObject() - )); - } + .endObject() + .startObject("searchText") + .field("type", "text") + .field("term_vector", "with_positions_offsets") + .field("index_options", "offsets") + .endObject() + .endObject().endObject() + )); createIndexRequest("test", "parent-type", "parent-id", null, "searchText", "quick brown fox").get(); createIndexRequest("test", "child-type", "child-id", "parent-id", "searchText", "quick brown fox").get(); refresh(); @@ -2082,15 +1767,8 @@ public void testHighlightersIgnoreParentChild() throws IOException { } public void testAliasesFilterWithHasChildQuery() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("my-index") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent") - ); - } else { - assertAcked(prepareCreate("my-index") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("my-index") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); createIndexRequest("my-index", "parent", "1", 
null).get(); createIndexRequest("my-index", "child", "2", "1").get(); refresh(); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java index 6efd5256e5422..57415d47f6e7f 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -86,30 +86,23 @@ protected Map, Object>> pluginScripts() { } public void testSimpleParentChild() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("articles") - .addMapping("article", "title", "type=text") - .addMapping("comment", "_parent", "type=article", "message", "type=text,fielddata=true") - ); - } else { - assertAcked(prepareCreate("articles") - .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("article", "comment") - .endObject() + assertAcked(prepareCreate("articles") + .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("article", "comment") .endObject() - .startObject("title") - .field("type", "text") - .endObject() - .startObject("message") - .field("type", "text") - .field("fielddata", true) - .endObject() - .endObject().endObject().endObject() - )); - } + .endObject() + .startObject("title") + .field("type", "text") + .endObject() + .startObject("message") + .field("type", "text") + .field("fielddata", true) + .endObject() + .endObject().endObject().endObject() + )); List requests = new ArrayList<>(); requests.add(createIndexRequest("articles", "article", "p1", null, "title", "quick brown fox")); @@ -136,9 +129,9 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getTotalHits(), equalTo(2L)); assertThat(innerHits.getAt(0).getId(), equalTo("c1")); - assertThat(innerHits.getAt(0).getType(), equalTo(legacy() ? "comment" : "doc")); + assertThat(innerHits.getAt(0).getType(), equalTo("doc")); assertThat(innerHits.getAt(1).getId(), equalTo("c2")); - assertThat(innerHits.getAt(1).getType(), equalTo(legacy() ? "comment" : "doc")); + assertThat(innerHits.getAt(1).getType(), equalTo("doc")); response = client().prepareSearch("articles") .setQuery(hasChildQuery("comment", matchQuery("message", "elephant"), ScoreMode.None) @@ -153,11 +146,11 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getTotalHits(), equalTo(3L)); assertThat(innerHits.getAt(0).getId(), equalTo("c4")); - assertThat(innerHits.getAt(0).getType(), equalTo(legacy() ? "comment" : "doc")); + assertThat(innerHits.getAt(0).getType(), equalTo("doc")); assertThat(innerHits.getAt(1).getId(), equalTo("c5")); - assertThat(innerHits.getAt(1).getType(), equalTo(legacy() ? "comment" : "doc")); + assertThat(innerHits.getAt(1).getType(), equalTo("doc")); assertThat(innerHits.getAt(2).getId(), equalTo("c6")); - assertThat(innerHits.getAt(2).getType(), equalTo(legacy() ? 
"comment" : "doc")); + assertThat(innerHits.getAt(2).getType(), equalTo("doc")); response = client().prepareSearch("articles") .setQuery( @@ -179,24 +172,16 @@ public void testSimpleParentChild() throws Exception { } public void testRandomParentChild() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("idx") - .addMapping("parent") - .addMapping("child1", "_parent", "type=parent") - .addMapping("child2", "_parent", "type=parent") - ); - } else { - assertAcked(prepareCreate("idx") - .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") - .startObject("join_field") - .field("type", "join") - .startObject("relations") - .field("parent", new String[] {"child1", "child2"}) - .endObject() + assertAcked(prepareCreate("idx") + .addMapping("doc", jsonBuilder().startObject().startObject("doc").startObject("properties") + .startObject("join_field") + .field("type", "join") + .startObject("relations") + .field("parent", new String[] {"child1", "child2"}) .endObject() - .endObject().endObject().endObject() - )); - } + .endObject() + .endObject().endObject().endObject() + )); int numDocs = scaledRandomIntBetween(5, 50); List requestBuilders = new ArrayList<>(); @@ -225,13 +210,13 @@ public void testRandomParentChild() throws Exception { BoolQueryBuilder boolQuery = new BoolQueryBuilder(); boolQuery.should(constantScoreQuery(hasChildQuery("child1", matchAllQuery(), ScoreMode.None) .innerHit(new InnerHitBuilder().setName("a") - .addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size)))); + .addSort(new FieldSortBuilder("_id").order(SortOrder.ASC)).setSize(size)))); boolQuery.should(constantScoreQuery(hasChildQuery("child2", matchAllQuery(), ScoreMode.None) .innerHit(new InnerHitBuilder().setName("b") - .addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size)))); + .addSort(new FieldSortBuilder("_id").order(SortOrder.ASC)).setSize(size)))); SearchResponse searchResponse = client().prepareSearch("idx") .setSize(numDocs) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .setQuery(boolQuery) .get(); @@ -243,7 +228,7 @@ public void testRandomParentChild() throws Exception { int offset2 = 0; for (int parent = 0; parent < numDocs; parent++) { SearchHit searchHit = searchResponse.getHits().getAt(parent); - assertThat(searchHit.getType(), equalTo(legacy() ? "parent" : "doc")); + assertThat(searchHit.getType(), equalTo("doc")); assertThat(searchHit.getId(), equalTo(String.format(Locale.ENGLISH, "p_%03d", parent))); assertThat(searchHit.getShard(), notNullValue()); @@ -251,7 +236,7 @@ public void testRandomParentChild() throws Exception { assertThat(inner.getTotalHits(), equalTo((long) child1InnerObjects[parent])); for (int child = 0; child < child1InnerObjects[parent] && child < size; child++) { SearchHit innerHit = inner.getAt(child); - assertThat(innerHit.getType(), equalTo(legacy() ? "child1" : "doc")); + assertThat(innerHit.getType(), equalTo("doc")); String childId = String.format(Locale.ENGLISH, "c1_%04d", offset1 + child); assertThat(innerHit.getId(), equalTo(childId)); assertThat(innerHit.getNestedIdentity(), nullValue()); @@ -262,7 +247,7 @@ public void testRandomParentChild() throws Exception { assertThat(inner.getTotalHits(), equalTo((long) child2InnerObjects[parent])); for (int child = 0; child < child2InnerObjects[parent] && child < size; child++) { SearchHit innerHit = inner.getAt(child); - assertThat(innerHit.getType(), equalTo(legacy() ? 
"child2" : "doc")); + assertThat(innerHit.getType(), equalTo("doc")); String childId = String.format(Locale.ENGLISH, "c2_%04d", offset2 + child); assertThat(innerHit.getId(), equalTo(childId)); assertThat(innerHit.getNestedIdentity(), nullValue()); @@ -272,16 +257,9 @@ public void testRandomParentChild() throws Exception { } public void testInnerHitsOnHasParent() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("stack") - .addMapping("question", "body", "type=text") - .addMapping("answer", "_parent", "type=question", "body", "type=text") - ); - } else { - assertAcked(prepareCreate("stack") - .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "question", "answer"), - "body", "text"))); - } + assertAcked(prepareCreate("stack") + .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "question", "answer"), + "body", "text"))); List requests = new ArrayList<>(); requests.add(createIndexRequest("stack", "question", "1", null, "body", "I'm using HTTPS + Basic authentication " + "to protect a resource. How can I throttle authentication attempts to protect against brute force attacks?")); @@ -293,7 +271,7 @@ public void testInnerHitsOnHasParent() throws Exception { indexRandom(true, requests); SearchResponse response = client().prepareSearch("stack") - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .setQuery( boolQuery() .must(matchQuery("body", "fail2ban")) @@ -304,32 +282,24 @@ public void testInnerHitsOnHasParent() throws Exception { SearchHit searchHit = response.getHits().getAt(0); assertThat(searchHit.getId(), equalTo("3")); - assertThat(searchHit.getType(), equalTo(legacy() ? "answer" : "doc")); + assertThat(searchHit.getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getTotalHits(), equalTo(1L)); - assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo(legacy() ? "question" : "doc")); + assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getAt(0).getId(), equalTo("1")); searchHit = response.getHits().getAt(1); assertThat(searchHit.getId(), equalTo("4")); - assertThat(searchHit.getType(), equalTo(legacy() ? "answer" : "doc")); + assertThat(searchHit.getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getTotalHits(), equalTo(1L)); - assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo(legacy() ? 
"question" : "doc")); + assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo("doc")); assertThat(searchHit.getInnerHits().get("question").getAt(0).getId(), equalTo("2")); } public void testParentChildMultipleLayers() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("articles") - .addMapping("article", "title", "type=text") - .addMapping("comment", "_parent", "type=article", "message", "type=text") - .addMapping("remark", "_parent", "type=comment", "message", "type=text") - ); - } else { - assertAcked(prepareCreate("articles") - .addMapping("doc", - addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, - "article", "comment", "comment", "remark"), "title", "text", "message", "text"))); - } + assertAcked(prepareCreate("articles") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "article", "comment", "comment", "remark"), "title", "text", "message", "text"))); List requests = new ArrayList<>(); requests.add(createIndexRequest("articles", "article", "1", null, "title", "quick brown fox")); @@ -354,12 +324,12 @@ public void testParentChildMultipleLayers() throws Exception { SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getTotalHits(), equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("3")); - assertThat(innerHits.getAt(0).getType(), equalTo(legacy() ? "comment" : "doc")); + assertThat(innerHits.getAt(0).getType(), equalTo("doc")); innerHits = innerHits.getAt(0).getInnerHits().get("remark"); assertThat(innerHits.getTotalHits(), equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("5")); - assertThat(innerHits.getAt(0).getType(), equalTo(legacy() ? "remark" : "doc")); + assertThat(innerHits.getAt(0).getType(), equalTo("doc")); response = client().prepareSearch("articles") .setQuery(hasChildQuery("comment", @@ -375,29 +345,18 @@ public void testParentChildMultipleLayers() throws Exception { innerHits = response.getHits().getAt(0).getInnerHits().get("comment"); assertThat(innerHits.getTotalHits(), equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("4")); - assertThat(innerHits.getAt(0).getType(), equalTo(legacy() ? "comment" : "doc")); + assertThat(innerHits.getAt(0).getType(), equalTo("doc")); innerHits = innerHits.getAt(0).getInnerHits().get("remark"); assertThat(innerHits.getTotalHits(), equalTo(1L)); assertThat(innerHits.getAt(0).getId(), equalTo("6")); - assertThat(innerHits.getAt(0).getType(), equalTo(legacy() ? 
"remark" : "doc")); + assertThat(innerHits.getAt(0).getType(), equalTo("doc")); } public void testRoyals() throws Exception { - if (legacy()) { - assertAcked( - prepareCreate("royals") - .addMapping("king") - .addMapping("prince", "_parent", "type=king") - .addMapping("duke", "_parent", "type=prince") - .addMapping("earl", "_parent", "type=duke") - .addMapping("baron", "_parent", "type=earl") - ); - } else { - assertAcked(prepareCreate("royals") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, - "king", "prince", "prince", "duke", "duke", "earl", "earl", "baron"))); - } + assertAcked(prepareCreate("royals") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, + "king", "prince", "prince", "duke", "duke", "earl", "earl", "baron"))); List requests = new ArrayList<>(); requests.add(createIndexRequest("royals", "king", "king", null)); @@ -423,7 +382,7 @@ public void testRoyals() throws Exception { hasChildQuery("baron", matchAllQuery(), ScoreMode.None) .innerHit(new InnerHitBuilder().setName("barons")), ScoreMode.None).innerHit(new InnerHitBuilder() - .addSort(SortBuilders.fieldSort("_uid").order(SortOrder.ASC)) + .addSort(SortBuilders.fieldSort("_id").order(SortOrder.ASC)) .setName("earls") .setSize(4)) ) @@ -464,13 +423,8 @@ public void testRoyals() throws Exception { } public void testMatchesQueriesParentChildInnerHits() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("index") - .addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("index") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("index") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); List requests = new ArrayList<>(); requests.add(createIndexRequest("index", "parent", "1", null)); requests.add(createIndexRequest("index", "child", "3", "1", "field", "value1")); @@ -482,7 +436,7 @@ public void testMatchesQueriesParentChildInnerHits() throws Exception { SearchResponse response = client().prepareSearch("index") .setQuery(hasChildQuery("child", matchQuery("field", "value1").queryName("_name1"), ScoreMode.None) .innerHit(new InnerHitBuilder())) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -499,7 +453,7 @@ public void testMatchesQueriesParentChildInnerHits() throws Exception { .innerHit(new InnerHitBuilder()); response = client().prepareSearch("index") .setQuery(query) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 1); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -509,12 +463,8 @@ public void testMatchesQueriesParentChildInnerHits() throws Exception { } public void testUseMaxDocInsteadOfSize() throws Exception { - if (legacy()) { - assertAcked(prepareCreate("index1").addMapping("child", "_parent", "type=parent")); - } else { - assertAcked(prepareCreate("index1") - .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); - } + assertAcked(prepareCreate("index1") + .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child"))); client().admin().indices().prepareUpdateSettings("index1") .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), 
@@ -509,12 +463,8 @@
     }
 
     public void testUseMaxDocInsteadOfSize() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("index1").addMapping("child", "_parent", "type=parent"));
-        } else {
-            assertAcked(prepareCreate("index1")
-                .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
-        }
+        assertAcked(prepareCreate("index1")
+            .addMapping("doc", buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent", "child")));
         client().admin().indices().prepareUpdateSettings("index1")
             .setSettings(Collections.singletonMap(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(),
                 ArrayUtil.MAX_ARRAY_LENGTH))
             .get();
@@ -533,14 +483,9 @@ public void testUseMaxDocInsteadOfSize() throws Exception {
     }
 
     public void testNestedInnerHitWrappedInParentChildInnerhit() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("test")
-                .addMapping("child_type", "_parent", "type=parent_type", "nested_type", "type=nested"));
-        } else {
-            assertAcked(prepareCreate("test")
-                .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
-                    "parent_type", "child_type"), "nested_type", "nested")));
-        }
+        assertAcked(prepareCreate("test")
+            .addMapping("doc", addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true,
+                "parent_type", "child_type"), "nested_type", "nested")));
         createIndexRequest("test", "parent_type", "1", null, "key", "value").get();
         createIndexRequest("test", "child_type", "2", "1", "nested_type", Collections.singletonMap("key", "value")).get();
         refresh();
@@ -551,28 +496,17 @@
             .get();
         assertHitCount(response, 1);
         SearchHit hit = response.getHits().getAt(0);
-        if (legacy()) {
-            assertThat(hit.getInnerHits().get("child_type").getAt(0).field("_parent").getValue(), equalTo("1"));
-        } else {
-            String parentId = (String) extractValue("join_field.parent", hit.getInnerHits().get("child_type").getAt(0).getSourceAsMap());
-            assertThat(parentId, equalTo("1"));
-        }
+        String parentId = (String) extractValue("join_field.parent", hit.getInnerHits().get("child_type").getAt(0).getSourceAsMap());
+        assertThat(parentId, equalTo("1"));
         assertThat(hit.getInnerHits().get("child_type").getAt(0).getInnerHits().get("nested_type").getAt(0).field("_parent"), nullValue());
     }
 
     public void testInnerHitsWithIgnoreUnmapped() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("index1")
-                .addMapping("parent_type", "nested_type", "type=nested")
-                .addMapping("child_type", "_parent", "type=parent_type")
-            );
-        } else {
-            assertAcked(prepareCreate("index1")
-                .addMapping("doc", addFieldMappings(
-                    buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent_type", "child_type"),
-                    "nested_type", "nested"))
-            );
-        }
+        assertAcked(prepareCreate("index1")
+            .addMapping("doc", addFieldMappings(
+                buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent_type", "child_type"),
+                "nested_type", "nested"))
+        );
         assertAcked(prepareCreate("index2"));
         createIndexRequest("index1", "parent_type", "1", null, "nested_type", Collections.singletonMap("key", "value")).get();
         createIndexRequest("index1", "child_type", "2", "1").get();
         refresh();
@@ -592,18 +526,11 @@
     }
 
     public void testTooHighResultWindow() throws Exception {
-        if (legacy()) {
-            assertAcked(prepareCreate("index1")
-                .addMapping("parent_type", "nested_type", "type=nested")
-                .addMapping("child_type", "_parent", "type=parent_type")
-            );
-        } else {
-            assertAcked(prepareCreate("index1")
-                .addMapping("doc", addFieldMappings(
-                    buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent_type", "child_type"),
-                    "nested_type", "nested"))
-            );
-        }
+        assertAcked(prepareCreate("index1")
+            .addMapping("doc", addFieldMappings(
+                buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "parent_type", "child_type"),
+                "nested_type", "nested"))
+        );
        createIndexRequest("index1", "parent_type", "1", null, "nested_type", Collections.singletonMap("key", "value")).get();
         createIndexRequest("index1", "child_type",
"2", "1").get(); refresh(); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyChildQuerySearchIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyChildQuerySearchIT.java deleted file mode 100644 index c13a1d76732fc..0000000000000 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyChildQuerySearchIT.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.join.query; - -import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.search.SearchPhaseExecutionException; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.builder.SearchSourceBuilder; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; -import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; -import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.notNullValue; - -public class LegacyChildQuerySearchIT extends ChildQuerySearchIT { - - @Override - protected boolean legacy() { - return true; - } - - public void testIndexChildDocWithNoParentMapping() throws IOException { - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("child1")); - ensureGreen(); - - client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get(); - try { - client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get(); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("can't specify parent if no 
parent field has been configured")); - } - try { - client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get(); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("can't specify parent if no parent field has been configured")); - } - - refresh(); - } - - public void testAddingParentToExistingMapping() throws IOException { - createIndex("test"); - ensureGreen(); - - PutMappingResponse putMappingResponse = client().admin().indices() - .preparePutMapping("test").setType("child").setSource("number", "type=integer") - .get(); - assertThat(putMappingResponse.isAcknowledged(), equalTo(true)); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); - Map mapping = getMappingsResponse.getMappings().get("test").get("child").getSourceAsMap(); - assertThat(mapping.size(), greaterThanOrEqualTo(1)); // there are potentially some meta fields configured randomly - assertThat(mapping.get("properties"), notNullValue()); - - try { - // Adding _parent metadata field to existing mapping is prohibited: - client().admin().indices().preparePutMapping("test").setType("child").setSource(jsonBuilder().startObject().startObject("child") - .startObject("_parent").field("type", "parent").endObject() - .endObject().endObject()).get(); - fail(); - } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("The _parent field's type option can't be changed: [null]->[parent]")); - } - } - - // Issue #5783 - public void testQueryBeforeChildType() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("features") - .addMapping("posts", "_parent", "type=features") - .addMapping("specials")); - ensureGreen(); - - client().prepareIndex("test", "features", "1").setSource("field", "foo").get(); - client().prepareIndex("test", "posts", "1").setParent("1").setSource("field", "bar").get(); - refresh(); - - SearchResponse resp; - resp = client().prepareSearch("test") - .setSource(new SearchSourceBuilder().query(hasChildQuery("posts", - QueryBuilders.matchQuery("field", "bar"), ScoreMode.None))) - .get(); - assertHitCount(resp, 1L); - } - - // Issue #6256 - public void testParentFieldInMultiMatchField() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("type1") - .addMapping("type2", "_parent", "type=type1") - ); - ensureGreen(); - - client().prepareIndex("test", "type2", "1").setParent("1").setSource("field", "value").get(); - refresh(); - - SearchResponse response = client().prepareSearch("test") - .setQuery(multiMatchQuery("1", "_parent#type1")) - .get(); - - assertThat(response.getHits().getTotalHits(), equalTo(1L)); - assertThat(response.getHits().getAt(0).getId(), equalTo("1")); - } - - public void testParentFieldToNonExistingType() { - assertAcked(prepareCreate("test") - .addMapping("parent").addMapping("child", "_parent", "type=parent2")); - client().prepareIndex("test", "parent", "1").setSource("{}", XContentType.JSON).get(); - client().prepareIndex("test", "child", "1").setParent("1").setSource("{}", XContentType.JSON).get(); - refresh(); - - try { - client().prepareSearch("test") - .setQuery(hasChildQuery("child", matchAllQuery(), ScoreMode.None)) - .get(); - fail(); - } catch (SearchPhaseExecutionException e) { - } - } - - /* - Test for https://github.com/elastic/elasticsearch/issues/3444 - */ - public void testBulkUpdateDocAsUpsertWithParent() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("parent", 
"{\"parent\":{}}", XContentType.JSON) - .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON)); - ensureGreen(); - - BulkRequestBuilder builder = client().prepareBulk(); - - // It's important to use JSON parsing here and request objects: issue 3444 is related to incomplete option parsing - byte[] addParent = ( - "{" + - " \"index\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"parent\"," + - " \"_id\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"field1\" : \"value1\"" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - byte[] addChild = ( - "{" + - " \"update\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"child\"," + - " \"_id\" : \"child1\"," + - " \"parent\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"doc\" : {" + - " \"field1\" : \"value1\"" + - " }," + - " \"doc_as_upsert\" : \"true\"" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - builder.add(addParent, 0, addParent.length, XContentType.JSON); - builder.add(addChild, 0, addChild.length, XContentType.JSON); - - BulkResponse bulkResponse = builder.get(); - assertThat(bulkResponse.getItems().length, equalTo(2)); - assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); - - client().admin().indices().prepareRefresh("test").get(); - - //we check that the _parent field was set on the child document by using the has parent query - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(hasParentQuery("parent", QueryBuilders.matchAllQuery(), false)) - .get(); - - assertNoFailures(searchResponse); - assertSearchHits(searchResponse, "child1"); - } - - /* - Test for https://github.com/elastic/elasticsearch/issues/3444 - */ - public void testBulkUpdateUpsertWithParent() throws Exception { - assertAcked(prepareCreate("test") - .addMapping("parent", "{\"parent\":{}}", XContentType.JSON) - .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON)); - ensureGreen(); - - BulkRequestBuilder builder = client().prepareBulk(); - - byte[] addParent = ( - "{" + - " \"index\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"parent\"," + - " \"_id\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"field1\" : \"value1\"" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - byte[] addChild1 = ( - "{" + - " \"update\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"child\"," + - " \"_id\" : \"child1\"," + - " \"parent\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"script\" : {" + - " \"inline\" : \"ctx._source.field2 = 'value2'\"" + - " }," + - " \"lang\" : \"" + InnerHitsIT.CustomScriptPlugin.NAME + "\"," + - " \"upsert\" : {" + - " \"field1\" : \"value1'\"" + - " }" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - byte[] addChild2 = ( - "{" + - " \"update\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"child\"," + - " \"_id\" : \"child1\"," + - " \"parent\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"script\" : \"ctx._source.field2 = 'value2'\"," + - " \"upsert\" : {" + - " \"field1\" : \"value1'\"" + - " }" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - builder.add(addParent, 0, addParent.length, XContentType.JSON); - builder.add(addChild1, 0, addChild1.length, XContentType.JSON); - builder.add(addChild2, 0, addChild2.length, XContentType.JSON); - - BulkResponse bulkResponse = builder.get(); - assertThat(bulkResponse.getItems().length, equalTo(3)); - 
assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(true)); - assertThat(bulkResponse.getItems()[2].getFailure().getCause().getCause().getMessage(), - equalTo("script_lang not supported [painless]")); - - client().admin().indices().prepareRefresh("test").get(); - - SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(hasParentQuery("parent", QueryBuilders.matchAllQuery(), false)) - .get(); - - assertSearchHits(searchResponse, "child1"); - } - -} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java deleted file mode 100644 index 3eb16a925676c..0000000000000 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.join.query; - -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermInSetQuery; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.join.ScoreMode; -import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; -import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.query.IdsQueryBuilder; -import org.elasticsearch.index.query.InnerHitBuilder; -import org.elasticsearch.index.query.InnerHitContextBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.query.WrapperQueryBuilder; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.join.ParentJoinPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.fetch.subphase.InnerHitsContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.AbstractQueryTestCase; -import org.elasticsearch.test.VersionUtils; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.notNullValue; - -public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase { - protected static final String PARENT_TYPE = "parent"; - protected static final String CHILD_TYPE = "child"; - - private static String similarity; - - boolean requiresRewrite = false; - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(ParentJoinPlugin.class); - } - - @Override - protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("boolean", "BM25"); - // TODO: use a single type when inner hits have been changed to work with join field, - // this test randomly generates queries with inner hits - mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE, - STRING_FIELD_NAME, "type=text", - STRING_FIELD_NAME_2, "type=keyword", - INT_FIELD_NAME, "type=integer", 
- DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - OBJECT_FIELD_NAME, "type=object" - ))), MapperService.MergeReason.MAPPING_UPDATE); - mapperService.merge(CHILD_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, - "_parent", "type=" + PARENT_TYPE, - STRING_FIELD_NAME, "type=text", - "custom_string", "type=text,similarity=" + similarity, - INT_FIELD_NAME, "type=integer", - DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - OBJECT_FIELD_NAME, "type=object" - ))), MapperService.MergeReason.MAPPING_UPDATE); - } - - @Override - protected Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put("index.version.created", Version.V_5_6_0) // multi type - .build(); - } - - /** - * @return a {@link HasChildQueryBuilder} with random values all over the place - */ - @Override - protected HasChildQueryBuilder doCreateTestQueryBuilder() { - int min = randomIntBetween(0, Integer.MAX_VALUE / 2); - int max = randomIntBetween(min, Integer.MAX_VALUE); - - QueryBuilder innerQueryBuilder = new MatchAllQueryBuilder(); - if (randomBoolean()) { - requiresRewrite = true; - innerQueryBuilder = new WrapperQueryBuilder(innerQueryBuilder.toString()); - } - - HasChildQueryBuilder hqb = new HasChildQueryBuilder(CHILD_TYPE, innerQueryBuilder, - RandomPicks.randomFrom(random(), ScoreMode.values())); - hqb.minMaxChildren(min, max); - hqb.ignoreUnmapped(randomBoolean()); - if (randomBoolean()) { - hqb.innerHit(new InnerHitBuilder() - .setName(randomAlphaOfLengthBetween(1, 10)) - .setSize(randomIntBetween(0, 100)) - .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)) - .setIgnoreUnmapped(hqb.ignoreUnmapped())); - } - return hqb; - } - - @Override - protected void doAssertLuceneQuery(HasChildQueryBuilder queryBuilder, Query query, SearchContext searchContext) throws IOException { - assertThat(query, instanceOf(HasChildQueryBuilder.LateParsingQuery.class)); - HasChildQueryBuilder.LateParsingQuery lpq = (HasChildQueryBuilder.LateParsingQuery) query; - assertEquals(queryBuilder.minChildren(), lpq.getMinChildren()); - assertEquals(queryBuilder.maxChildren(), lpq.getMaxChildren()); - assertEquals(queryBuilder.scoreMode(), lpq.getScoreMode()); // WTF is this why do we have two? 
- if (queryBuilder.innerHit() != null) { - // have to rewrite again because the provided queryBuilder hasn't been rewritten (directly returned from - // doCreateTestQueryBuilder) - queryBuilder = (HasChildQueryBuilder) queryBuilder.rewrite(searchContext.getQueryShardContext()); - Map innerHitBuilders = new HashMap<>(); - InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders); - for (InnerHitContextBuilder builder : innerHitBuilders.values()) { - builder.build(searchContext, searchContext.innerHits()); - } - assertNotNull(searchContext.innerHits()); - assertEquals(1, searchContext.innerHits().getInnerHits().size()); - assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName())); - InnerHitsContext.InnerHitSubContext innerHits = - searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName()); - assertEquals(innerHits.size(), queryBuilder.innerHit().getSize()); - assertEquals(innerHits.sort().sort.getSort().length, 1); - assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2); - } - } - - /** - * Test (de)serialization on all previous released versions - */ - public void testSerializationBWC() throws IOException { - for (Version version : VersionUtils.allReleasedVersions()) { - HasChildQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } - assertSerialization(testQuery, version); - } - } - - public void testIllegalValues() { - QueryBuilder query = new MatchAllQueryBuilder(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> hasChildQuery(null, query, ScoreMode.None)); - assertEquals("[has_child] requires 'type' field", e.getMessage()); - - e = expectThrows(IllegalArgumentException.class, () -> hasChildQuery("foo", null, ScoreMode.None)); - assertEquals("[has_child] requires 'query' field", e.getMessage()); - - e = expectThrows(IllegalArgumentException.class, () -> hasChildQuery("foo", query, null)); - assertEquals("[has_child] requires 'score_mode' field", e.getMessage()); - - int positiveValue = randomIntBetween(0, Integer.MAX_VALUE); - HasChildQueryBuilder foo = hasChildQuery("foo", query, ScoreMode.None); // all good - e = expectThrows(IllegalArgumentException.class, () -> foo.minMaxChildren(randomIntBetween(Integer.MIN_VALUE, -1), positiveValue)); - assertEquals("[has_child] requires non-negative 'min_children' field", e.getMessage()); - - e = expectThrows(IllegalArgumentException.class, () -> foo.minMaxChildren(positiveValue, randomIntBetween(Integer.MIN_VALUE, -1))); - assertEquals("[has_child] requires non-negative 'max_children' field", e.getMessage()); - - e = expectThrows(IllegalArgumentException.class, () -> foo.minMaxChildren(positiveValue, positiveValue - 10)); - assertEquals("[has_child] 'max_children' is less than 'min_children'", e.getMessage()); - } - - public void testFromJson() throws IOException { - String query = - "{\n" + - " \"has_child\" : {\n" + - " \"query\" : {\n" + - " \"range\" : {\n" + - " \"mapped_string\" : {\n" + - " \"from\" : \"agJhRET\",\n" + - " \"to\" : \"zvqIq\",\n" + - " \"include_lower\" : true,\n" + - " \"include_upper\" : true,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - " }\n" + - " },\n" + - " \"type\" : \"child\",\n" + - " \"score_mode\" : \"avg\",\n" + - " \"min_children\" : 883170873,\n" + - " \"max_children\" : 1217235442,\n" + - " 
\"ignore_unmapped\" : false,\n" + - " \"boost\" : 2.0,\n" + - " \"_name\" : \"WNzYMJKRwePuRBh\",\n" + - " \"inner_hits\" : {\n" + - " \"name\" : \"inner_hits_name\",\n" + - " \"ignore_unmapped\" : false,\n" + - " \"from\" : 0,\n" + - " \"size\" : 100,\n" + - " \"version\" : false,\n" + - " \"explain\" : false,\n" + - " \"track_scores\" : false,\n" + - " \"sort\" : [ {\n" + - " \"mapped_string\" : {\n" + - " \"order\" : \"asc\"\n" + - " }\n" + - " } ]\n" + - " }\n" + - " }\n" + - "}"; - HasChildQueryBuilder queryBuilder = (HasChildQueryBuilder) parseQuery(query); - checkGeneratedJson(query, queryBuilder); - assertEquals(query, queryBuilder.maxChildren(), 1217235442); - assertEquals(query, queryBuilder.minChildren(), 883170873); - assertEquals(query, queryBuilder.boost(), 2.0f, 0.0f); - assertEquals(query, queryBuilder.queryName(), "WNzYMJKRwePuRBh"); - assertEquals(query, queryBuilder.childType(), "child"); - assertEquals(query, queryBuilder.scoreMode(), ScoreMode.Avg); - assertNotNull(query, queryBuilder.innerHit()); - InnerHitBuilder expected = new InnerHitBuilder("child") - .setName("inner_hits_name") - .setSize(100) - .addSort(new FieldSortBuilder("mapped_string").order(SortOrder.ASC)); - assertEquals(query, queryBuilder.innerHit(), expected); - } - - public void testToQueryInnerQueryType() throws IOException { - String[] searchTypes = new String[]{PARENT_TYPE}; - QueryShardContext shardContext = createShardContext(); - shardContext.setTypes(searchTypes); - HasChildQueryBuilder hasChildQueryBuilder = hasChildQuery(CHILD_TYPE, new IdsQueryBuilder().addIds("id"), ScoreMode.None); - Query query = hasChildQueryBuilder.toQuery(shardContext); - //verify that the context types are still the same as the ones we previously set - assertThat(shardContext.getTypes(), equalTo(searchTypes)); - assertLateParsingQuery(query, CHILD_TYPE, "id"); - } - - static void assertLateParsingQuery(Query query, String type, String id) throws IOException { - assertThat(query, instanceOf(HasChildQueryBuilder.LateParsingQuery.class)); - HasChildQueryBuilder.LateParsingQuery lateParsingQuery = (HasChildQueryBuilder.LateParsingQuery) query; - assertThat(lateParsingQuery.getInnerQuery(), instanceOf(BooleanQuery.class)); - BooleanQuery booleanQuery = (BooleanQuery) lateParsingQuery.getInnerQuery(); - assertThat(booleanQuery.clauses().size(), equalTo(2)); - //check the inner ids query, we have to call rewrite to get to check the type it's executed against - assertThat(booleanQuery.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.MUST)); - assertThat(booleanQuery.clauses().get(0).getQuery(), instanceOf(TermInSetQuery.class)); - TermInSetQuery termsQuery = (TermInSetQuery) booleanQuery.clauses().get(0).getQuery(); - Query rewrittenTermsQuery = termsQuery.rewrite(null); - assertThat(rewrittenTermsQuery, instanceOf(ConstantScoreQuery.class)); - ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) rewrittenTermsQuery; - assertThat(constantScoreQuery.getQuery(), instanceOf(BooleanQuery.class)); - BooleanQuery booleanTermsQuery = (BooleanQuery) constantScoreQuery.getQuery(); - assertThat(booleanTermsQuery.clauses().toString(), booleanTermsQuery.clauses().size(), equalTo(1)); - assertThat(booleanTermsQuery.clauses().get(0).getOccur(), equalTo(BooleanClause.Occur.SHOULD)); - assertThat(booleanTermsQuery.clauses().get(0).getQuery(), instanceOf(TermQuery.class)); - TermQuery termQuery = (TermQuery) booleanTermsQuery.clauses().get(0).getQuery(); - assertThat(termQuery.getTerm().field(), equalTo(UidFieldMapper.NAME)); - 
//we want to make sure that the inner ids query gets executed against the child type rather - // than the main type we initially set to the context - BytesRef[] ids = Uid.createUidsForTypesAndIds(Collections.singletonList(type), Collections.singletonList(id)); - assertThat(termQuery.getTerm().bytes(), equalTo(ids[0])); - //check the type filter - assertThat(booleanQuery.clauses().get(1).getOccur(), equalTo(BooleanClause.Occur.FILTER)); - assertEquals(new TypeFieldMapper.TypesQuery(new BytesRef(type)), booleanQuery.clauses().get(1).getQuery()); - } - - @Override - public void testMustRewrite() throws IOException { - try { - super.testMustRewrite(); - } catch (UnsupportedOperationException e) { - if (requiresRewrite == false) { - throw e; - } - } - } - - public void testNonDefaultSimilarity() throws Exception { - QueryShardContext shardContext = createShardContext(); - HasChildQueryBuilder hasChildQueryBuilder = - hasChildQuery(CHILD_TYPE, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); - HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); - Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .apply(Settings.EMPTY, Version.CURRENT, null); - assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); - } - - public void testIgnoreUnmapped() throws IOException { - final HasChildQueryBuilder queryBuilder = new HasChildQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None); - queryBuilder.ignoreUnmapped(true); - Query query = queryBuilder.toQuery(createShardContext()); - assertThat(query, notNullValue()); - assertThat(query, instanceOf(MatchNoDocsQuery.class)); - - final HasChildQueryBuilder failingQueryBuilder = new HasChildQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None); - failingQueryBuilder.ignoreUnmapped(false); - QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext())); - assertThat(e.getMessage(), containsString("[" + HasChildQueryBuilder.NAME + "] no mapping found for type [unmapped]")); - } - - public void testIgnoreUnmappedWithRewrite() throws IOException { - // WrapperQueryBuilder makes sure we always rewrite - final HasChildQueryBuilder queryBuilder - = new HasChildQueryBuilder("unmapped", new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), ScoreMode.None); - queryBuilder.ignoreUnmapped(true); - QueryShardContext queryShardContext = createShardContext(); - Query query = queryBuilder.rewrite(queryShardContext).toQuery(queryShardContext); - assertThat(query, notNullValue()); - assertThat(query, instanceOf(MatchNoDocsQuery.class)); - } -} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java deleted file mode 100644 index 468c1f48ea4f0..0000000000000 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasParentQueryBuilderTests.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.join.query; - -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.join.ScoreMode; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.IdsQueryBuilder; -import org.elasticsearch.index.query.InnerHitBuilder; -import org.elasticsearch.index.query.InnerHitContextBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.index.query.TermQueryBuilder; -import org.elasticsearch.index.query.WrapperQueryBuilder; -import org.elasticsearch.join.ParentJoinPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.fetch.subphase.InnerHitsContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.AbstractQueryTestCase; -import org.elasticsearch.test.VersionUtils; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.notNullValue; - -public class LegacyHasParentQueryBuilderTests extends AbstractQueryTestCase { - protected static final String PARENT_TYPE = "parent"; - protected static final String CHILD_TYPE = "child"; - - boolean requiresRewrite = false; - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(ParentJoinPlugin.class); - } - - @Override - protected Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put("index.version.created", Version.V_5_6_0) // legacy needs multi types - .build(); - } - - @Override - protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - // TODO: use a single type when inner hits have been changed to work with join field, - // this test randomly generates queries with inner hits - mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE, - STRING_FIELD_NAME, "type=text", - STRING_FIELD_NAME_2, "type=keyword", - INT_FIELD_NAME, "type=integer", - DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - OBJECT_FIELD_NAME, "type=object" - ))), MapperService.MergeReason.MAPPING_UPDATE); - 
mapperService.merge(CHILD_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, - "_parent", "type=" + PARENT_TYPE, - STRING_FIELD_NAME, "type=text", - STRING_FIELD_NAME_2, "type=keyword", - INT_FIELD_NAME, "type=integer", - DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - OBJECT_FIELD_NAME, "type=object" - ))), MapperService.MergeReason.MAPPING_UPDATE); - mapperService.merge("just_a_type", new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef("just_a_type" - ))), MapperService.MergeReason.MAPPING_UPDATE); - } - - /** - * @return a {@link HasChildQueryBuilder} with random values all over the place - */ - @Override - protected HasParentQueryBuilder doCreateTestQueryBuilder() { - QueryBuilder innerQueryBuilder = new MatchAllQueryBuilder(); - if (randomBoolean()) { - requiresRewrite = true; - innerQueryBuilder = new WrapperQueryBuilder(innerQueryBuilder.toString()); - } - HasParentQueryBuilder hqb = new HasParentQueryBuilder(PARENT_TYPE, innerQueryBuilder, randomBoolean()); - hqb.ignoreUnmapped(randomBoolean()); - if (randomBoolean()) { - hqb.innerHit(new InnerHitBuilder() - .setName(randomAlphaOfLengthBetween(1, 10)) - .setSize(randomIntBetween(0, 100)) - .addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)) - .setIgnoreUnmapped(hqb.ignoreUnmapped())); - } - return hqb; - } - - @Override - protected void doAssertLuceneQuery(HasParentQueryBuilder queryBuilder, Query query, SearchContext searchContext) throws IOException { - assertThat(query, instanceOf(HasChildQueryBuilder.LateParsingQuery.class)); - HasChildQueryBuilder.LateParsingQuery lpq = (HasChildQueryBuilder.LateParsingQuery) query; - assertEquals(queryBuilder.score() ? 
ScoreMode.Max : ScoreMode.None, lpq.getScoreMode()); - - if (queryBuilder.innerHit() != null) { - // have to rewrite again because the provided queryBuilder hasn't been rewritten (directly returned from - // doCreateTestQueryBuilder) - queryBuilder = (HasParentQueryBuilder) queryBuilder.rewrite(searchContext.getQueryShardContext()); - - assertNotNull(searchContext); - Map innerHitBuilders = new HashMap<>(); - InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders); - for (InnerHitContextBuilder builder : innerHitBuilders.values()) { - builder.build(searchContext, searchContext.innerHits()); - } - assertNotNull(searchContext.innerHits()); - assertEquals(1, searchContext.innerHits().getInnerHits().size()); - assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName())); - InnerHitsContext.InnerHitSubContext innerHits = searchContext.innerHits() - .getInnerHits().get(queryBuilder.innerHit().getName()); - assertEquals(innerHits.size(), queryBuilder.innerHit().getSize()); - assertEquals(innerHits.sort().sort.getSort().length, 1); - assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2); - } - } - - /** - * Test (de)serialization on all previous released versions - */ - public void testSerializationBWC() throws IOException { - for (Version version : VersionUtils.allReleasedVersions()) { - HasParentQueryBuilder testQuery = createTestQueryBuilder(); - if (version.before(Version.V_5_2_0) && testQuery.innerHit() != null) { - // ignore unmapped for inner_hits has been added on 5.2 - testQuery.innerHit().setIgnoreUnmapped(false); - } - assertSerialization(testQuery, version); - } - } - - public void testIllegalValues() throws IOException { - QueryBuilder query = new MatchAllQueryBuilder(); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> hasParentQuery(null, query, false)); - assertThat(e.getMessage(), equalTo("[has_parent] requires 'type' field")); - - e = expectThrows(IllegalArgumentException.class, - () -> hasParentQuery("foo", null, false)); - assertThat(e.getMessage(), equalTo("[has_parent] requires 'query' field")); - - QueryShardContext context = createShardContext(); - HasParentQueryBuilder qb = hasParentQuery("just_a_type", new MatchAllQueryBuilder(), false); - QueryShardException qse = expectThrows(QueryShardException.class, () -> qb.doToQuery(context)); - assertThat(qse.getMessage(), equalTo("[has_parent] no child types found for type [just_a_type]")); - } - - public void testToQueryInnerQueryType() throws IOException { - String[] searchTypes = new String[]{CHILD_TYPE}; - QueryShardContext shardContext = createShardContext(); - shardContext.setTypes(searchTypes); - HasParentQueryBuilder hasParentQueryBuilder = new HasParentQueryBuilder(PARENT_TYPE, new IdsQueryBuilder().addIds("id"), - false); - Query query = hasParentQueryBuilder.toQuery(shardContext); - //verify that the context types are still the same as the ones we previously set - assertThat(shardContext.getTypes(), equalTo(searchTypes)); - LegacyHasChildQueryBuilderTests.assertLateParsingQuery(query, PARENT_TYPE, "id"); - } - - @Override - public void testMustRewrite() throws IOException { - try { - super.testMustRewrite(); - } catch (UnsupportedOperationException e) { - if (requiresRewrite == false) { - throw e; - } - } - } - - public void testFromJson() throws IOException { - String json = - "{\n" + - " \"has_parent\" : {\n" + - " \"query\" : {\n" + - " \"term\" : {\n" + - " \"tag\" : {\n" + - " \"value\" : 
\"something\",\n" + - " \"boost\" : 1.0\n" + - " }\n" + - " }\n" + - " },\n" + - " \"parent_type\" : \"blog\",\n" + - " \"score\" : true,\n" + - " \"ignore_unmapped\" : false,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - "}"; - HasParentQueryBuilder parsed = (HasParentQueryBuilder) parseQuery(json); - checkGeneratedJson(json, parsed); - assertEquals(json, "blog", parsed.type()); - assertEquals(json, "something", ((TermQueryBuilder) parsed.query()).value()); - } - - public void testIgnoreUnmapped() throws IOException { - final HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder("unmapped", new MatchAllQueryBuilder(), false); - queryBuilder.ignoreUnmapped(true); - Query query = queryBuilder.toQuery(createShardContext()); - assertThat(query, notNullValue()); - assertThat(query, instanceOf(MatchNoDocsQuery.class)); - - final HasParentQueryBuilder failingQueryBuilder = new HasParentQueryBuilder("unmapped", new MatchAllQueryBuilder(), false); - failingQueryBuilder.ignoreUnmapped(false); - QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext())); - assertThat(e.getMessage(), - containsString("[" + HasParentQueryBuilder.NAME + "] query configured 'parent_type' [unmapped] is not a valid type")); - } - - public void testIgnoreUnmappedWithRewrite() throws IOException { - // WrapperQueryBuilder makes sure we always rewrite - final HasParentQueryBuilder queryBuilder = - new HasParentQueryBuilder("unmapped", new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()), false); - queryBuilder.ignoreUnmapped(true); - QueryShardContext queryShardContext = createShardContext(); - Query query = queryBuilder.rewrite(queryShardContext).toQuery(queryShardContext); - assertThat(query, notNullValue()); - assertThat(query, instanceOf(MatchNoDocsQuery.class)); - } -} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyInnerHitsIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyInnerHitsIT.java deleted file mode 100644 index 7dff631837393..0000000000000 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyInnerHitsIT.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.join.query; - -public class LegacyInnerHitsIT extends InnerHitsIT { - - @Override - protected boolean legacy() { - return true; - } - -} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java deleted file mode 100644 index 961ae943475a5..0000000000000 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyParentIdQueryBuilderTests.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.join.query; - -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.DocValuesTermsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.index.query.QueryShardException; -import org.elasticsearch.join.ParentJoinPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; -import org.hamcrest.Matchers; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; - -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.notNullValue; - -public class LegacyParentIdQueryBuilderTests extends AbstractQueryTestCase { - - protected static final String PARENT_TYPE = "parent"; - protected static final String CHILD_TYPE = "child"; - - @Override - protected Collection> getPlugins() { - return Collections.singletonList(ParentJoinPlugin.class); - } - - @Override - protected Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put("index.version.created", Version.V_5_6_0) // legacy needs multi type - .build(); - } - - @Override - protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE, - STRING_FIELD_NAME, "type=text", - INT_FIELD_NAME, "type=integer", - DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - OBJECT_FIELD_NAME, "type=object" 
- ))), MapperService.MergeReason.MAPPING_UPDATE); - mapperService.merge(CHILD_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE, - "_parent", "type=" + PARENT_TYPE, - STRING_FIELD_NAME, "type=text", - INT_FIELD_NAME, "type=integer", - DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - OBJECT_FIELD_NAME, "type=object" - ))), MapperService.MergeReason.MAPPING_UPDATE); - } - - @Override - protected ParentIdQueryBuilder doCreateTestQueryBuilder() { - return new ParentIdQueryBuilder(CHILD_TYPE, randomAlphaOfLength(4)).ignoreUnmapped(randomBoolean()); - } - - @Override - protected void doAssertLuceneQuery(ParentIdQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { - assertThat(query, Matchers.instanceOf(BooleanQuery.class)); - BooleanQuery booleanQuery = (BooleanQuery) query; - assertThat(booleanQuery.clauses().size(), Matchers.equalTo(2)); - DocValuesTermsQuery idQuery = (DocValuesTermsQuery) booleanQuery.clauses().get(0).getQuery(); - // there are no getters to get the field and terms on DocValuesTermsQuery, so lets validate by creating a - // new query based on the builder: - assertThat(idQuery, Matchers.equalTo(new DocValuesTermsQuery("_parent#" + PARENT_TYPE, queryBuilder.getId()))); - - TermQuery typeQuery = (TermQuery) booleanQuery.clauses().get(1).getQuery(); - assertThat(typeQuery.getTerm().field(), Matchers.equalTo(TypeFieldMapper.NAME)); - assertThat(typeQuery.getTerm().text(), Matchers.equalTo(queryBuilder.getType())); - } - - public void testIgnoreUnmapped() throws IOException { - final ParentIdQueryBuilder queryBuilder = new ParentIdQueryBuilder("unmapped", "foo"); - queryBuilder.ignoreUnmapped(true); - Query query = queryBuilder.toQuery(createShardContext()); - assertThat(query, notNullValue()); - assertThat(query, instanceOf(MatchNoDocsQuery.class)); - - final ParentIdQueryBuilder failingQueryBuilder = new ParentIdQueryBuilder("unmapped", "foo"); - failingQueryBuilder.ignoreUnmapped(false); - QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext())); - assertThat(e.getMessage(), containsString("[" + ParentIdQueryBuilder.NAME + "] no mapping found for type [unmapped]")); - } - -} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java index 2e2cdfb200453..87b16bc448ef1 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.join.query; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -63,17 +62,9 @@ public Settings indexSettings() { .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true); - if (legacy()) { - builder.put("index.version.created", Version.V_5_6_0); - } - return builder.build(); } - protected boolean legacy() { - return false; - } - protected IndexRequestBuilder createIndexRequest(String index, String type, String id, String parentId, Object... 
fields) { Map source = new HashMap<>(); for (int i = 0; i < fields.length; i += 2) { @@ -123,28 +114,19 @@ public static Map addFieldMappings(Map map, Stri private IndexRequestBuilder createIndexRequest(String index, String type, String id, String parentId, Map source) { String name = type; - if (legacy() == false) { - type = "doc"; - } + type = "doc"; IndexRequestBuilder indexRequestBuilder = client().prepareIndex(index, type, id); - if (legacy()) { - if (parentId != null) { - indexRequestBuilder.setParent(parentId); - } - indexRequestBuilder.setSource(source); + Map joinField = new HashMap<>(); + if (parentId != null) { + joinField.put("name", name); + joinField.put("parent", parentId); + indexRequestBuilder.setRouting(parentId); } else { - Map joinField = new HashMap<>(); - if (parentId != null) { - joinField.put("name", name); - joinField.put("parent", parentId); - indexRequestBuilder.setRouting(parentId); - } else { - joinField.put("name", name); - } - source.put("join_field", joinField); - indexRequestBuilder.setSource(source); + joinField.put("name", name); } + source.put("join_field", joinField); + indexRequestBuilder.setSource(source); return indexRequestBuilder; } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 3ee163c8fc5a3..902d46c66aa65 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -69,7 +69,6 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; @@ -582,33 +581,22 @@ protected Query doToQuery(QueryShardContext context) throws IOException { final List docs = new ArrayList<>(); final DocumentMapper docMapper; final MapperService mapperService = context.getMapperService(); - if (context.getIndexSettings().isSingleType()) { - Collection types = mapperService.types(); - if (types.size() != 1) { - throw new IllegalStateException("Only a single type should exist, but [" + types.size() + " types exists"); - } - String type = types.iterator().next(); - if (documentType != null) { - DEPRECATION_LOGGER.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); - if (documentType.equals(type) == false) { - throw new IllegalArgumentException("specified document_type [" + documentType + - "] is not equal to the actual type [" + type + "]"); - } - } - docMapper = mapperService.documentMapper(type); - for (BytesReference document : documents) { - docs.add(docMapper.parse(source(context.index().getName(), type, "_temp_id", document, documentXContentType))); - } - } else { - if (documentType == null) { - throw new IllegalArgumentException("[percolate] query is missing required [document_type] parameter"); - } - DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType); - docMapper = docMapperForType.getDocumentMapper(); - for (BytesReference document : documents) { - docs.add(docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document, 
documentXContentType))); + Collection types = mapperService.types(); + if (types.size() != 1) { + throw new IllegalStateException("Only a single type should exist, but [" + types.size() + " types exist"); + } + String type = types.iterator().next(); + if (documentType != null) { + DEPRECATION_LOGGER.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); + if (documentType.equals(type) == false) { + throw new IllegalArgumentException("specified document_type [" + documentType + + "] is not equal to the actual type [" + type + "]"); } } + docMapper = mapperService.documentMapper(type); + for (BytesReference document : documents) { + docs.add(docMapper.parse(source(context.index().getName(), type, "_temp_id", document, documentXContentType))); + } FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer(); // Need to this custom impl because FieldNameAnalyzer is strict and the percolator sometimes isn't when diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java index 5f4bcc35a4630..4d5e3d2a988f9 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhase.java @@ -57,7 +57,11 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase { @Override public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { - List percolateQueries = locatePercolatorQuery(context.query()); + innerHitsExecute(context.query(), context.searcher(), hits); + } + + static void innerHitsExecute(Query mainQuery, IndexSearcher indexSearcher, SearchHit[] hits) throws IOException { + List percolateQueries = locatePercolatorQuery(mainQuery); if (percolateQueries.isEmpty()) { return; } @@ -81,11 +85,15 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept } PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore(); - List ctxs = context.searcher().getIndexReader().leaves(); + List ctxs = indexSearcher.getIndexReader().leaves(); for (SearchHit hit : hits) { LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs)); int segmentDocId = hit.docId() - ctx.docBase; Query query = queryStore.getQueries(ctx).apply(segmentDocId); + if (query == null) { + // This is not a document with a percolator field.
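// (Presumably the query store yields null for segment documents that have no
// percolator query field at all, e.g. ordinary documents indexed alongside
// the queries; such hits cannot contribute matched slots, hence the skip.)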
+ continue; + } TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC)); if (topDocs.totalHits == 0) { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 8f1bb2a9310d3..33c40c2739cfc 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.spans.SpanFirstQuery; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanNotQuery; @@ -54,7 +55,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -128,7 +128,7 @@ private QueryAnalyzer() { * @param indexVersion The create version of the index containing the percolator queries. */ static Result analyze(Query query, Version indexVersion) { - Class queryClass = query.getClass(); + Class queryClass = query.getClass(); if (queryClass.isAnonymousClass()) { // Sometimes queries have anonymous classes in that case we need the direct super class. // (for example blended term query) @@ -235,20 +235,18 @@ private static BiFunction multiPhraseQuery() { return new Result(true, Collections.emptySet(), 0); } - if (version.onOrAfter(Version.V_6_1_0)) { - Set extractions = new HashSet<>(); - for (Term[] termArr : terms) { - extractions.addAll(Arrays.stream(termArr).map(QueryExtraction::new).collect(toSet())); - } - return new Result(false, extractions, terms.length); - } else { - Set bestTermArr = null; - for (Term[] termArr : terms) { - Set queryExtractions = Arrays.stream(termArr).map(QueryExtraction::new).collect(toSet()); - bestTermArr = selectBestExtraction(bestTermArr, queryExtractions); + // This query has the same problem as boolean queries when it comes to duplicated terms + // So to keep things simple, we just rewrite to a boolean query + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (Term[] termArr : terms) { + BooleanQuery.Builder subBuilder = new BooleanQuery.Builder(); + for (Term term : termArr) { + subBuilder.add(new TermQuery(term), Occur.SHOULD); } - return new Result(false, bestTermArr, 1); + builder.add(subBuilder.build(), Occur.FILTER); } + // Make sure to unverify the result + return booleanQuery().apply(builder.build(), version).unverify(); }; } @@ -263,41 +261,35 @@ private static BiFunction spanNearQuery() { return (query, version) -> { SpanNearQuery spanNearQuery = (SpanNearQuery) query; if (version.onOrAfter(Version.V_6_1_0)) { - Set results = Arrays.stream(spanNearQuery.getClauses()).map(clause -> analyze(clause, version)).collect(toSet()); - int msm = 0; - Set extractions = new HashSet<>(); - Set seenRangeFields = new HashSet<>(); - for (Result result : results) { - QueryExtraction[] t = result.extractions.toArray(new QueryExtraction[1]); - if (result.extractions.size() == 1 && t[0].range != null) { - if (seenRangeFields.add(t[0].range.fieldName)) { - msm += 1; - } - } else { - msm += result.minimumShouldMatch; - } - extractions.addAll(result.extractions); + // This has the same problem as boolean 
queries when it comes to duplicated clauses + // so we rewrite to a boolean query to keep things simple. + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (SpanQuery clause : spanNearQuery.getClauses()) { + builder.add(clause, Occur.FILTER); } - return new Result(false, extractions, msm); + // make sure to unverify the result + return booleanQuery().apply(builder.build(), version).unverify(); } else { - Set bestClauses = null; + Result bestClause = null; for (SpanQuery clause : spanNearQuery.getClauses()) { Result temp = analyze(clause, version); - bestClauses = selectBestExtraction(temp.extractions, bestClauses); + bestClause = selectBestResult(temp, bestClause); } - return new Result(false, bestClauses, 1); + return bestClause; } }; } private static BiFunction spanOrQuery() { return (query, version) -> { - Set terms = new HashSet<>(); SpanOrQuery spanOrQuery = (SpanOrQuery) query; + // handle it like a boolean query to not duplicate e.g. logic + // about duplicated terms + BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (SpanQuery clause : spanOrQuery.getClauses()) { - terms.addAll(analyze(clause, version).extractions); + builder.add(clause, Occur.SHOULD); } - return new Result(false, terms, Math.min(1, terms.size())); + return booleanQuery().apply(builder.build(), version); }; } @@ -318,158 +310,75 @@ private static BiFunction spanFirstQuery() { private static BiFunction booleanQuery() { return (query, version) -> { BooleanQuery bq = (BooleanQuery) query; - List clauses = bq.clauses(); int minimumShouldMatch = bq.getMinimumNumberShouldMatch(); - int numRequiredClauses = 0; - int numOptionalClauses = 0; - int numProhibitedClauses = 0; - for (BooleanClause clause : clauses) { + List requiredClauses = new ArrayList<>(); + List optionalClauses = new ArrayList<>(); + boolean hasProhibitedClauses = false; + for (BooleanClause clause : bq.clauses()) { if (clause.isRequired()) { - numRequiredClauses++; - } - if (clause.isProhibited()) { - numProhibitedClauses++; - } - if (clause.getOccur() == BooleanClause.Occur.SHOULD) { - numOptionalClauses++; + requiredClauses.add(clause.getQuery()); + } else if (clause.isProhibited()) { + hasProhibitedClauses = true; + } else { + assert clause.getOccur() == Occur.SHOULD; + optionalClauses.add(clause.getQuery()); } } - if (minimumShouldMatch > numOptionalClauses) { + + if (minimumShouldMatch > optionalClauses.size() + || (requiredClauses.isEmpty() && optionalClauses.isEmpty())) { return new Result(false, Collections.emptySet(), 0); } - if (numRequiredClauses > 0) { - if (version.onOrAfter(Version.V_6_1_0)) { - UnsupportedQueryException uqe = null; - List results = new ArrayList<>(numRequiredClauses); - for (BooleanClause clause : clauses) { - if (clause.isRequired()) { - // skip must_not clauses, we don't need to remember the things that do *not* match... - // skip should clauses, this bq has must clauses, so we don't need to remember should clauses, - // since they are completely optional.
- - try { - Result subResult = analyze(clause.getQuery(), version); - if (subResult.matchAllDocs == false && subResult.extractions.isEmpty()) { - // doesn't match anything - return subResult; - } - results.add(subResult); - } catch (UnsupportedQueryException e) { - uqe = e; - } - } - } - - if (results.isEmpty()) { - if (uqe != null) { - // we're unable to select the best clause and an exception occurred, so we bail - throw uqe; - } else { - // We didn't find a clause and no exception occurred, so this bq only contained MatchNoDocsQueries, - return new Result(true, Collections.emptySet(), 1); - } - } else { - int msm = 0; - boolean requiredShouldClauses = minimumShouldMatch > 0 && numOptionalClauses > 0; - boolean verified = uqe == null && numProhibitedClauses == 0 && requiredShouldClauses == false; - boolean matchAllDocs = true; - Set extractions = new HashSet<>(); - Set seenRangeFields = new HashSet<>(); - for (Result result : results) { - // In case that there are duplicate query extractions we need to be careful with incrementing msm, - // because that could lead to valid matches not becoming candidate matches: - // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3) - // doc: field: val1 val2 val3 - // So lets be protective and decrease the msm: - int resultMsm = result.minimumShouldMatch; - for (QueryExtraction queryExtraction : result.extractions) { - if (queryExtraction.range != null) { - // In case of range queries each extraction does not simply increment the minimum_should_match - // for that percolator query like for a term based extraction, so that can lead to more false - // positives for percolator queries with range queries than term based queries. - // The is because the way number fields are extracted from the document to be percolated. - // Per field a single range is extracted and if a percolator query has two or more range queries - // on the same field, then the minimum should match can be higher than clauses in the CoveringQuery. - // Therefore right now the minimum should match is incremented once per number field when processing - // the percolator query at index time. 
- if (seenRangeFields.add(queryExtraction.range.fieldName)) { - resultMsm = 1; - } else { - resultMsm = 0; - } - } - if (extractions.contains(queryExtraction)) { - // To protect against negative msm: - // (sub results could consist out of disjunction and conjunction and - // then we do not know which extraction contributed to msm) - resultMsm = Math.max(0, resultMsm - 1); - } - } - msm += resultMsm; - - if (result.verified == false - // If some inner extractions are optional, the result can't be verified - || result.minimumShouldMatch < result.extractions.size()) { - verified = false; - } - matchAllDocs &= result.matchAllDocs; - extractions.addAll(result.extractions); - } - if (matchAllDocs) { - return new Result(matchAllDocs, verified); - } else { - return new Result(verified, extractions, msm); + if (requiredClauses.size() > 0) { + if (minimumShouldMatch > 0) { + // mix of required clauses and required optional clauses, we turn it into + // a pure conjunction by moving the optional clauses to a sub query to + // simplify logic + BooleanQuery.Builder minShouldMatchQuery = new BooleanQuery.Builder(); + minShouldMatchQuery.setMinimumNumberShouldMatch(minimumShouldMatch); + for (Query q : optionalClauses) { + minShouldMatchQuery.add(q, Occur.SHOULD); } + requiredClauses.add(minShouldMatchQuery.build()); + optionalClauses.clear(); + minimumShouldMatch = 0; } else { - Set bestClause = null; - UnsupportedQueryException uqe = null; - for (BooleanClause clause : clauses) { - if (clause.isRequired() == false) { - // skip must_not clauses, we don't need to remember the things that do *not* match... - // skip should clauses, this bq has must clauses, so we don't need to remember should clauses, - // since they are completely optional. - continue; - } - - Result temp; - try { - temp = analyze(clause.getQuery(), version); - } catch (UnsupportedQueryException e) { - uqe = e; - continue; - } - bestClause = selectBestExtraction(temp.extractions, bestClause); - } - if (bestClause != null) { - return new Result(false, bestClause, 1); - } else { - if (uqe != null) { - // we're unable to select the best clause and an exception occurred, so we bail - throw uqe; - } else { - // We didn't find a clause and no exception occurred, so this bq only contained MatchNoDocsQueries, - return new Result(true, Collections.emptySet(), 1); - } - } + optionalClauses.clear(); // only matter for scoring, not matching } + } + + // Now we have either a pure conjunction or a pure disjunction, with at least one clause + Result result; + if (requiredClauses.size() > 0) { + assert optionalClauses.isEmpty(); + assert minimumShouldMatch == 0; + result = handleConjunctionQuery(requiredClauses, version); } else { - List disjunctions = new ArrayList<>(numOptionalClauses); - for (BooleanClause clause : clauses) { - if (clause.getOccur() == BooleanClause.Occur.SHOULD) { - disjunctions.add(clause.getQuery()); - } + assert requiredClauses.isEmpty(); + if (minimumShouldMatch == 0) { + // Lucene always requires one matching clause for disjunctions + minimumShouldMatch = 1; } - return handleDisjunction(disjunctions, minimumShouldMatch, numProhibitedClauses > 0, version); + result = handleDisjunctionQuery(optionalClauses, minimumShouldMatch, version); } + + if (hasProhibitedClauses) { + result = result.unverify(); + } + + return result; }; } private static BiFunction disjunctionMaxQuery() { return (query, version) -> { List disjuncts = ((DisjunctionMaxQuery) query).getDisjuncts(); - return handleDisjunction(disjuncts, 1, false, version);
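// A disjunction max query matches a document exactly when at least one of its
// disjuncts matches; the tie breaker only influences scoring, never matching.
// Below is a minimal illustrative sketch (the class and method names are ours,
// not from this code base) of that equivalence, which is what justifies
// analyzing dis_max like a pure SHOULD-clause boolean query with
// minimumShouldMatch = 1:
class DisMaxExtractionSketch {
    static org.apache.lucene.search.BooleanQuery asEquivalentDisjunction(org.apache.lucene.search.DisjunctionMaxQuery dmq) {
        org.apache.lucene.search.BooleanQuery.Builder builder = new org.apache.lucene.search.BooleanQuery.Builder();
        // at least one disjunct has to match, mirroring dis_max semantics
        builder.setMinimumNumberShouldMatch(1);
        for (org.apache.lucene.search.Query disjunct : dmq.getDisjuncts()) {
            builder.add(disjunct, org.apache.lucene.search.BooleanClause.Occur.SHOULD);
        }
        return builder.build();
    }
}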
+ if (disjuncts.isEmpty()) { + return new Result(false, Collections.emptySet(), 0); + } else { + return handleDisjunctionQuery(disjuncts, 1, version); + } }; } @@ -536,19 +445,148 @@ private static BiFunction toParentBlockJoinQuery() { }; } - private static Result handleDisjunction(List disjunctions, int requiredShouldClauses, boolean otherClauses, - Version version) { + private static Result handleConjunctionQuery(List conjunctions, Version version) { + UnsupportedQueryException uqe = null; + List results = new ArrayList<>(conjunctions.size()); + boolean success = false; + for (Query query : conjunctions) { + try { + Result subResult = analyze(query, version); + if (subResult.isMatchNoDocs()) { + return subResult; + } + results.add(subResult); + success = true; + } catch (UnsupportedQueryException e) { + uqe = e; + } + } + + if (success == false) { + // No clauses could be extracted + if (uqe != null) { + + throw uqe; + } else { + // Empty conjunction + return new Result(true, Collections.emptySet(), 0); + } + } + Result result = handleConjunction(results, version); + if (uqe != null) { + result = result.unverify(); + } + return result; + } + + private static Result handleConjunction(List conjunctions, Version version) { + if (conjunctions.isEmpty()) { + throw new IllegalArgumentException("Must have at least one conjunction sub result"); + } + if (version.onOrAfter(Version.V_6_1_0)) { + for (Result subResult : conjunctions) { + if (subResult.isMatchNoDocs()) { + return subResult; + } + } + int msm = 0; + boolean verified = true; + boolean matchAllDocs = true; + boolean hasDuplicateTerms = false; + Set extractions = new HashSet<>(); + Set seenRangeFields = new HashSet<>(); + for (Result result : conjunctions) { + // In case that there are duplicate query extractions we need to be careful with incrementing msm, + // because that could lead to valid matches not becoming candidate matches: + // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3) + // doc: field: val1 val2 val3 + // So let's be protective and decrease the msm: + int resultMsm = result.minimumShouldMatch; + for (QueryExtraction queryExtraction : result.extractions) { + if (queryExtraction.range != null) { + // In case of range queries each extraction does not simply increment the minimum_should_match + // for that percolator query like for a term based extraction, so that can lead to more false + // positives for percolator queries with range queries than term based queries. + // This is because of the way number fields are extracted from the document to be percolated. + // Per field a single range is extracted and if a percolator query has two or more range queries + // on the same field, then the minimum should match can be higher than clauses in the CoveringQuery. + // Therefore right now the minimum should match is incremented once per number field when processing + // the percolator query at index time.
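// Illustrative numbers for the range rule above (field name and bounds are
// ours): a percolator query with int_field:[0 TO 10] AND int_field:[5 TO 15]
// produces two range extractions on the same field, yet the document being
// percolated is indexed with a single range per number field, so at most one
// of the two extractions can match the CoveringQuery. Counting the field only
// once via seenRangeFields keeps the msm from overshooting and wrongly
// filtering such percolator queries out.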
+ if (seenRangeFields.add(queryExtraction.range.fieldName)) { + resultMsm = 1; + } else { + resultMsm = 0; + } + } + + if (extractions.contains(queryExtraction)) { + + resultMsm = 0; + verified = false; + break; + } + } + msm += resultMsm; + + if (result.verified == false + // If some inner extractions are optional, the result can't be verified + || result.minimumShouldMatch < result.extractions.size()) { + verified = false; + } + matchAllDocs &= result.matchAllDocs; + extractions.addAll(result.extractions); + } + if (matchAllDocs) { + return new Result(matchAllDocs, verified); + } else { + return new Result(verified, extractions, hasDuplicateTerms ? 1 : msm); + } + } else { + Result bestClause = null; + for (Result result : conjunctions) { + bestClause = selectBestResult(result, bestClause); + } + return bestClause; + } + } + + private static Result handleDisjunctionQuery(List disjunctions, int requiredShouldClauses, Version version) { + List subResults = new ArrayList<>(); + for (Query query : disjunctions) { + // if either query fails extraction, we need to propagate as we could miss hits otherwise + Result subResult = analyze(query, version); + subResults.add(subResult); + } + return handleDisjunction(subResults, requiredShouldClauses, version); + } + + private static Result handleDisjunction(List disjunctions, int requiredShouldClauses, Version version) { // Keep track of the msm for each clause: - List clauses = new ArrayList<>(disjunctions.size()); - boolean verified = otherClauses == false; + List clauses = new ArrayList<>(disjunctions.size()); + boolean verified; if (version.before(Version.V_6_1_0)) { - verified &= requiredShouldClauses <= 1; + verified = requiredShouldClauses <= 1; + } else { + verified = true; } int numMatchAllClauses = 0; + boolean hasRangeExtractions = false; + + // In case that there are duplicate extracted terms / ranges then the msm should always be equal to the clause + // with lowest msm, because at percolate time there is no way to know the number of repetitions per + // extracted term, and a field value from a percolator document may have more 'weight' than others. + // Example percolator query: value1 OR value2 OR value2 OR value3 OR value3 OR value3 OR value4 OR value5 (msm set to 3) + // In the above example query the extracted msm would be 3 + // Example document1: value1 value2 value3 + // With the msm and extracted terms this would match and is expected behaviour + // Example document2: value3 + // This document should match too (value3 appears in 3 clauses), but with msm set to 3 and the fact + // that only distinct values are indexed in the extracted terms field, this document would + // never match.
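// Putting numbers on the example above (illustrative): every clause extracts
// a term with msm 1, and the distinct extractions are value1..value5. Summing
// the three lowest per-clause msm values would yield a combined msm of 3, but
// document2 holds the single distinct term value3 while satisfying three
// clauses of the query, so a combined msm of 3 would wrongly reject it. When
// duplicates are detected, the code below therefore falls back to the lowest
// per-clause msm instead of summing.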
+ boolean hasDuplicateTerms = false; + Set terms = new HashSet<>(); for (int i = 0; i < disjunctions.size(); i++) { - Query disjunct = disjunctions.get(i); - Result subResult = analyze(disjunct, version); + Result subResult = disjunctions.get(i); if (subResult.verified == false // one of the sub queries requires more than one term to match, we can't // verify it with a single top-level min_should_match @@ -564,35 +602,37 @@ private static Result handleDisjunction(List disjunctions, int requiredSh int resultMsm = subResult.minimumShouldMatch; for (QueryExtraction extraction : subResult.extractions) { if (terms.add(extraction) == false) { - resultMsm = Math.max(0, resultMsm - 1); + verified = false; + hasDuplicateTerms = true; } } - clauses.add(new DisjunctionClause(resultMsm, subResult.extractions.stream() - .filter(extraction -> extraction.range != null) - .map(extraction -> extraction.range.fieldName) - .collect(toSet()))); + if (hasRangeExtractions == false) { + hasRangeExtractions = subResult.extractions.stream().anyMatch(qe -> qe.range != null); + } + clauses.add(resultMsm); } boolean matchAllDocs = numMatchAllClauses > 0 && numMatchAllClauses >= requiredShouldClauses; int msm = 0; - if (version.onOrAfter(Version.V_6_1_0)) { - Set seenRangeFields = new HashSet<>(); + if (version.onOrAfter(Version.V_6_1_0) && + // Having ranges would mean we need to juggle with the msm and that complicates this logic a lot, + // so for now lets not do it. + hasRangeExtractions == false) { // Figure out what the combined msm is for this disjunction: // (sum the lowest required clauses, otherwise we're too strict and queries may not match) clauses = clauses.stream() - .filter(o -> o.msm > 0) - .sorted(Comparator.comparingInt(o -> o.msm)) + .filter(val -> val > 0) + .sorted() .collect(Collectors.toList()); - int limit = Math.min(clauses.size(), Math.max(1, requiredShouldClauses)); - for (int i = 0; i < limit; i++) { - if (clauses.get(i).rangeFieldNames.isEmpty() == false) { - for (String rangeField: clauses.get(i).rangeFieldNames) { - if (seenRangeFields.add(rangeField)) { - msm += 1; - } - } - } else { - msm += clauses.get(i).msm; + + // When there are duplicated query extractions, percolator can no longer reliably determine msm across this disjunction + if (hasDuplicateTerms) { + // pick lowest msm: + msm = clauses.get(0); + } else { + int limit = Math.min(clauses.size(), Math.max(1, requiredShouldClauses)); + for (int i = 0; i < limit; i++) { + msm += clauses.get(i); } } } else { @@ -605,33 +645,40 @@ private static Result handleDisjunction(List disjunctions, int requiredSh } } - static class DisjunctionClause { - - final int msm; - final Set rangeFieldNames; - - DisjunctionClause(int msm, Set rangeFieldNames) { - this.msm = msm; - this.rangeFieldNames = rangeFieldNames; - } - } - - static Set selectBestExtraction(Set extractions1, Set extractions2) { - assert extractions1 != null || extractions2 != null; - if (extractions1 == null) { - return extractions2; - } else if (extractions2 == null) { - return extractions1; + /** + * Return an extraction for the conjunction of {@code result1} and {@code result2} + * by picking up clauses that look most restrictive and making it unverified if + * the other clause is not null and doesn't match all documents. This is used by + * 6.0.0 indices which didn't use the terms_set query. 
+ */ + static Result selectBestResult(Result result1, Result result2) { + assert result1 != null || result2 != null; + if (result1 == null) { + return result2; + } else if (result2 == null) { + return result1; + } else if (result1.matchAllDocs) { // conjunction with match_all + Result result = result2; + if (result1.verified == false) { + result = result.unverify(); + } + return result; + } else if (result2.matchAllDocs) { // conjunction with match_all + Result result = result1; + if (result2.verified == false) { + result = result.unverify(); + } + return result; } else { // Prefer term based extractions over range based extractions: boolean onlyRangeBasedExtractions = true; - for (QueryExtraction clause : extractions1) { + for (QueryExtraction clause : result1.extractions) { if (clause.term != null) { onlyRangeBasedExtractions = false; break; } } - for (QueryExtraction clause : extractions2) { + for (QueryExtraction clause : result2.extractions) { if (clause.term != null) { onlyRangeBasedExtractions = false; break; @@ -639,28 +686,28 @@ static Set selectBestExtraction(Set extraction } if (onlyRangeBasedExtractions) { - BytesRef extraction1SmallestRange = smallestRange(extractions1); - BytesRef extraction2SmallestRange = smallestRange(extractions2); + BytesRef extraction1SmallestRange = smallestRange(result1.extractions); + BytesRef extraction2SmallestRange = smallestRange(result2.extractions); if (extraction1SmallestRange == null) { - return extractions2; + return result2.unverify(); } else if (extraction2SmallestRange == null) { - return extractions1; + return result1.unverify(); } // Keep the clause with smallest range, this is likely to be the rarest. if (extraction1SmallestRange.compareTo(extraction2SmallestRange) <= 0) { - return extractions1; + return result1.unverify(); } else { - return extractions2; + return result2.unverify(); } } else { - int extraction1ShortestTerm = minTermLength(extractions1); - int extraction2ShortestTerm = minTermLength(extractions2); + int extraction1ShortestTerm = minTermLength(result1.extractions); + int extraction2ShortestTerm = minTermLength(result2.extractions); // keep the clause with longest terms, this likely to be rarest. if (extraction1ShortestTerm >= extraction2ShortestTerm) { - return extractions1; + return result1.unverify(); } else { - return extractions2; + return result2.unverify(); } } } @@ -695,6 +742,13 @@ private static BytesRef smallestRange(Set terms) { return min; } + /** + * Query extraction result. A result is a candidate for a given document either if: + * - `matchAllDocs` is true + * - `extractions` and the document have `minimumShouldMatch` terms in common + * Further more, the match doesn't need to be verified if `verified` is true, checking + * `matchAllDocs` and `extractions` is enough. 
 static class Result {

     final Set<QueryExtraction> extractions;
@@ -702,24 +756,36 @@ static class Result {
     final int minimumShouldMatch;
     final boolean matchAllDocs;

-    Result(boolean verified, Set<QueryExtraction> extractions, int minimumShouldMatch) {
+    private Result(boolean matchAllDocs, boolean verified, Set<QueryExtraction> extractions, int minimumShouldMatch) {
         if (minimumShouldMatch > extractions.size()) {
             throw new IllegalArgumentException("minimumShouldMatch can't be greater than the number of extractions: "
                     + minimumShouldMatch + " > " + extractions.size());
         }
+        this.matchAllDocs = matchAllDocs;
         this.extractions = extractions;
         this.verified = verified;
         this.minimumShouldMatch = minimumShouldMatch;
-        this.matchAllDocs = false;
+    }
+
+    Result(boolean verified, Set<QueryExtraction> extractions, int minimumShouldMatch) {
+        this(false, verified, extractions, minimumShouldMatch);
     }

     Result(boolean matchAllDocs, boolean verified) {
-        this.extractions = Collections.emptySet();
-        this.verified = verified;
-        this.minimumShouldMatch = 0;
-        this.matchAllDocs = matchAllDocs;
+        this(matchAllDocs, verified, Collections.emptySet(), 0);
     }

+    Result unverify() {
+        if (verified) {
+            return new Result(matchAllDocs, false, extractions, minimumShouldMatch);
+        } else {
+            return this;
+        }
+    }
+
+    boolean isMatchNoDocs() {
+        return matchAllDocs == false && extractions.isEmpty();
+    }
 }

 static class QueryExtraction {
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
index 27d72b2926749..106358b6cf031 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java
@@ -38,9 +38,12 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.MultiDocValues;
+import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.memory.MemoryIndex;
 import org.apache.lucene.queries.BlendedTermQuery;
 import org.apache.lucene.queries.CommonTermsQuery;
@@ -318,6 +321,103 @@ private BooleanQuery createRandomBooleanQuery(int depth, List<String> fields, Ma
         return builder.build();
     }

+    public void testDuel2() throws Exception {
+        List<String> stringValues = new ArrayList<>();
+        stringValues.add("value1");
+        stringValues.add("value2");
+        stringValues.add("value3");
+
+        MappedFieldType intFieldType = mapperService.documentMapper("type").mappers()
+            .getMapper("int_field").fieldType();
+        List<int[]> ranges = new ArrayList<>();
+        ranges.add(new int[]{-5, 5});
+        ranges.add(new int[]{0, 10});
+        ranges.add(new int[]{15, 50});
+
+        List<ParseContext.Document> documents = new ArrayList<>();
+        {
+            addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents);
+        }
+        {
+            addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents);
+        }
+        {
+            int[] range = randomFrom(ranges);
+            Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, null);
+            addQuery(rangeQuery, documents);
+        }
+        {
+            int numBooleanQueries = randomIntBetween(1, 5);
+            for (int i = 0; i < numBooleanQueries; i++) {
+                Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType);
+                addQuery(randomBQ, documents);
+            }
+        }
+        {
+            addQuery(new MatchNoDocsQuery(), documents);
+        }
+        {
+            addQuery(new MatchAllDocsQuery(), documents);
+        }
+
+        indexWriter.addDocuments(documents);
+        indexWriter.close();
+        directoryReader = DirectoryReader.open(directory);
+        IndexSearcher shardSearcher = newSearcher(directoryReader);
+        // Disable query cache, because ControlQuery cannot be cached...
+        shardSearcher.setQueryCache(null);
+
+        Document document = new Document();
+        for (String value : stringValues) {
+            document.add(new TextField("string_field", value, Field.Store.NO));
+            logger.info("Test with document: {}", document);
+            MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
+            duelRun(queryStore, memoryIndex, shardSearcher);
+        }
+        for (int[] range : ranges) {
+            List<Field> numberFields =
+                NumberFieldMapper.NumberType.INTEGER.createFields("int_field", between(range[0], range[1]), true, true, false);
+            for (Field numberField : numberFields) {
+                document.add(numberField);
+            }
+            logger.info("Test with document: {}", document);
+            MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
+            duelRun(queryStore, memoryIndex, shardSearcher);
+        }
+    }
+
+    private BooleanQuery randomBQ(int depth, List<String> stringValues, List<int[]> ranges, MappedFieldType intFieldType) {
+        final int numClauses = randomIntBetween(1, 4);
+        final boolean onlyShouldClauses = randomBoolean();
+        final BooleanQuery.Builder builder = new BooleanQuery.Builder();
+
+        int numShouldClauses = 0;
+        for (int i = 0; i < numClauses; i++) {
+            Query subQuery;
+            if (randomBoolean() && depth <= 3) {
+                subQuery = randomBQ(depth + 1, stringValues, ranges, intFieldType);
+            } else if (randomBoolean()) {
+                int[] range = randomFrom(ranges);
+                subQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, null);
+            } else {
+                subQuery = new TermQuery(new Term("string_field", randomFrom(stringValues)));
+            }
+
+            Occur occur;
+            if (onlyShouldClauses) {
+                occur = Occur.SHOULD;
+            } else {
+                occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD));
+            }
+            if (occur == Occur.SHOULD) {
+                numShouldClauses++;
+            }
+            builder.add(subQuery, occur);
+        }
+        builder.setMinimumNumberShouldMatch(randomIntBetween(0, numShouldClauses));
+        return builder.build();
+    }
+
     public void testDuelIdBased() throws Exception {
         List<Function<String, Query>> queryFunctions = new ArrayList<>();
         queryFunctions.add((id) -> new PrefixQuery(new Term("field", id)));
@@ -858,6 +958,90 @@ public void testDuplicatedClauses() throws Exception {
         assertEquals(1, topDocs.scoreDocs[1].doc);
     }

+    public void testDuplicatedClauses2() throws Exception {
+        List<ParseContext.Document> docs = new ArrayList<>();
+
+        BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        builder.setMinimumNumberShouldMatch(3);
+        builder.add(new TermQuery(new Term("field", "value1")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value2")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value2")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value3")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value3")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value3")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value4")), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value5")), Occur.SHOULD);
+        addQuery(builder.build(), docs);
+
+        indexWriter.addDocuments(docs);
+        indexWriter.close();
+        directoryReader = DirectoryReader.open(directory);
+        IndexSearcher shardSearcher = newSearcher(directoryReader);
+        shardSearcher.setQueryCache(null);
+
+        Version v = Version.CURRENT;
+        List<BytesReference> sources = Collections.singletonList(new BytesArray("{}"));
+
+        MemoryIndex memoryIndex = new MemoryIndex();
+        memoryIndex.addField("field", "value1 value4 value5", new WhitespaceAnalyzer());
+        IndexSearcher percolateSearcher = memoryIndex.createSearcher();
+        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
+        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
+        assertEquals(1L, topDocs.totalHits);
+        assertEquals(0, topDocs.scoreDocs[0].doc);
+
+        memoryIndex = new MemoryIndex();
+        memoryIndex.addField("field", "value1 value2", new WhitespaceAnalyzer());
+        percolateSearcher = memoryIndex.createSearcher();
+        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
+        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
+        assertEquals(1L, topDocs.totalHits);
+        assertEquals(0, topDocs.scoreDocs[0].doc);
+
+        memoryIndex = new MemoryIndex();
+        memoryIndex.addField("field", "value3", new WhitespaceAnalyzer());
+        percolateSearcher = memoryIndex.createSearcher();
+        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
+        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
+        assertEquals(1L, topDocs.totalHits);
+        assertEquals(0, topDocs.scoreDocs[0].doc);
+    }
+
+    public void testMsmAndRanges_disjunction() throws Exception {
+        // Recreates a similar scenario that made testDuel() fail randomly:
+        // https://github.com/elastic/elasticsearch/issues/29393
+        List<ParseContext.Document> docs = new ArrayList<>();
+        BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        builder.setMinimumNumberShouldMatch(2);
+
+        BooleanQuery.Builder builder1 = new BooleanQuery.Builder();
+        builder1.add(new TermQuery(new Term("field", "value1")), Occur.FILTER);
+        builder.add(builder1.build(), Occur.SHOULD);
+        builder.add(new TermQuery(new Term("field", "value2")), Occur.MUST_NOT);
+        builder.add(IntPoint.newRangeQuery("int_field", 0, 5), Occur.SHOULD);
+        builder.add(IntPoint.newRangeQuery("int_field", 6, 10), Occur.SHOULD);
+        addQuery(builder.build(), docs);
+
+        indexWriter.addDocuments(docs);
+        indexWriter.close();
+        directoryReader = DirectoryReader.open(directory);
+        IndexSearcher shardSearcher = newSearcher(directoryReader);
+        shardSearcher.setQueryCache(null);
+
+        Version v = Version.CURRENT;
+        List<BytesReference> sources = Collections.singletonList(new BytesArray("{}"));
+
+        Document document = new Document();
+        document.add(new IntPoint("int_field", 4));
+        document.add(new IntPoint("int_field", 7));
+        MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
+        IndexSearcher percolateSearcher = memoryIndex.createSearcher();
+        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
+        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
+        assertEquals(1L, topDocs.totalHits);
+        assertEquals(0, topDocs.scoreDocs[0].doc);
+    }
+
     private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException {
         boolean requireScore = randomBoolean();
         IndexSearcher percolateSearcher = memoryIndex.createSearcher();
@@ -900,7 +1084,17 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd
             // Additional
stored information that is useful when debugging: String queryToString = shardSearcher.doc(controlTopDocs.scoreDocs[i].doc).get("query_to_string"); - logger.error("topDocs.scoreDocs[{}].query_to_string={}", i, queryToString); + logger.error("controlTopDocs.scoreDocs[{}].query_to_string={}", i, queryToString); + + TermsEnum tenum = MultiFields.getFields(shardSearcher.getIndexReader()).terms(fieldType.queryTermsField.name()).iterator(); + StringBuilder builder = new StringBuilder(); + for (BytesRef term = tenum.next(); term != null; term = tenum.next()) { + PostingsEnum penum = tenum.postings(null); + if (penum.advance(controlTopDocs.scoreDocs[i].doc) == controlTopDocs.scoreDocs[i].doc) { + builder.append(term.utf8ToString()).append(','); + } + } + logger.error("controlTopDocs.scoreDocs[{}].query_terms_field={}", i, builder.toString()); NumericDocValues numericValues = MultiDocValues.getNumericValues(shardSearcher.getIndexReader(), fieldType.minimumShouldMatchField.name()); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 16be166b0addc..3b3ff4ed15c87 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -103,9 +103,6 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws mapperService.merge(docType, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(docType, STRING_FIELD_NAME, "type=text" ))), MapperService.MergeReason.MAPPING_UPDATE); - if (mapperService.getIndexSettings().isSingleType() == false) { - PercolateQueryBuilderTests.docType = docType; - } } @Override @@ -244,13 +241,7 @@ public void testRequiredParameters() { public void testFromJsonNoDocumentType() throws IOException { QueryShardContext queryShardContext = createShardContext(); QueryBuilder queryBuilder = parseQuery("{\"percolate\" : { \"document\": {}, \"field\":\"" + queryField + "\"}}"); - if (indexVersionCreated.before(Version.V_6_0_0_alpha1)) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> queryBuilder.toQuery(queryShardContext)); - assertThat(e.getMessage(), equalTo("[percolate] query is missing required [document_type] parameter")); - } else { - queryBuilder.toQuery(queryShardContext); - } + queryBuilder.toQuery(queryShardContext); } public void testBothDocumentAndDocumentsSpecified() throws IOException { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index b338151c5acd0..5b5ac41d25f1f 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -877,7 +877,7 @@ public void testDuplicatedClauses() throws Exception { assertThat(values.get(1), equalTo("field\0value2")); assertThat(values.get(2), equalTo("field\0value3")); int msm = doc.rootDoc().getFields(fieldType.minimumShouldMatchField.name())[0].numericValue().intValue(); - assertThat(msm, equalTo(3)); + assertThat(msm, equalTo(2)); qb = boolQuery() .must(boolQuery().must(termQuery("field", "value1")).must(termQuery("field", "value2"))) @@ -901,7 +901,7 @@ public 
void testDuplicatedClauses() throws Exception { assertThat(values.get(3), equalTo("field\0value4")); assertThat(values.get(4), equalTo("field\0value5")); msm = doc.rootDoc().getFields(fieldType.minimumShouldMatchField.name())[0].numericValue().intValue(); - assertThat(msm, equalTo(4)); + assertThat(msm, equalTo(2)); qb = boolQuery() .minimumShouldMatch(3) diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index d4b48174d76d1..a428726225b95 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -18,15 +18,82 @@ */ package org.elasticsearch.percolator; +import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; import java.util.stream.IntStream; public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase { + public void testHitsExecute() throws Exception { + try (Directory directory = newDirectory()) { + // Need a one doc index: + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + Document document = new Document(); + indexWriter.addDocument(document); + } + + try (DirectoryReader reader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = new IndexSearcher(reader); + + // A match: + { + SearchHit[] hits = new SearchHit[]{new SearchHit(0)}; + PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); + PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), + new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + + PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); + assertNotNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); + assertEquals(0, (int) hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX).getValue()); + } + + // No match: + { + SearchHit[] hits = new SearchHit[]{new SearchHit(0)}; + PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); + PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), + new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + + PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); + 
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); + } + + // No query: + { + SearchHit[] hits = new SearchHit[]{new SearchHit(0)}; + PercolateQuery.QueryStore queryStore = ctx -> docId -> null; + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); + PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(), + new MatchAllDocsQuery(), memoryIndex.createSearcher(), new MatchNoDocsQuery()); + + PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits); + assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX)); + } + } + } + } + public void testConvertTopDocsToSlots() { ScoreDoc[] scoreDocs = new ScoreDoc[randomInt(128)]; for (int i = 0; i < scoreDocs.length; i++) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java index 3e11f91c4bcea..ee31a81ae168a 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchIT.java @@ -97,7 +97,7 @@ public void testPercolatorQuery() throws Exception { logger.info("percolating doc with 1 field"); response = client().prepareSearch() .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -109,7 +109,7 @@ public void testPercolatorQuery() throws Exception { logger.info("percolating doc with 2 fields"); response = client().prepareSearch() .setQuery(new PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 3); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -125,7 +125,7 @@ public void testPercolatorQuery() throws Exception { BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").endObject()), BytesReference.bytes(jsonBuilder().startObject().field("field1", "value").field("field2", "value").endObject()) ), XContentType.JSON)) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 3); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -312,7 +312,7 @@ public void testPercolatorQueryExistingDocument() throws Exception { logger.info("percolating doc with 1 field"); response = client().prepareSearch() .setQuery(new PercolateQueryBuilder("query", "test", "type", "5", null, null, null)) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 2); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -321,7 +321,7 @@ public void testPercolatorQueryExistingDocument() throws Exception { logger.info("percolating doc with 2 fields"); response = client().prepareSearch() .setQuery(new PercolateQueryBuilder("query", "test", "type", "6", null, null, null)) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 3); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -408,7 +408,7 @@ public void testPercolatorSpecificQueries() throws Exception { .endObject()); SearchResponse response = client().prepareSearch() .setQuery(new 
PercolateQueryBuilder("query", source, XContentType.JSON)) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(response, 4); assertThat(response.getHits().getAt(0).getId(), equalTo("1")); @@ -455,7 +455,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { SearchResponse searchResponse = client().prepareSearch() .setQuery(new PercolateQueryBuilder("query", document, XContentType.JSON)) .highlighter(new HighlightBuilder().field("field1")) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(searchResponse, 5); @@ -482,7 +482,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { .should(new PercolateQueryBuilder("query", document2, XContentType.JSON).setName("query2")) ) .highlighter(new HighlightBuilder().field("field1")) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); logger.info("searchResponse={}", searchResponse); assertHitCount(searchResponse, 5); @@ -506,7 +506,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { BytesReference.bytes(jsonBuilder().startObject().field("field1", "brown fox").endObject()) ), XContentType.JSON)) .highlighter(new HighlightBuilder().field("field1")) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); assertHitCount(searchResponse, 5); assertThat(searchResponse.getHits().getAt(0).getFields().get("_percolator_document_slot").getValues(), @@ -546,7 +546,7 @@ public void testPercolatorQueryWithHighlighting() throws Exception { ), XContentType.JSON).setName("query2")) ) .highlighter(new HighlightBuilder().field("field1")) - .addSort("_uid", SortOrder.ASC) + .addSort("_id", SortOrder.ASC) .get(); logger.info("searchResponse={}", searchResponse); assertHitCount(searchResponse, 5); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index d9977c388b248..712d5688827f2 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -65,6 +65,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; @@ -74,7 +75,7 @@ import static org.elasticsearch.percolator.QueryAnalyzer.UnsupportedQueryException; import static org.elasticsearch.percolator.QueryAnalyzer.analyze; -import static org.elasticsearch.percolator.QueryAnalyzer.selectBestExtraction; +import static org.elasticsearch.percolator.QueryAnalyzer.selectBestResult; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; @@ -163,6 +164,20 @@ public void testExtractQueryMetadata_multiPhraseQuery_pre6dot1() { assertThat(terms.get(0).bytes().utf8ToString(), equalTo("_very_long_term")); } + public void testExtractQueryMetadata_multiPhraseQuery_dups() { + MultiPhraseQuery multiPhraseQuery = new MultiPhraseQuery.Builder() + .add(new Term("_field", "_term1")) + .add(new Term[] {new Term("_field", "_term1"), new Term("_field", "_term2")}) + .build(); + + Result result = analyze(multiPhraseQuery, Version.CURRENT); + assertFalse(result.matchAllDocs); + assertFalse(result.verified); + assertTermsEqual(result.extractions, new Term("_field", "_term1"), new Term("_field", "_term2")); + assertEquals(1, 
result.minimumShouldMatch); // because of the dup term + } + + public void testExtractQueryMetadata_booleanQuery() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); TermQuery termQuery1 = new TermQuery(new Term("_field", "term0")); @@ -370,18 +385,28 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() { builder.add(termQuery1, BooleanClause.Occur.MUST_NOT); PhraseQuery phraseQuery = new PhraseQuery("_field", "_term1", "term2"); builder.add(phraseQuery, BooleanClause.Occur.SHOULD); - BooleanQuery booleanQuery = builder.build(); + Result result = analyze(booleanQuery, Version.CURRENT); assertThat(result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(2)); - List terms = new ArrayList<>(result.extractions); - assertThat(terms.size(), equalTo(2)); - terms.sort(Comparator.comparing(qt -> qt.term)); - assertThat(terms.get(0).field(), equalTo(phraseQuery.getTerms()[0].field())); - assertThat(terms.get(0).bytes(), equalTo(phraseQuery.getTerms()[0].bytes())); - assertThat(terms.get(1).field(), equalTo(phraseQuery.getTerms()[1].field())); - assertThat(terms.get(1).bytes(), equalTo(phraseQuery.getTerms()[1].bytes())); + assertTermsEqual(result.extractions, phraseQuery.getTerms()); + + builder = new BooleanQuery.Builder(); + builder.add(termQuery1, BooleanClause.Occur.MUST_NOT); + builder.add(new MatchAllDocsQuery(), BooleanClause.Occur.MUST); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.matchAllDocs, is(true)); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(0)); + assertTermsEqual(result.extractions); + + result = analyze(booleanQuery, Version.V_6_0_0); + assertThat(result.matchAllDocs, is(true)); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(0)); + assertTermsEqual(result.extractions); } public void testExactMatch_booleanQuery() { @@ -514,10 +539,8 @@ public void testBooleanQueryWithMustAndShouldClauses() { builder.setMinimumNumberShouldMatch(1); result = analyze(builder.build(), Version.CURRENT); assertThat("Must clause is exact, but m_s_m is 1 so one should clause must match too", result.verified, is(false)); - assertThat(result.minimumShouldMatch, equalTo(1)); - assertThat(result.extractions.size(), equalTo(1)); - extractions = new ArrayList<>(result.extractions); - assertThat(extractions.get(0).term, equalTo(new Term("_field", "_term3"))); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, termQuery1.getTerm(), termQuery2.getTerm(), termQuery3.getTerm()); builder = new BooleanQuery.Builder(); BooleanQuery.Builder innerBuilder = new BooleanQuery.Builder(); @@ -651,7 +674,7 @@ public void testExtractQueryMetadata_spanOrQuery() { SpanTermQuery spanTermQuery2 = new SpanTermQuery(new Term("_field", "_very_long_term")); SpanOrQuery spanOrQuery = new SpanOrQuery(spanTermQuery1, spanTermQuery2); Result result = analyze(spanOrQuery, Version.CURRENT); - assertThat(result.verified, is(false)); + assertThat(result.verified, is(true)); assertThat(result.minimumShouldMatch, equalTo(1)); assertTermsEqual(result.extractions, spanTermQuery1.getTerm(), spanTermQuery2.getTerm()); } @@ -943,64 +966,111 @@ public void testFunctionScoreQuery_withMatchAll() { assertThat(result.extractions.isEmpty(), is(true)); } - public void testSelectBestExtraction() { + public void testSelectBestResult() { Set queryTerms1 = terms(new int[0], "12", "1234", "12345"); + Result result1 = new Result(true, 
queryTerms1, 1); Set queryTerms2 = terms(new int[0], "123", "1234", "12345"); - Set result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms2, result); + Result result2 = new Result(true, queryTerms2, 1); + Result result = selectBestResult(result1, result2); + assertSame(queryTerms2, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{1, 2, 3}); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{2, 3, 4}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms1, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame(queryTerms1, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{4, 5, 6}); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{1, 2, 3}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms2, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame(queryTerms2, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{1, 2, 3}, "123", "456"); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{2, 3, 4}, "123", "456"); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms1, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame(queryTerms1, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{10}); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{1}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms2, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame(queryTerms2, result.extractions); queryTerms1 = terms(new int[]{10}, "123"); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{1}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms1, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame(queryTerms1, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{10}, "1", "123"); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{1}, "1", "2"); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame(queryTerms1, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame(queryTerms1, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{1, 2, 3}, "123", "456"); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{2, 3, 4}, "1", "456"); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame("Ignoring ranges, so then prefer queryTerms1, because it has the longest shortest term", queryTerms1, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame("Ignoring ranges, so then prefer queryTerms1, because it has the longest shortest term", + queryTerms1, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{}); + result1 = new Result(false, queryTerms1, 0); queryTerms2 = terms(new int[]{}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame("In case query extractions are empty", 
queryTerms2, result); + result2 = new Result(false, queryTerms2, 0); + result = selectBestResult(result1, result2); + assertSame("In case query extractions are empty", queryTerms2, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{1}); + result1 = new Result(true, queryTerms1, 1); queryTerms2 = terms(new int[]{}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame("In case query a single extraction is empty", queryTerms1, result); + result2 = new Result(false, queryTerms2, 0); + result = selectBestResult(result1, result2); + assertSame("In case query a single extraction is empty", queryTerms1, result.extractions); + assertFalse(result.verified); queryTerms1 = terms(new int[]{}); + result1 = new Result(false, queryTerms1, 0); queryTerms2 = terms(new int[]{1}); - result = selectBestExtraction(queryTerms1, queryTerms2); - assertSame("In case query a single extraction is empty", queryTerms2, result); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame("In case query a single extraction is empty", queryTerms2, result.extractions); + assertFalse(result.verified); + + result1 = new Result(true, true); + queryTerms2 = terms(new int[]{1}); + result2 = new Result(true, queryTerms2, 1); + result = selectBestResult(result1, result2); + assertSame("Conjunction with a match_all", result2, result); + assertTrue(result.verified); + + queryTerms1 = terms(new int[]{1}); + result1 = new Result(true, queryTerms2, 1); + result2 = new Result(true, true); + result = selectBestResult(result1, result2); + assertSame("Conjunction with a match_all", result1, result); + assertTrue(result.verified); } - public void testSelectBestExtraction_random() { + public void testselectBestResult_random() { Set terms1 = new HashSet<>(); int shortestTerms1Length = Integer.MAX_VALUE; int sumTermLength = randomIntBetween(1, 128); @@ -1021,9 +1091,11 @@ public void testSelectBestExtraction_random() { sumTermLength -= length; } - Set result = selectBestExtraction(terms1, terms2); + Result result1 = new Result(true, terms1, 1); + Result result2 = new Result(true, terms2, 1); + Result result = selectBestResult(result1, result2); Set expected = shortestTerms1Length >= shortestTerms2Length ? 
terms1 : terms2; - assertThat(result, sameInstance(expected)); + assertThat(result.extractions, sameInstance(expected)); } public void testPointRangeQuery() { @@ -1224,7 +1296,7 @@ public void testPointRangeQuerySelectRanges() { boolQuery.add(LongPoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.SHOULD); result = analyze(boolQuery.build(), Version.CURRENT); assertFalse(result.verified); - assertThat(result.minimumShouldMatch, equalTo(2)); + assertThat(result.minimumShouldMatch, equalTo(1)); assertEquals(2, result.extractions.size()); assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName); assertEquals("_field1", new ArrayList<>(result.extractions).get(1).range.fieldName); @@ -1264,9 +1336,9 @@ public void testExtractQueryMetadata_duplicatedClauses() { BooleanClause.Occur.MUST ); Result result = analyze(builder.build(), Version.CURRENT); - assertThat(result.verified, is(true)); + assertThat(result.verified, is(false)); assertThat(result.matchAllDocs, is(false)); - assertThat(result.minimumShouldMatch, equalTo(4)); + assertThat(result.minimumShouldMatch, equalTo(2)); assertTermsEqual(result.extractions, new Term("field", "value1"), new Term("field", "value2"), new Term("field", "value3"), new Term("field", "value4")); @@ -1300,6 +1372,21 @@ public void testExtractQueryMetadata_duplicatedClauses() { new Term("field", "value3"), new Term("field", "value4")); } + public void testEmptyQueries() { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + Result result = analyze(builder.build(), Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.matchAllDocs, is(false)); + assertThat(result.minimumShouldMatch, equalTo(0)); + assertThat(result.extractions.size(), equalTo(0)); + + result = analyze(new DisjunctionMaxQuery(Collections.emptyList(), 0f), Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.matchAllDocs, is(false)); + assertThat(result.minimumShouldMatch, equalTo(0)); + assertThat(result.extractions.size(), equalTo(0)); + } + private static void assertDimension(byte[] expected, Consumer consumer) { byte[] dest = new byte[expected.length]; consumer.accept(dest); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 770c91e82a6a1..8f17c8203b7e8 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -223,7 +223,7 @@ public void addSummaryFields(List summaryFields) { return RatedDocument.fromXContent(p); }, RATINGS_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> - SearchSourceBuilder.fromXContent(p), REQUEST_FIELD); + SearchSourceBuilder.fromXContent(p, false), REQUEST_FIELD); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), PARAMS_FIELD); PARSER.declareStringArray(RatedRequest::addSummaryFields, FIELDS_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TEMPLATE_ID_FIELD); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java index 34cf953ea50b7..c3616d3b9b56c 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java +++ 
b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -109,6 +110,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli private static void parseRankEvalRequest(RankEvalRequest rankEvalRequest, RestRequest request, XContentParser parser) { rankEvalRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); + rankEvalRequest.indicesOptions(IndicesOptions.fromRequest(request, rankEvalRequest.indicesOptions())); RankEvalSpec spec = RankEvalSpec.parse(parser); rankEvalRequest.setRankEvalSpec(spec); } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index 50ab9bcf27271..019ae274466ab 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -107,7 +107,7 @@ protected void doExecute(RankEvalRequest request, ActionListener relevantDocs = createRelevant("2", "3", "4", "5", "6"); + relevantDocs.add(new RatedDocument("test2", "7", TestRatingEnum.RELEVANT.ordinal())); + List specifications = new ArrayList<>(); + specifications.add(new RatedRequest("amsterdam_query", relevantDocs, amsterdamQuery)); + RankEvalSpec task = new RankEvalSpec(specifications, new PrecisionAtK()); + + RankEvalRequest request = new RankEvalRequest(task, new String[] { "test", "test2" }); + request.setRankEvalSpec(task); + + RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + Breakdown details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + assertEquals(7, details.getRetrieved()); + assertEquals(6, details.getRelevantRetrieved()); + + // test that ignore_unavailable=true works but returns one result less + assertTrue(client().admin().indices().prepareClose("test2").get().isAcknowledged()); + + request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + assertEquals(6, details.getRetrieved()); + assertEquals(5, details.getRelevantRetrieved()); + + // test that ignore_unavailable=false or default settings throw an IndexClosedException + assertTrue(client().admin().indices().prepareClose("test2").get().isAcknowledged()); + request.indicesOptions(IndicesOptions.fromParameters(null, "false", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + assertEquals(1, response.getFailures().size()); + assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); + + // test expand_wildcards + request = new RankEvalRequest(task, new String[] { "tes*" }); + request.indicesOptions(IndicesOptions.fromParameters("none", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + details = 
(PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + assertEquals(0, details.getRetrieved()); + + request.indicesOptions(IndicesOptions.fromParameters("open", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + assertEquals(6, details.getRetrieved()); + assertEquals(5, details.getRelevantRetrieved()); + + request.indicesOptions(IndicesOptions.fromParameters("closed", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + assertEquals(1, response.getFailures().size()); + assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); + + // test allow_no_indices + request = new RankEvalRequest(task, new String[] { "bad*" }); + request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + details = (PrecisionAtK.Breakdown) response.getPartialResults().get("amsterdam_query").getMetricDetails(); + assertEquals(0, details.getRetrieved()); + + request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); + assertEquals(1, response.getFailures().size()); + assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexNotFoundException.class)); + } + private static List createRelevant(String... docs) { List relevant = new ArrayList<>(); for (String doc : docs) { diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java index 94338e570a5d2..b49811a9bcaec 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java @@ -51,6 +51,7 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; public class RankEvalSpecTests extends ESTestCase { @@ -133,7 +134,7 @@ public void testXContentParsingIsNotLenient() throws IOException { BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { Exception exception = expectThrows(Exception.class, () -> RankEvalSpec.parse(parser)); - assertThat(exception.getMessage(), startsWith("[rank_eval] failed to parse field")); + assertThat(exception.getMessage(), containsString("[rank_eval] failed to parse field")); } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 0f23178c68391..ad962178f581f 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -19,6 +19,7 @@ package 
org.elasticsearch.index.rankeval; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -27,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -51,6 +53,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.startsWith; public class RatedRequestsTests extends ESTestCase { @@ -134,11 +137,13 @@ public void testXContentParsingIsNotLenient() throws IOException { BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { Exception exception = expectThrows(Exception.class, () -> RatedRequest.fromXContent(parser)); - if (exception instanceof IllegalArgumentException) { - assertThat(exception.getMessage(), startsWith("[request] unknown field")); + if (exception instanceof XContentParseException) { + XContentParseException xcpe = (XContentParseException) exception; + assertThat(ExceptionsHelper.detailedMessage(xcpe), containsString("unknown field")); + assertThat(ExceptionsHelper.detailedMessage(xcpe), containsString("parser not found")); } - if (exception instanceof ParsingException) { - assertThat(exception.getMessage(), startsWith("[request] failed to parse field")); + if (exception instanceof XContentParseException) { + assertThat(exception.getMessage(), containsString("[request] failed to parse field")); } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index c2eb99afa18a8..8350d86615008 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; @@ -181,7 +180,6 @@ public BiFunction, ScrollableHitSource.Hit, RequestWrapper> * Copies the metadata from a hit to the request. 
     */
    protected RequestWrapper<?> copyMetadata(RequestWrapper<?> request, ScrollableHitSource.Hit doc) {
-        request.setParent(doc.getParent());
         copyRouting(request, doc.getRouting());
         return request;
     }
@@ -550,10 +548,6 @@ public interface RequestWrapper<Self extends DocWriteRequest<Self>> {

         void setVersionType(VersionType versionType);

-        void setParent(String parent);
-
-        String getParent();
-
         void setRouting(String routing);

         String getRouting();
@@ -621,16 +615,6 @@ public void setVersionType(VersionType versionType) {
             request.versionType(versionType);
         }

-        @Override
-        public void setParent(String parent) {
-            request.parent(parent);
-        }
-
-        @Override
-        public String getParent() {
-            return request.parent();
-        }
-
         @Override
         public void setRouting(String routing) {
             request.routing(routing);
@@ -720,16 +704,6 @@ public void setVersionType(VersionType versionType) {
             request.versionType(versionType);
         }

-        @Override
-        public void setParent(String parent) {
-            request.parent(parent);
-        }
-
-        @Override
-        public String getParent() {
-            return request.parent();
-        }
-
         @Override
         public void setRouting(String routing) {
             request.routing(routing);
@@ -807,8 +781,6 @@ public RequestWrapper<?> apply(RequestWrapper<?> request, ScrollableHitSource.Hi
             context.put(IdFieldMapper.NAME, doc.getId());
             Long oldVersion = doc.getVersion();
             context.put(VersionFieldMapper.NAME, oldVersion);
-            String oldParent = doc.getParent();
-            context.put(ParentFieldMapper.NAME, oldParent);
             String oldRouting = doc.getRouting();
             context.put(RoutingFieldMapper.NAME, oldRouting);
             context.put(SourceFieldMapper.NAME, request.getSource());
@@ -846,10 +818,6 @@ public RequestWrapper<?> apply(RequestWrapper<?> request, ScrollableHitSource.Hi
             if (false == Objects.equals(oldVersion, newValue)) {
                 scriptChangedVersion(request, newValue);
             }
-            newValue = context.remove(ParentFieldMapper.NAME);
-            if (false == Objects.equals(oldParent, newValue)) {
-                scriptChangedParent(request, newValue);
-            }
             /*
              * It's important that routing comes after parent in case you want to
              * change them both.
              */
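For orientation, once the parent accessors are removed the wrappers shown above reduce to plain delegation for the remaining metadata. A minimal sketch of that shape, assuming only the IndexRequest routing methods; it is not the real RequestWrapper interface, which also covers index, type, id, version and source:

```java
import org.elasticsearch.action.index.IndexRequest;

final class IndexRequestWrapperSketch {

    private final IndexRequest request;

    IndexRequestWrapperSketch(IndexRequest request) {
        this.request = request;
    }

    // Routing is simply delegated to the wrapped request; the parent accessors
    // removed above are gone because the _parent field itself has been removed.
    void setRouting(String routing) {
        request.routing(routing);
    }

    String getRouting() {
        return request.routing();
    }
}
```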
@@ -879,7 +847,6 @@ protected RequestWrapper<?> scriptChangedOpType(RequestWrapper<?> request, OpTyp
                 RequestWrapper<DeleteRequest> delete = wrap(new DeleteRequest(request.getIndex(), request.getType(), request.getId()));
                 delete.setVersion(request.getVersion());
                 delete.setVersionType(VersionType.INTERNAL);
-                delete.setParent(request.getParent());
                 delete.setRouting(request.getRouting());
                 return delete;
             default:
@@ -897,8 +864,6 @@ protected RequestWrapper<?> scriptChangedOpType(RequestWrapper<?> request, OpTyp

         protected abstract void scriptChangedRouting(RequestWrapper<?> request, Object to);

-        protected abstract void scriptChangedParent(RequestWrapper<?> request, Object to);
-
     }

     public enum OpType {
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java
index 8dd30a9fa9d65..c86911649ac34 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java
@@ -69,7 +69,6 @@ protected RequestWrapper<DeleteRequest> buildRequest(ScrollableHitSource.Hit doc
      */
     @Override
     protected RequestWrapper<DeleteRequest> copyMetadata(RequestWrapper<DeleteRequest> request, ScrollableHitSource.Hit doc) {
-        request.setParent(doc.getParent());
         request.setRouting(doc.getRouting());
         return request;
     }
diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java
index 617173a6e92ef..2aff0d7a5c501 100644
--- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java
+++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/BulkByScrollParallelizationHelper.java
@@ -27,7 +27,7 @@
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.index.mapper.UidFieldMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.slice.SliceBuilder;
 import org.elasticsearch.tasks.TaskId;
@@ -127,7 +127,7 @@ private static <Request extends AbstractBulkByScrollRequest<Request>> void sendS
         LeaderBulkByScrollTaskState worker = task.getLeaderState();
         int totalSlices = worker.getSlices();
         TaskId parentTaskId = new TaskId(localNodeId, task.getId());
-        for (final SearchRequest slice : sliceIntoSubRequests(request.getSearchRequest(), UidFieldMapper.NAME, totalSlices)) {
+        for (final SearchRequest slice : sliceIntoSubRequests(request.getSearchRequest(), IdFieldMapper.NAME, totalSlices)) {
             // TODO move the request to the correct node. maybe here or somehow do it as part of startup for reindex in general....
Request requestForSlice = request.forSlice(parentTaskId, slice, totalSlices); ActionListener sliceListener = ActionListener.wrap( diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index 2e85d567743ee..f218d6ae8dfaa 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -77,7 +77,7 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler buildRequest(ScrollableHitSource.Hit doc) * here on out operates on the index request rather than the template. */ index.routing(mainRequest.getDestination().routing()); - index.parent(mainRequest.getDestination().parent()); index.setPipeline(mainRequest.getDestination().getPipeline()); // OpType is synthesized from version so it is handled when we copy version above. @@ -432,14 +431,6 @@ protected void scriptChangedVersion(RequestWrapper request, Object to) { } } - @Override - protected void scriptChangedParent(RequestWrapper request, Object to) { - // Have to override routing with parent just in case its changed - String routing = Objects.toString(to, null); - request.setParent(routing); - request.setRouting(routing); - } - @Override protected void scriptChangedRouting(RequestWrapper request, Object to) { request.setRouting(Objects.toString(to, null)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index e21a6408bd85a..8e55df60734dc 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.script.Script; @@ -161,11 +160,6 @@ protected void scriptChangedRouting(RequestWrapper request, Object to) { throw new IllegalArgumentException("Modifying [" + RoutingFieldMapper.NAME + "] not allowed"); } - @Override - protected void scriptChangedParent(RequestWrapper request, Object to) { - throw new IllegalArgumentException("Modifying [" + ParentFieldMapper.NAME + "] not allowed"); - } - } } } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java index d18e9c85bcdab..4834c6f9ce0a2 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java @@ -86,23 +86,20 @@ private RemoteResponseParsers() {} } }, new ParseField("_source")); ParseField routingField = new ParseField("_routing"); - ParseField parentField = new ParseField("_parent"); ParseField ttlField = new ParseField("_ttl"); + ParseField parentField = new ParseField("_parent"); HIT_PARSER.declareString(BasicHit::setRouting, routingField); - 
HIT_PARSER.declareString(BasicHit::setParent, parentField); - // Pre-2.0.0 parent and routing come back in "fields" + // Pre-2.0.0 routing comes back in "fields" class Fields { String routing; - String parent; } ObjectParser fieldsParser = new ObjectParser<>("fields", Fields::new); HIT_PARSER.declareObject((hit, fields) -> { hit.setRouting(fields.routing); - hit.setParent(fields.parent); }, fieldsParser, new ParseField("fields")); fieldsParser.declareString((fields, routing) -> fields.routing = routing, routingField); - fieldsParser.declareString((fields, parent) -> fields.parent = parent, parentField); fieldsParser.declareLong((fields, ttl) -> {}, ttlField); // ignore ttls since they have been removed + fieldsParser.declareString((fields, parent) -> {}, parentField); // ignore parents since they have been removed } /** diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 80419a9b9d7a2..566c97c61c455 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -31,6 +31,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.ResponseException; @@ -199,7 +200,7 @@ public void onSuccess(org.elasticsearch.client.Response response) { try (XContentParser xContentParser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, content)) { parsedResponse = parser.apply(xContentParser, xContentType); - } catch (ParsingException e) { + } catch (XContentParseException e) { /* Because we're streaming the response we can't get a copy of it here. The best we can do is hint that it * is totally wrong and we're probably not talking to Elasticsearch. */ throw new ElasticsearchException( diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java index 0b8dea6ea41f2..df165ca12e7a4 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java @@ -307,25 +307,4 @@ public void testMultipleSources() throws Exception { } - /** - * Test delete by query support for filtering by type. This entire feature - * can and should be removed when we drop support for types index with - * multiple types from core.
- */ - public void testFilterByType() throws Exception { - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))); // allows for multiple types - indexRandom(true, - client().prepareIndex("test", "test1", "1").setSource("foo", "a"), - client().prepareIndex("test", "test2", "2").setSource("foo", "a"), - client().prepareIndex("test", "test2", "3").setSource("foo", "b")); - - assertHitCount(client().prepareSearch("test").setSize(0).get(), 3); - - // Deletes doc of the type "type2" that also matches foo:a - DeleteByQueryRequestBuilder builder = deleteByQuery().source("test").filter(termQuery("foo", "a")).refresh(true); - builder.source().setTypes("test2"); - assertThat(builder.get(), matcher().deleted(1)); - assertHitCount(client().prepareSearch("test").setSize(0).get(), 2); - } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java deleted file mode 100644 index b7737beb33af6..0000000000000 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexParentChildTests.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.reindex; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.join.ParentJoinPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.script.MockScriptPlugin; -import org.elasticsearch.test.InternalSettingsPlugin; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.Function; - -import static org.elasticsearch.index.query.QueryBuilders.idsQuery; -import static org.elasticsearch.index.query.QueryBuilders.typeQuery; -import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasToString; -import static org.hamcrest.Matchers.instanceOf; - -/** - * Reindex tests for legacy parent/child. Tests for the new {@code join} - * field are in a qa project. 
- */ -public class ReindexParentChildTests extends ReindexTestCase { - QueryBuilder findsCountry; - QueryBuilder findsCity; - QueryBuilder findsNeighborhood; - - @Override - protected boolean ignoreExternalCluster() { - return true; - } - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); - plugins.add(ParentJoinPlugin.class); - plugins.add(InternalSettingsPlugin.class); - plugins.add(CustomScriptPlugin.class); - return Collections.unmodifiableList(plugins); - } - - @Override - protected Collection<Class<? extends Plugin>> transportClientPlugins() { - return nodePlugins(); - } - - public void testParentChild() throws Exception { - createParentChildIndex("source"); - createParentChildIndex("dest"); - createParentChildDocs("source", true); - - // Copy parent to the new index - ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCountry).refresh(true); - assertThat(copy.get(), matcher().created(1)); - - // Copy the child to a new index - copy = reindex().source("source").destination("dest").filter(findsCity).refresh(true); - assertThat(copy.get(), matcher().created(1)); - - // Make sure parent/child is intact on that index - assertSearchHits(client().prepareSearch("dest").setQuery(findsCity).get(), "pittsburgh"); - - // Copy the grandchild to a new index - copy = reindex().source("source").destination("dest").filter(findsNeighborhood).refresh(true); - assertThat(copy.get(), matcher().created(1)); - - // Make sure parent/child is intact on that index - assertSearchHits(client().prepareSearch("dest").setQuery(findsNeighborhood).get(), - "make-believe"); - - // Copy the parent/child/grandchild structure all at once to a third index - createParentChildIndex("dest_all_at_once"); - copy = reindex().source("source").destination("dest_all_at_once").refresh(true); - assertThat(copy.get(), matcher().created(3)); - - // Make sure parent/child/grandchild is intact there too - assertSearchHits(client().prepareSearch("dest_all_at_once").setQuery(findsNeighborhood).get(), - "make-believe"); - } - - /** - * Tests for adding the {@code _parent} via script and adding *both* {@code _parent} and {@code _routing} values via scripts. - */ - public void testScriptAddsParent() throws Exception { - assertAcked(client().admin().indices().prepareCreate("source") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))); // allows for multiple types - - createParentChildIndex("dest"); - createParentChildDocs("source", false); - - ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(typeQuery("country")).refresh(true); - assertThat(copy.get(), matcher().created(1)); - copy = reindex().source("source").destination("dest").filter(typeQuery("city")) - .script(mockScript("ctx._parent='united states'")).refresh(true); - assertThat(copy.get(), matcher().created(1)); - assertSearchHits(client().prepareSearch("dest").setQuery(findsCity).get(), "pittsburgh"); - - copy = reindex().source("source").destination("dest").filter(typeQuery("neighborhood")) - .script(mockScript("ctx._parent='pittsburgh';ctx._routing='united states'")).refresh(true); - assertThat(copy.get(), matcher().created(1)); - assertSearchHits(client().prepareSearch("dest").setQuery(findsNeighborhood).get(), "make-believe"); - } - - public void testErrorMessageWhenBadParentChild() throws Exception { - createParentChildIndex("source"); - createParentChildDocs("source", true); - - ReindexRequestBuilder copy = reindex().source("source").destination("dest").filter(findsCity); - final BulkByScrollResponse response = copy.get(); - assertThat(response.getBulkFailures().size(), equalTo(1)); - final Exception cause = response.getBulkFailures().get(0).getCause(); - assertThat(cause, instanceOf(IllegalArgumentException.class)); - assertThat(cause, hasToString(containsString("can't specify parent if no parent field has been configured"))); - } - - /** - * Setup a parent/child index and return a query that should find the child - * using the parent. - */ - private void createParentChildIndex(String indexName) throws Exception { - CreateIndexRequestBuilder create = client().admin().indices().prepareCreate(indexName); - create.setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)); // allows for multiple types - create.addMapping("city", "{\"_parent\": {\"type\": \"country\"}}", XContentType.JSON); - create.addMapping("neighborhood", "{\"_parent\": {\"type\": \"city\"}}", XContentType.JSON); - assertAcked(create); - ensureGreen(); - } - - private void createParentChildDocs(String indexName, boolean addParents) throws Exception { - indexRandom(true, - client().prepareIndex(indexName, "country", "united states") - .setSource("foo", "bar"), - client().prepareIndex(indexName, "city", "pittsburgh") - .setParent(addParents ? "united states" : null) - .setSource("foo", "bar"), - client().prepareIndex(indexName, "neighborhood", "make-believe") - .setParent(addParents ? "pittsburgh" : null) - .setRouting(addParents ? "united states" : null) - .setSource("foo", "bar")); - - findsCountry = idsQuery("country").addIds("united states"); - findsCity = hasParentQuery("country", findsCountry, false); - findsNeighborhood = hasParentQuery("city", findsCity, false); - - if (addParents) { - // Make sure we built the parent/child relationship - assertSearchHits(client().prepareSearch(indexName).setQuery(findsCity).get(), "pittsburgh"); - assertSearchHits(client().prepareSearch(indexName).setQuery(findsNeighborhood).get(), "make-believe"); - } - } - - public static class CustomScriptPlugin extends MockScriptPlugin { - @Override - @SuppressWarnings("unchecked") - protected Map<String, Function<Map<String, Object>, Object>> pluginScripts() { - Map<String, Function<Map<String, Object>, Object>> scripts = new HashMap<>(); - - scripts.put("ctx._parent='united states'", vars -> { - Map<String, Object> ctx = (Map<String, Object>) vars.get("ctx"); - ctx.put("_parent", "united states"); - return null; - }); - scripts.put("ctx._parent='pittsburgh';ctx._routing='united states'", vars -> { - Map<String, Object> ctx = (Map<String, Object>) vars.get("ctx"); - ctx.put("_parent", "pittsburgh"); - ctx.put("_routing", "united states"); - return null; - }); - - return scripts; - } - } -} diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java index 4e2834a771a94..6d3ce558c7567 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexScriptTests.java @@ -92,12 +92,6 @@ public void testSettingVersionToJunkIsAnError() throws Exception { } } - public void testSetParent() throws Exception { - String parent = randomRealisticUnicodeOfLengthBetween(5, 20); - IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_parent", parent)); - assertEquals(parent, index.parent()); - } - public void testSetRouting() throws Exception { String routing = randomRealisticUnicodeOfLengthBetween(5, 20); IndexRequest index = applyScript((Map<String, Object> ctx) -> ctx.put("_routing", routing)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java index 9f43ea2d17067..4006d16fbcb11 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -39,7 +39,7 @@ public void testModifyingCtxNotAllowed() { * error message to the user, not some ClassCastException.
*/ Object[] options = new Object[] {"cat", new Object(), 123, new Date(), Math.PI}; - for (String ctxVar: new String[] {"_index", "_type", "_id", "_version", "_parent", "_routing"}) { + for (String ctxVar: new String[] {"_index", "_type", "_id", "_version", "_routing"}) { try { applyScript((Map<String, Object> ctx) -> ctx.put(ctxVar, randomFrom(options))); } catch (IllegalArgumentException e) { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index f67a5b627fb4c..92f370f8f6364 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -207,7 +207,6 @@ public void testParseScrollFullyLoaded() throws Exception { assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); assertEquals("testrouting", r.getHits().get(0).getRouting()); - assertEquals("testparent", r.getHits().get(0).getParent()); called.set(true); }); assertTrue(called.get()); @@ -222,7 +221,6 @@ public void testParseScrollFullyLoadedFrom1_7() throws Exception { assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId()); assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString()); assertEquals("testrouting", r.getHits().get(0).getRouting()); - assertEquals("testparent", r.getHits().get(0).getParent()); called.set(true); }); assertTrue(called.get());
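The loop above expects every write to a retired metadata key to surface as an IllegalArgumentException with a readable message, never a ClassCastException. A minimal sketch of that guard's shape, with a hypothetical class name (the real checks live in the per-action scriptChanged* overrides shown earlier):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class CtxMetadataGuardSketch {
    // _parent is intentionally absent: scripts may no longer modify it
    private static final Set<String> MODIFIABLE =
        new HashSet<>(Arrays.asList("_index", "_type", "_id", "_version", "_routing"));

    static void checkMetadata(Map<String, Object> ctx) {
        for (String key : ctx.keySet()) {
            if (key.startsWith("_") && MODIFIABLE.contains(key) == false) {
                throw new IllegalArgumentException("Modifying [" + key + "] not allowed");
            }
        }
    }
}

diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 7008111ca9c54..79fe5e7aaefa7 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -16,12 +16,36 @@ * specific language governing permissions and limitations * under the License.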
*/ +import org.elasticsearch.gradle.test.AntFixture esplugin { description 'Module for URL repository' classname 'org.elasticsearch.plugin.repository.url.URLRepositoryPlugin' } +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +// This directory is shared between two URL repositories and one FS repository in YAML integration tests +File repositoryDir = new File(project.buildDir, "shared-repository") + +/** A task to start the URLFixture which exposes the repositoryDir over HTTP **/ +task urlFixture(type: AntFixture) { + doFirst { + repositoryDir.mkdirs() + } + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.url.URLFixture', baseDir, "${repositoryDir.absolutePath}" +} + integTestCluster { - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + dependsOn urlFixture + // repositoryDir is used by a FS repository to create snapshots + setting 'path.repo', "${repositoryDir.absolutePath}" + // repositoryDir is used by two URL repositories to restore snapshots + setting 'repositories.url.allowed_urls', "http://snapshot.test*,http://${ -> urlFixture.addressAndPort }" } diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 238b14ce013ad..f33fa98f0e3be 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -21,9 +21,34 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.URI; +import java.net.URL; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; public class RepositoryURLClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -35,5 +60,70 @@ public RepositoryURLClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + /** + * This method registers 3 
snapshot/restore repositories: + * - repository-fs: this FS repository is used to create snapshots. + * - repository-url: this URL repository is used to restore snapshots created using the previous repository. It uses + * the URLFixture to restore snapshots over HTTP. + * - repository-file: similar to the previous repository but using a file:// prefix instead of http://. + **/ + @Before + public void registerRepositories() throws IOException { + Response clusterSettingsResponse = client().performRequest("GET", "/_cluster/settings?include_defaults=true" + + "&filter_path=defaults.path.repo,defaults.repositories.url.allowed_urls"); + Map<String, Object> clusterSettings = entityAsMap(clusterSettingsResponse); + + @SuppressWarnings("unchecked") + List<String> pathRepos = (List<String>) XContentMapValues.extractValue("defaults.path.repo", clusterSettings); + assertThat(pathRepos, notNullValue()); + assertThat(pathRepos, hasSize(1)); + + final String pathRepo = pathRepos.get(0); + final URI pathRepoUri = PathUtils.get(pathRepo).toUri().normalize(); + + // Create a FS repository using the path.repo location + Response createFsRepositoryResponse = client().performRequest("PUT", "_snapshot/repository-fs", emptyMap(), + buildRepositorySettings(FsRepository.TYPE, Settings.builder().put("location", pathRepo).build())); + assertThat(createFsRepositoryResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + // Create a URL repository using the file://{path.repo} URL + Response createFileRepositoryResponse = client().performRequest("PUT", "_snapshot/repository-file", emptyMap(), + buildRepositorySettings(URLRepository.TYPE, Settings.builder().put("url", pathRepoUri.toString()).build())); + assertThat(createFileRepositoryResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + // Create a URL repository using the http://{fixture} URL + @SuppressWarnings("unchecked") + List<String> allowedUrls = (List<String>) XContentMapValues.extractValue("defaults.repositories.url.allowed_urls", clusterSettings); + for (String allowedUrl : allowedUrls) { + try { + InetAddress inetAddress = InetAddress.getByName(new URL(allowedUrl).getHost()); + if (inetAddress.isAnyLocalAddress() || inetAddress.isLoopbackAddress()) { + Response createUrlRepositoryResponse = client().performRequest("PUT", "_snapshot/repository-url", emptyMap(), + buildRepositorySettings(URLRepository.TYPE, Settings.builder().put("url", allowedUrl).build())); + assertThat(createUrlRepositoryResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + break; + } + } catch (Exception e) { + logger.debug("Failed to resolve inet address for allowed URL [{}], skipping", allowedUrl); + } + } + } + + private static HttpEntity buildRepositorySettings(final String type, final Settings settings) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("type", type); + builder.startObject("settings"); + { + settings.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + builder.endObject(); + } + builder.endObject(); + return new NStringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + } + } }
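Of the entries in repositories.url.allowed_urls, the loop above registers repository-url only for one that resolves to the local machine, i.e. the fixture. That filter, restated as a self-contained sketch (hypothetical class name):

import java.net.InetAddress;
import java.net.URL;

class LoopbackUrlFilterSketch {
    // true for addresses like http://127.0.0.1:9200 or http://[::1]:9200,
    // false (or an exception, handled by the caller) for anything else
    static boolean isLocal(String allowedUrl) throws Exception {
        InetAddress address = InetAddress.getByName(new URL(allowedUrl).getHost());
        return address.isAnyLocalAddress() || address.isLoopbackAddress();
    }
}

diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java new file mode 100644 index 0000000000000..c9a36ec859021 --- /dev/null +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java @@ -0,0 +1,162 @@ +/* + *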
Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.url; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; + +/** + * This {@link URLFixture} exposes a filesystem directory over HTTP. It is used in repository-url + * integration tests to expose a directory created by a regular FS repository. 
+ */ +public class URLFixture { + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("URLFixture <working directory> <repository directory>"); + } + + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); + + try { + final Path workingDirectory = dir(args[0]); + // Writes the PID of the current Java process in a `pid` file located in the working directory + writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); + + final String addressAndPort = addressToString(httpServer.getAddress()); + // Writes the address and port of the http server in a `ports` file located in the working directory + writeFile(workingDirectory, "ports", addressAndPort); + + // Exposes the repository over HTTP + final String url = "http://" + addressAndPort; + httpServer.createContext("/", new ResponseHandler(dir(args[1]))); + httpServer.start(); + + // Wait to be killed + Thread.sleep(Long.MAX_VALUE); + + } finally { + httpServer.stop(0); + } + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path dir(final String dir) { + return Paths.get(dir); + } + + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { + final Path tempPidFile = Files.createTempFile(dir, null, null); + Files.write(tempPidFile, singleton(content)); + Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE); + } + + private static String addressToString(final SocketAddress address) { + final InetSocketAddress inetSocketAddress = (InetSocketAddress) address; + if (inetSocketAddress.getAddress() instanceof Inet6Address) { + return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort(); + } else { + return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort(); + } + } + + static class ResponseHandler implements HttpHandler { + + private final Path repositoryDir; + + ResponseHandler(final Path repositoryDir) { + this.repositoryDir = repositoryDir; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + Response response; + if ("GET".equalsIgnoreCase(exchange.getRequestMethod())) { + String path = exchange.getRequestURI().toString(); + if (path.length() > 0 && path.charAt(0) == '/') { + path = path.substring(1); + } + + Path normalizedRepositoryDir = repositoryDir.normalize(); + Path normalizedPath = normalizedRepositoryDir.resolve(path).normalize(); + + if (normalizedPath.startsWith(normalizedRepositoryDir)) { + if (Files.exists(normalizedPath) && Files.isReadable(normalizedPath) && Files.isRegularFile(normalizedPath)) { + byte[] content = Files.readAllBytes(normalizedPath); + Map<String, String> headers = singletonMap("Content-Length", String.valueOf(content.length)); + response = new Response(RestStatus.OK, headers, "application/octet-stream", content); + } else { + response = new Response(RestStatus.NOT_FOUND, emptyMap(), "text/plain", new byte[0]); + } + } else { + response = new Response(RestStatus.FORBIDDEN, emptyMap(), "text/plain", new byte[0]); + } + } else { + response = new Response(RestStatus.INTERNAL_SERVER_ERROR, emptyMap(), "text/plain", + "Unsupported HTTP method".getBytes(StandardCharsets.UTF_8)); + } + exchange.sendResponseHeaders(response.status.getStatus(), response.body.length); + if (response.body.length > 0) { +
exchange.getResponseBody().write(response.body); + } + exchange.close(); + } + } + + /** + * Represents an HTTP response. + */ + static class Response { + + final RestStatus status; + final Map<String, String> headers; + final String contentType; + final byte[] body; + + Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) { + this.status = Objects.requireNonNull(status); + this.headers = Objects.requireNonNull(headers); + this.contentType = Objects.requireNonNull(contentType); + this.body = Objects.requireNonNull(body); + } + } +} diff --git a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml index 75e7873299869..7edbc4c08fbf7 100644 --- a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml +++ b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml @@ -1,6 +1,108 @@ -# Integration tests for URL Repository component +# Integration tests for repository-url # -"URL Repository plugin loaded": +# This test is based on 3 repositories, all registered before this +# test is executed. The repository-fs is used to create snapshots +# in a shared directory on the filesystem. Then the test uses a URL +# repository with a "http://" prefix to test the restore of the +# snapshots. In order to do that it uses a URLFixture that exposes +# the content of the shared directory over HTTP. A second URL +# repository is used to test the snapshot restore but this time +# with a "file://" prefix. +setup: + + # Ensure that the FS repository is registered, so we can create + # snapshots that we later restore using the URL repository + - do: + snapshot.get_repository: + repository: repository-fs + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + # Create a first snapshot using the FS repository + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot-one + wait_for_completion: true + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + # Create a second snapshot + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot-two + wait_for_completion: true + + - do: + snapshot.get: + repository: repository-fs + snapshot: snapshot-one,snapshot-two + +--- +teardown: + + - do: + indices.delete: + index: docs + ignore_unavailable: true + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot-one + +--- +"Module repository-url is loaded": - do: cluster.state: {} @@ -10,23 +112,129 @@ - do: nodes.info: {} - - match: { nodes.$master.modules.0.name: repository-url } + - match: { nodes.$master.modules.0.name: repository-url } --- -setup: +"Restore with repository-url using http://": + # Ensure that the URL repository is registered - do: - snapshot.create_repository: - repository: test_repo1 - body: - type: url - settings: - url: "http://snapshot.test1" +
snapshot.get_repository: + repository: repository-url + + - match: { repository-url.type : "url" } + - match: { repository-url.settings.url: '/http://(.+):\d+/' } - do: - snapshot.create_repository: - repository: test_repo2 - body: - type: url - settings: - url: "http://snapshot.test2" + snapshot.get: + repository: repository-url + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository-url + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository-url + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + - do: + catch: /cannot delete snapshot from a readonly repository/ + snapshot.delete: + repository: repository-url + snapshot: snapshot-two + +--- +"Restore with repository-url using file://": + + # Ensure that the URL repository is registered + - do: + snapshot.get_repository: + repository: repository-file + + - match: { repository-file.type : "url" } + - match: { repository-file.settings.url: '/file://(.+)/' } + + - do: + snapshot.get: + repository: repository-file + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository-file + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository-file + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + - do: + catch: /cannot delete snapshot from a readonly repository/ + snapshot.delete: + repository: repository-file + snapshot: snapshot-one + diff --git a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml index 39cfeee192c9b..77bdac3ac1573 100644 --- a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml +++ b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml @@ -14,3 +14,18 @@ repository: test_repo1 - is_true : test_repo1 + +--- +"Repository cannot be registered": + + - do: + catch: /doesn't match any of the locations specified by path.repo or repositories.url.allowed_urls/ + snapshot.create_repository: + repository: test_repo2 + body: + type: url + settings: + url: "http://snapshot.unknown" + + +
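The new 20_repository.yml test depends on registration failing fast when a URL repository points outside the whitelist. The same expectation, driven from Java with the low-level REST client — a hypothetical helper, not part of the patch:

import java.util.Collections;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

class DisallowedUrlRepositorySketch {
    static void expectRejected(RestClient client) throws Exception {
        String body = "{\"type\":\"url\",\"settings\":{\"url\":\"http://snapshot.unknown\"}}";
        try {
            client.performRequest("PUT", "/_snapshot/test_repo2", Collections.emptyMap(),
                    new NStringEntity(body, ContentType.APPLICATION_JSON));
            throw new AssertionError("registration should have been rejected");
        } catch (ResponseException expected) {
            // the error message names path.repo and repositories.url.allowed_urls
        }
    }
}

diff --git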
a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..de70972e975f0 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0.jar.sha1 @@ -0,0 +1 @@ +c09216a18658d5b2912566efff8665e45edc24b4 \ No newline at end of file diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java index 5220d44dca308..ce8a635ffb6f1 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapperIT.java @@ -87,7 +87,7 @@ public void testBasicUsage() throws Exception { .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1])) .sort("collate") - .sort("_uid", SortOrder.DESC) // secondary sort should kick in because both will collate to same value + .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); SearchResponse response = client().search(request).actionGet(); @@ -128,7 +128,7 @@ public void testMultipleValues() throws Exception { .query(QueryBuilders.termQuery("collate", "a")) // if mode max we use c and b as sort values, if max we use "a" for both .sort(SortBuilders.fieldSort("collate").sortMode(SortMode.MAX).order(SortOrder.DESC)) - .sort("_uid", SortOrder.DESC) // will be ignored + .sort("_id", SortOrder.DESC) // will be ignored ); SearchResponse response = client().search(request).actionGet(); @@ -145,7 +145,7 @@ public void testMultipleValues() throws Exception { .query(QueryBuilders.termQuery("collate", "a")) // if mode max we use c and b as sort values, if max we use "a" for both .sort(SortBuilders.fieldSort("collate").sortMode(SortMode.MIN).order(SortOrder.DESC)) - .sort("_uid", SortOrder.DESC) // will NOT be ignored and will determine order + .sort("_id", SortOrder.DESC) // will NOT be ignored and will determine order ); response = client().search(request).actionGet(); @@ -188,7 +188,7 @@ public void testNormalization() throws Exception { .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1])) .sort("collate") - .sort("_uid", SortOrder.DESC) // secondary sort should kick in because both will collate to same value + .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); SearchResponse response = client().search(request).actionGet(); @@ -230,7 +230,7 @@ public void testSecondaryStrength() throws Exception { .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1])) .sort("collate") - .sort("_uid", SortOrder.DESC) // secondary sort should kick in because both will collate to same value + .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); SearchResponse response = client().search(request).actionGet(); @@ -273,7 +273,7 @@ public void testIgnorePunctuation() throws Exception { .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? 
equilavent[0] : equilavent[1])) .sort("collate") - .sort("_uid", SortOrder.DESC) // secondary sort should kick in because both will collate to same value + .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); SearchResponse response = client().search(request).actionGet(); @@ -316,7 +316,7 @@ public void testIgnoreWhitespace() throws Exception { .source(new SearchSourceBuilder() .fetchSource(false) .sort("collate", SortOrder.ASC) - .sort("_uid", SortOrder.ASC) // secondary sort should kick in on docs 1 and 3 because same value collate value + .sort("_id", SortOrder.ASC) // secondary sort should kick in on docs 1 and 3 because same value collate value ); SearchResponse response = client().search(request).actionGet(); @@ -398,7 +398,7 @@ public void testIgnoreAccentsButNotCase() throws Exception { .source(new SearchSourceBuilder() .fetchSource(false) .sort("collate", SortOrder.ASC) - .sort("_uid", SortOrder.DESC) + .sort("_id", SortOrder.DESC) ); SearchResponse response = client().search(request).actionGet(); @@ -492,7 +492,7 @@ public void testCustomRules() throws Exception { .fetchSource(false) .query(QueryBuilders.termQuery("collate", randomBoolean() ? equilavent[0] : equilavent[1])) .sort("collate", SortOrder.ASC) - .sort("_uid", SortOrder.DESC) // secondary sort should kick in because both will collate to same value + .sort("_id", SortOrder.DESC) // secondary sort should kick in because both will collate to same value ); SearchResponse response = client().search(request).actionGet(); diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 16f43319ded3a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a16521e8f7240a9b93ea8ced157298b9d18bca43 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..40ff3efe2642c --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0.jar.sha1 @@ -0,0 +1 @@ +c9d5bbd0affa90b46e173c762c35419a54977c35 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index e86c0765b3868..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0dc6db8e16bf1ed6ebaa914fcbfbb4970af23747 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..9442635addda9 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0.jar.sha1 @@ -0,0 +1 @@ +4e6c63fa8ae005d81d12f0d88ffa98346b443ac4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index b6f58cf3fe622..0000000000000 --- 
a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de43b057e8800f6c7b26907035664feb686127af \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..780824c4d4558 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0.jar.sha1 @@ -0,0 +1 @@ +37b7ff0a6493f139cb77f5bda965ac0189c8efd1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index cac837ab4a6fc..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5e6a6d99a04ea5121bfd77470a7818725516ead \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..ba241e6a09915 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0.jar.sha1 @@ -0,0 +1 @@ +d189185da23b2221c4d532da5e2cacce735f8a0c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 909569fec9c95..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d755dcef8763b783b7cbba7154a62f91e413007c \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..fb7e5befe4774 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0.jar.sha1 @@ -0,0 +1 @@ +74462b51de45afe708f1042cc901fe7370413871 \ No newline at end of file diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java index d3830ab210662..5b3895578e0cc 100644 --- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java +++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.EnabledAttributeMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -101,7 +102,7 @@ public static class TypeParser implements MetadataFieldMapper.TypeParser { String fieldName = entry.getKey(); Object fieldNode = entry.getValue(); if (fieldName.equals("enabled")) { - boolean enabled = TypeParsers.nodeBooleanValue(name, "enabled", fieldNode, parserContext); + boolean enabled = XContentMapValues.nodeBooleanValue(fieldNode, name + ".enabled"); 
builder.enabled(enabled ? EnabledAttributeMapper.ENABLED : EnabledAttributeMapper.DISABLED); iterator.remove(); } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index bb5e1e757812f..77ebcfec5328a 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +import org.elasticsearch.gradle.test.AntFixture esplugin { description 'The Azure Repository plugin adds support for Azure storage repositories.' @@ -42,9 +43,28 @@ thirdPartyAudit.excludes = [ 'org.slf4j.LoggerFactory', ] -integTestCluster { - keystoreSetting 'azure.client.default.account', 'cloudazureresource' - keystoreSetting 'azure.client.default.key', 'abcdefgh' - keystoreSetting 'azure.client.secondary.account', 'cloudazureresource' - keystoreSetting 'azure.client.secondary.key', 'abcdefgh' +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +/** A task to start the fixture which emulates an Azure Storage service **/ +task azureStorageFixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.azure.AzureStorageFixture', baseDir, 'container_test' } + +integTestCluster { + dependsOn azureStorageFixture + + keystoreSetting 'azure.client.integration_test.account', "azure_integration_test_account" + /* The key is "azure_integration_test_key" encoded using base64 */ + keystoreSetting 'azure.client.integration_test.key', "YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk=" + // Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used + // in a hacky way to change the protocol and endpoint. We must fix that. + setting 'azure.client.integration_test.endpoint_suffix', + "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${ -> azureStorageFixture.addressAndPort }" +} \ No newline at end of file
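The base64 comment above is easy to verify: decoding the keystore value yields exactly the advertised plain-text key.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

class KeystoreKeyCheck {
    public static void main(String[] args) {
        byte[] decoded = Base64.getDecoder().decode("YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk=");
        // prints: azure_integration_test_key
        System.out.println(new String(decoded, StandardCharsets.UTF_8));
    }
}

diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java new file mode 100644 index 0000000000000..025ee45b9c3a0 --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.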
+ */ +package org.elasticsearch.repositories.azure; + +import com.microsoft.azure.storage.StorageException; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.ESBlobStoreTestCase; + +import java.io.IOException; +import java.net.URISyntaxException; + +public class AzureBlobStoreTests extends ESBlobStoreTestCase { + + @Override + protected BlobStore newBlobStore() throws IOException { + try { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, Settings.EMPTY, client); + } catch (URISyntaxException | StorageException e) { + throw new IOException(e); + } + } +} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java deleted file mode 100644 index 981e0889e73e5..0000000000000 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositoryF.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.repositories.azure; - -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.MockNode; -import org.elasticsearch.node.Node; - -import java.io.IOException; -import java.util.Collections; -import java.util.concurrent.CountDownLatch; - -/** - * Azure Repository - * Main class to easily run Azure from a IDE. - * It sets all the options to run the Azure plugin and access it from Sense. - * - * In order to run this class set configure the following: - * 1) Set `-Des.path.home=` to a directory containing an ES config directory - * 2) Set `-Dcloud.azure.storage.my_account.account=account_name` - * 3) Set `-Dcloud.azure.storage.my_account.key=account_key` - * - * Then you can run REST calls like: - *
- # Clean test env
- curl -XDELETE localhost:9200/foo?pretty
- curl -XDELETE localhost:9200/_snapshot/my_backup1?pretty
- curl -XDELETE localhost:9200/_snapshot/my_backup2?pretty
-
- # Create data
- curl -XPUT localhost:9200/foo/bar/1?pretty -d '{
- "foo": "bar"
- }'
- curl -XPOST localhost:9200/foo/_refresh?pretty
- curl -XGET localhost:9200/foo/_count?pretty
-
- # Create repository using default account
- curl -XPUT localhost:9200/_snapshot/my_backup1?pretty -d '{
-   "type": "azure"
- }'
-
- # Backup
- curl -XPOST "localhost:9200/_snapshot/my_backup1/snap1?pretty&wait_for_completion=true"
-
- # Remove data
- curl -XDELETE localhost:9200/foo?pretty
-
- # Restore data
- curl -XPOST "localhost:9200/_snapshot/my_backup1/snap1/_restore?pretty&wait_for_completion=true"
- curl -XGET localhost:9200/foo/_count?pretty
- 
- * - * If you want to define a secondary repository: - * - * 4) Set `-Dcloud.azure.storage.my_account.default=true` - * 5) Set `-Dcloud.azure.storage.my_account2.account=account_name` - * 6) Set `-Dcloud.azure.storage.my_account2.key=account_key_secondary` - * - * Then you can run REST calls like: - *
- # Remove data
- curl -XDELETE localhost:9200/foo?pretty
-
- # Create repository using account2 (secondary)
- curl -XPUT localhost:9200/_snapshot/my_backup2?pretty -d '{
-   "type": "azure",
-   "settings": {
-     "account" : "my_account2",
-     "location_mode": "secondary_only"
-   }
- }'
-
- # Restore data from the secondary endpoint
- curl -XPOST "localhost:9200/_snapshot/my_backup2/snap1/_restore?pretty&wait_for_completion=true"
- curl -XGET localhost:9200/foo/_count?pretty
- 
- */ -public class AzureRepositoryF { - public static void main(String[] args) throws Throwable { - Settings.Builder settings = Settings.builder(); - settings.put("http.cors.enabled", "true"); - settings.put("http.cors.allow-origin", "*"); - settings.put("cluster.name", AzureRepositoryF.class.getSimpleName()); - - // Example for azure repo settings - // settings.put("cloud.azure.storage.my_account1.account", "account_name"); - // settings.put("cloud.azure.storage.my_account1.key", "account_key"); - // settings.put("cloud.azure.storage.my_account1.default", true); - // settings.put("cloud.azure.storage.my_account2.account", "account_name"); - // settings.put("cloud.azure.storage.my_account2.key", "account_key_secondary"); - - final CountDownLatch latch = new CountDownLatch(1); - final Node node = new MockNode(settings.build(), Collections.singletonList(AzureRepositoryPlugin.class)); - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - try { - IOUtils.close(node); - } catch (IOException e) { - throw new ElasticsearchException(e); - } finally { - latch.countDown(); - } - } - }); - node.start(); - latch.await(); - } -} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 01b26bad343d5..26b02278eddc0 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -47,7 +47,6 @@ private AzureRepository azureRepository(Settings settings) throws StorageExcepti TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, null); } - public void testReadonlyDefault() throws StorageException, IOException, URISyntaxException { assertThat(azureRepository(Settings.EMPTY).isReadOnly(), is(false)); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java new file mode 100644 index 0000000000000..ebd8241e710ea --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageFixture.java @@ -0,0 +1,136 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories.azure; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.mocksocket.MockHttpServer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; + +/** + * {@link AzureStorageFixture} is a fixture that emulates an Azure Storage service. + *

+ * It starts an asynchronous socket server that binds to a random local port. The server parses
+ * HTTP requests and uses a {@link AzureStorageTestServer} to handle them before returning
+ * them to the client as HTTP responses.
+ */
+public class AzureStorageFixture {
+
+    public static void main(String[] args) throws Exception {
+        if (args == null || args.length != 2) {
+            throw new IllegalArgumentException("AzureStorageFixture <working directory> <container>");
+        }
+
+        final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+        final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+
+        try {
+            final Path workingDirectory = workingDir(args[0]);
+            // Writes the PID of the current Java process in a `pid` file located in the working directory
+            writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+
+            final String addressAndPort = addressToString(httpServer.getAddress());
+            // Writes the address and port of the http server in a `ports` file located in the working directory
+            writeFile(workingDirectory, "ports", addressAndPort);
+
+            // Emulates Azure
+            final String storageUrl = "http://" + addressAndPort;
+            final AzureStorageTestServer testServer = new AzureStorageTestServer(storageUrl);
+            testServer.createContainer(args[1]);
+
+            httpServer.createContext("/", new ResponseHandler(testServer));
+            httpServer.start();
+
+            // Wait to be killed
+            Thread.sleep(Long.MAX_VALUE);
+
+        } finally {
+            httpServer.stop(0);
+        }
+    }
+
+    @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
+    private static Path workingDir(final String dir) {
+        return Paths.get(dir);
+    }
+
+    private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
+        final Path tempPidFile = Files.createTempFile(dir, null, null);
+        Files.write(tempPidFile, singleton(content));
+        Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+    }
+
+    private static String addressToString(final SocketAddress address) {
+        final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
+        if (inetSocketAddress.getAddress() instanceof Inet6Address) {
+            return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
+        } else {
+            return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+        }
+    }
+
+    static class ResponseHandler implements HttpHandler {
+
+        private final AzureStorageTestServer server;
+
+        private ResponseHandler(final AzureStorageTestServer server) {
+            this.server = server;
+        }
+
+        @Override
+        public void handle(HttpExchange exchange) throws IOException {
+            String method = exchange.getRequestMethod();
+            String path = server.getEndpoint() + exchange.getRequestURI().getRawPath();
+            String query = exchange.getRequestURI().getRawQuery();
+            Map<String, List<String>> headers = exchange.getRequestHeaders();
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            Streams.copy(exchange.getRequestBody(), out);
+
+            final AzureStorageTestServer.Response response = server.handle(method, path, query, headers, out.toByteArray());
+
+            Map<String, List<String>> responseHeaders = exchange.getResponseHeaders();
+            responseHeaders.put("Content-Type", singletonList(response.contentType));
+            response.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
+            exchange.sendResponseHeaders(response.status.getStatus(), response.body.length);
+            if (response.body.length > 0) {
+                exchange.getResponseBody().write(response.body);
+            }
+            exchange.close();
+        }
+    }
+}
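For orientation, here is a minimal sketch of how an external process can locate and exercise the fixture once it is running. The `ports` file path below is hypothetical (the Gradle build chooses the real working directory), and plain `HttpURLConnection` stands in for whatever client the caller actually uses:

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class AzureStorageFixtureClientSketch {
        public static void main(String[] args) throws IOException {
            // The fixture writes "host:port" (or "[ipv6]:port") into its "ports" file on startup
            Path portsFile = Paths.get("build/fixtures/azureStorageFixture/ports"); // hypothetical location
            String addressAndPort = new String(Files.readAllBytes(portsFile), StandardCharsets.UTF_8).trim();

            // A HEAD request on a container is answered by the "Get Container Properties" handler
            URL url = new URL("http://" + addressAndPort + "/container_test");
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            connection.setRequestMethod("HEAD");
            System.out.println("Get Container Properties returned HTTP " + connection.getResponseCode());
            connection.disconnect();
        }
    }
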
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java
index 68b84594d62ca..80035d8f78840 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java
@@ -25,8 +25,8 @@
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.internal.io.Streams;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -66,6 +66,8 @@ public void createContainer(String account, LocationMode mode, String container)
 
     @Override
     public void deleteFiles(String account, LocationMode mode, String container, String path) {
+        final Map<String, BlobMetaData> blobs = listBlobsByPrefix(account, mode, container, path, null);
+        blobs.keySet().forEach(key -> deleteBlob(account, mode, container, key));
     }
 
     @Override
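The two lines added to deleteFiles give the mock real delete-by-prefix semantics: list everything under the path, then delete each key. The same pattern in isolation, over a plain concurrent map (the class and method names here are illustrative, not the mock's API):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class PrefixDeleteSketch {
        private final Map<String, byte[]> blobs = new ConcurrentHashMap<>();

        // Mirrors deleteFiles(account, mode, container, path): remove every blob
        // whose name starts with the given path prefix
        void deleteByPrefix(String path) {
            blobs.keySet().removeIf(key -> key.startsWith(path));
        }

        public static void main(String[] args) {
            PrefixDeleteSketch store = new PrefixDeleteSketch();
            store.blobs.put("indices/0/segments_1", new byte[0]);
            store.blobs.put("snap-1.dat", new byte[0]);
            store.deleteByPrefix("indices/");
            System.out.println(store.blobs.keySet()); // prints [snap-1.dat]
        }
    }
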
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java
new file mode 100644
index 0000000000000..584428f9a45b0
--- /dev/null
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.azure;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.path.PathTrie;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.RestUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonMap;
+
+/**
+ * {@link AzureStorageTestServer} emulates an Azure Storage service through a {@link #handle(String, String, String, Map, byte[])}
+ * method that provides appropriate responses for specific requests like the real Azure platform would do.
+ * It is based on the official documentation available at https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api.
+ */
+public class AzureStorageTestServer {
+
+    private static final byte[] EMPTY_BYTE = new byte[0];
+
+    /** Containers stored on this test server **/
+    private final Map<String, Container> containers = ConcurrentCollections.newConcurrentMap();
+
+    /** Request handlers for the requests made by the Azure client **/
+    private final PathTrie<RequestHandler> handlers;
+
+    /** Server endpoint **/
+    private final String endpoint;
+
+    /** Increments for the request ids **/
+    private final AtomicLong requests = new AtomicLong(0);
+
+    /**
+     * Creates a {@link AzureStorageTestServer} with a custom endpoint
+     */
+    AzureStorageTestServer(final String endpoint) {
+        this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
+        this.handlers = defaultHandlers(endpoint, containers);
+    }
+
+    /** Creates a container in the test server **/
+    void createContainer(final String containerName) {
+        containers.put(containerName, new Container(containerName));
+    }
+
+    public String getEndpoint() {
+        return endpoint;
+    }
+
+    /**
+     * Returns a response for the given request
+     *
+     * @param method  the HTTP method of the request
+     * @param path    the path of the URL of the request
+     * @param query   the query string of the URL of the request
+     * @param headers the HTTP headers of the request
+     * @param body    the HTTP request body
+     * @return a {@link Response}
+     * @throws IOException if something goes wrong
+     */
+    public Response handle(final String method,
+                           final String path,
+                           final String query,
+                           final Map<String, List<String>> headers,
+                           byte[] body) throws IOException {
+
+        final long requestId = requests.incrementAndGet();
+
+        final Map<String, String> params = new HashMap<>();
+        if (query != null) {
+            RestUtils.decodeQueryString(query, 0, params);
+        }
+
+        final RequestHandler handler = handlers.retrieve(method + " " + path, params);
+        if (handler != null) {
+            return handler.execute(params, headers, body, requestId);
+        } else {
+            return newInternalError(requestId);
+        }
+    }
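Because handle() is the single dispatch point, a test can drive the server without going through HTTP at all. A minimal sketch, assuming it runs in the same package (the constructor is package-private); the endpoint value is arbitrary:

    import java.nio.charset.StandardCharsets;
    import java.util.Collections;

    public class HandleSketch {
        public static void main(String[] args) throws Exception {
            AzureStorageTestServer server = new AzureStorageTestServer("http://localhost:9999");
            server.createContainer("container_test");

            // Put a blob, then fetch its properties through the same dispatch path the fixture uses
            AzureStorageTestServer.Response put = server.handle("PUT",
                    server.getEndpoint() + "/container_test/blob", null,
                    Collections.emptyMap(), "hello".getBytes(StandardCharsets.UTF_8));
            AzureStorageTestServer.Response head = server.handle("HEAD",
                    server.getEndpoint() + "/container_test/blob", null,
                    Collections.emptyMap(), new byte[0]);
            System.out.println(put.status + " then " + head.status); // CREATED then OK
        }
    }
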
+
+    @FunctionalInterface
+    interface RequestHandler {
+
+        /**
+         * Simulates the execution of an Azure Storage request and returns a corresponding response.
+         *
+         * @param params    the request's query string parameters
+         * @param headers   the request's headers
+         * @param body      the request body provided as a byte array
+         * @param requestId a unique id for the incoming request
+         * @return the corresponding response
+         *
+         * @throws IOException if something goes wrong
+         */
+        Response execute(Map<String, String> params, Map<String, List<String>> headers, byte[] body, long requestId) throws IOException;
+    }
+
+    /** Builds the default request handlers **/
+    private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, final Map<String, Container> containers) {
+        final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
+
+        // Get Blob Properties
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties
+        objectsPaths("HEAD " + endpoint + "/{container}").forEach(path ->
+            handlers.insert(path, (params, headers, body, requestId) -> {
+                final String containerName = params.get("container");
+
+                final Container container = containers.get(containerName);
+                if (container == null) {
+                    return newContainerNotFoundError(requestId);
+                }
+
+                final String blobName = objectName(params);
+                for (Map.Entry<String, byte[]> object : container.objects.entrySet()) {
+                    if (object.getKey().equals(blobName)) {
+                        Map<String, String> responseHeaders = new HashMap<>();
+                        responseHeaders.put("x-ms-blob-content-length", String.valueOf(object.getValue().length));
+                        responseHeaders.put("x-ms-blob-type", "blockblob");
+                        return new Response(RestStatus.OK, responseHeaders, "text/plain", EMPTY_BYTE);
+                    }
+                }
+                return newBlobNotFoundError(requestId);
+            })
+        );
+
+        // Put Blob
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob
+        objectsPaths("PUT " + endpoint + "/{container}").forEach(path ->
+            handlers.insert(path, (params, headers, body, requestId) -> {
+                final String destContainerName = params.get("container");
+
+                final Container destContainer = containers.get(destContainerName);
+                if (destContainer == null) {
+                    return newContainerNotFoundError(requestId);
+                }
+
+                final String destBlobName = objectName(params);
+
+                // Request is a copy request
+                List<String> headerCopySource = headers.getOrDefault("x-ms-copy-source", emptyList());
+                if (headerCopySource.isEmpty() == false) {
+                    String srcBlobName = headerCopySource.get(0);
+
+                    Container srcContainer = null;
+                    for (Container container : containers.values()) {
+                        String prefix = endpoint + "/" + container.name + "/";
+                        if (srcBlobName.startsWith(prefix)) {
+                            srcBlobName = srcBlobName.replaceFirst(prefix, "");
+                            srcContainer = container;
+                            break;
+                        }
+                    }
+
+                    if (srcContainer == null || srcContainer.objects.containsKey(srcBlobName) == false) {
+                        return newBlobNotFoundError(requestId);
+                    }
+
+                    byte[] bytes = srcContainer.objects.get(srcBlobName);
+                    if (bytes != null) {
+                        destContainer.objects.put(destBlobName, bytes);
+                        return new Response(RestStatus.ACCEPTED, singletonMap("x-ms-copy-status", "success"), "text/plain", EMPTY_BYTE);
+                    } else {
+                        return newBlobNotFoundError(requestId);
+                    }
+                } else {
+                    destContainer.objects.put(destBlobName, body);
+                }
+
+                return new Response(RestStatus.CREATED, emptyMap(), "text/plain", EMPTY_BYTE);
+            })
+        );
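The copy branch above is selected purely by the x-ms-copy-source header, whose value must be the absolute URL of the source blob; the request body is ignored in that case. A sketch of a same-container copy (same package assumed, values illustrative):

    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    public class CopyBlobSketch {
        public static void main(String[] args) throws Exception {
            AzureStorageTestServer server = new AzureStorageTestServer("http://localhost:9999");
            server.createContainer("container_test");
            server.handle("PUT", server.getEndpoint() + "/container_test/source", null,
                    Collections.emptyMap(), "data".getBytes(StandardCharsets.UTF_8));

            // The header value is the absolute source blob URL; the server strips the
            // endpoint and container prefix to resolve the source blob name
            Map<String, List<String>> headers = Collections.singletonMap("x-ms-copy-source",
                    Collections.singletonList(server.getEndpoint() + "/container_test/source"));
            AzureStorageTestServer.Response copy = server.handle("PUT",
                    server.getEndpoint() + "/container_test/dest", null, headers, new byte[0]);
            System.out.println(copy.status); // ACCEPTED, with x-ms-copy-status: success
        }
    }
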
+        // Get Blob
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob
+        objectsPaths("GET " + endpoint + "/{container}").forEach(path ->
+            handlers.insert(path, (params, headers, body, requestId) -> {
+                final String containerName = params.get("container");
+
+                final Container container = containers.get(containerName);
+                if (container == null) {
+                    return newContainerNotFoundError(requestId);
+                }
+
+                final String blobName = objectName(params);
+                if (container.objects.containsKey(blobName)) {
+                    Map<String, String> responseHeaders = new HashMap<>();
+                    responseHeaders.put("x-ms-copy-status", "success");
+                    responseHeaders.put("x-ms-blob-type", "blockblob");
+                    return new Response(RestStatus.OK, responseHeaders, "application/octet-stream", container.objects.get(blobName));
+                }
+                return newBlobNotFoundError(requestId);
+            })
+        );
+
+        // Delete Blob
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob
+        objectsPaths("DELETE " + endpoint + "/{container}").forEach(path ->
+            handlers.insert(path, (params, headers, body, requestId) -> {
+                final String containerName = params.get("container");
+
+                final Container container = containers.get(containerName);
+                if (container == null) {
+                    return newContainerNotFoundError(requestId);
+                }
+
+                final String blobName = objectName(params);
+                if (container.objects.remove(blobName) != null) {
+                    return new Response(RestStatus.ACCEPTED, emptyMap(), "text/plain", EMPTY_BYTE);
+                }
+                return newBlobNotFoundError(requestId);
+            })
+        );
+
+        // List Blobs
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs
+        handlers.insert("GET " + endpoint + "/{container}/", (params, headers, body, requestId) -> {
+            final String containerName = params.get("container");
+
+            final Container container = containers.get(containerName);
+            if (container == null) {
+                return newContainerNotFoundError(requestId);
+            }
+
+            final String prefix = params.get("prefix");
+            return newEnumerationResultsResponse(requestId, container, prefix);
+        });
+
+        // Get Container Properties
+        //
+        // https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
+        handlers.insert("HEAD " + endpoint + "/{container}", (params, headers, body, requestId) -> {
+            String container = params.get("container");
+            if (Strings.hasText(container) && containers.containsKey(container)) {
+                return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE);
+            } else {
+                return newContainerNotFoundError(requestId);
+            }
+        });
+
+        return handlers;
+    }
+
+    /**
+     * Represents an Azure Storage container.
+     */
+    static class Container {
+
+        /** Container name **/
+        final String name;
+
+        /** Blobs contained in the container **/
+        final Map<String, byte[]> objects;
+
+        Container(final String name) {
+            this.name = Objects.requireNonNull(name);
+            this.objects = ConcurrentCollections.newConcurrentMap();
+        }
+    }
+
+    /**
+     * Represents an HTTP Response.
+     */
+    static class Response {
+
+        final RestStatus status;
+        final Map<String, String> headers;
+        final String contentType;
+        final byte[] body;
+
+        Response(final RestStatus status, final Map<String, String> headers, final String contentType, final byte[] body) {
+            this.status = Objects.requireNonNull(status);
+            this.headers = Objects.requireNonNull(headers);
+            this.contentType = Objects.requireNonNull(contentType);
+            this.body = Objects.requireNonNull(body);
+        }
+    }
+
+    /**
+     * Expands a path like "http://host:port/{container}" into 10 derived paths like:
+     * - http://host:port/{container}/{path0}
+     * - http://host:port/{container}/{path0}/{path1}
+     * - http://host:port/{container}/{path0}/{path1}/{path2}
+     * - etc
+     */
+    private static List<String> objectsPaths(final String path) {
+        final List<String> paths = new ArrayList<>();
+        String p = path;
+        for (int i = 0; i < 10; i++) {
+            p = p + "/{path" + i + "}";
+            paths.add(p);
+        }
+        return paths;
+    }
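objectsPaths exists because the PathTrie has no "any depth" wildcard: registering the ten expansions lets a single handler serve blob names nested up to ten path segments deep. A self-contained sketch of what one registration expands to:

    public class ObjectsPathsSketch {
        public static void main(String[] args) {
            String p = "HEAD http://localhost:9999/{container}";
            for (int i = 0; i < 10; i++) {
                p = p + "/{path" + i + "}";
                // HEAD http://localhost:9999/{container}/{path0}
                // HEAD http://localhost:9999/{container}/{path0}/{path1}
                // ... and so on, down to {path9}
                System.out.println(p);
            }
        }
    }
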
+    /**
+     * Retrieves the object name from all derived paths named {pathX} where 0 <= X < 10.
+     *
+     * This is the counterpart of {@link #objectsPaths(String)}
+     */
+    private static String objectName(final Map<String, String> params) {
+        final StringBuilder name = new StringBuilder();
+        for (int i = 0; i < 10; i++) {
+            String value = params.getOrDefault("path" + i, null);
+            if (value != null) {
+                if (name.length() > 0) {
+                    name.append('/');
+                }
+                name.append(value);
+            }
+        }
+        return name.toString();
+    }
+
+    /**
+     * Azure EnumerationResults Response
+     */
+    private static Response newEnumerationResultsResponse(final long requestId, final Container container, final String prefix) {
+        final String id = Long.toString(requestId);
+        final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<EnumerationResults>");
+        if (prefix != null) {
+            response.append("<Prefix>").append(prefix).append("</Prefix>");
+        } else {
+            response.append("<Prefix/>");
+        }
+        response.append("<MaxResults>").append(container.objects.size()).append("</MaxResults>");
+        response.append("<Blobs>");
+
+        int count = 0;
+        for (Map.Entry<String, byte[]> object : container.objects.entrySet()) {
+            String objectName = object.getKey();
+            if (prefix == null || objectName.startsWith(prefix)) {
+                response.append("<Blob>");
+                response.append("<Name>").append(objectName).append("</Name>");
+                response.append("<Properties>");
+                response.append("<Content-Length>").append(object.getValue().length).append("</Content-Length>");
+                response.append("<CopyId>").append(count++).append("</CopyId>");
+                response.append("<CopyStatus>success</CopyStatus>");
+                response.append("<BlobType>BlockBlob</BlobType>");
+                response.append("</Properties>");
+                response.append("</Blob>");
+            }
+        }
+
+        response.append("</Blobs>");
+        response.append("<NextMarker/>");
+        response.append("</EnumerationResults>");
+
+        return new Response(RestStatus.OK, singletonMap("x-ms-request-id", id), "application/xml", response.toString().getBytes(UTF_8));
+    }
+
+    private static Response newContainerNotFoundError(final long requestId) {
+        return newError(requestId, RestStatus.NOT_FOUND, "ContainerNotFound", "The specified container does not exist");
+    }
+
+    private static Response newBlobNotFoundError(final long requestId) {
+        return newError(requestId, RestStatus.NOT_FOUND, "BlobNotFound", "The specified blob does not exist");
+    }
+
+    private static Response newInternalError(final long requestId) {
+        return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "The server encountered an internal error");
+    }
+
+    /**
+     * Azure Error
+     *
+     * https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+     */
+    private static Response newError(final long requestId,
+                                     final RestStatus status,
+                                     final String code,
+                                     final String message) {
+
+        final StringBuilder response = new StringBuilder();
+        response.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+        response.append("<Error>");
+        response.append("<Code>").append(code).append("</Code>");
+        response.append("<Message>").append(message).append("</Message>");
+        response.append("</Error>");
+
+        final Map<String, String> headers = new HashMap<>(2);
+        headers.put("x-ms-request-id", String.valueOf(requestId));
+        headers.put("x-ms-error-code", code);
+
+        return new Response(status, headers, "application/xml", response.toString().getBytes(UTF_8));
+    }
+}
diff --git a/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml b/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml
index fb929f1e822ff..25726fa8f9b96 100644
--- a/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml
+++ b/plugins/repository-azure/src/test/resources/rest-api-spec/test/repository_azure/10_basic.yml
@@ -1,6 +1,6 @@
-# Integration tests for Azure Repository component
+# Integration tests for repository-azure
 #
-"Azure Repository loaded":
+"Plugin repository-azure is
loaded": - do: cluster.state: {} @@ -11,3 +11,177 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-azure } +--- +"Snapshot/Restore with repository-azure": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: azure + settings: + container: "container_test" + client: "integration_test" + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: {repository.settings.container : "container_test"} + - match: {repository.settings.client : "integration_test"} + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index bf2768a4312d8..f627b7aee10c4 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -100,9 +100,7 @@ task createServiceAccountFile() { integTestCluster { dependsOn createServiceAccountFile, googleCloudStorageFixture - setupCommand 'create-elasticsearch-keystore', 'bin/elasticsearch-keystore', 'create' - setupCommand 
'add-credentials-to-elasticsearch-keystore', - 'bin/elasticsearch-keystore', 'add-file', 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" + keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" /* Use a closure on the string to delay evaluation until tests are executed */ setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 7b985ebd176d6..27736e24dbf51 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -24,12 +24,13 @@ import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.util.Locale; +import java.util.concurrent.ConcurrentHashMap; public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override protected BlobStore newBlobStore() { String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockStorage.newStorageClient(bucket, getTestName())); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 1a173b440659d..19551f3b082fa 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -27,14 +27,13 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; -import org.junit.BeforeClass; +import org.junit.AfterClass; -import java.net.SocketPermission; -import java.security.AccessController; import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -42,9 +41,9 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos private static final String BUCKET = "gcs-repository-test"; - // Static storage client shared among all nodes in order to act like a remote repository service: + // Static list of blobs shared among all nodes in order to act like a remote repository service: // all nodes must see the same content - private static final AtomicReference storage = new AtomicReference<>(); + private static final ConcurrentMap blobs = new ConcurrentHashMap<>(); @Override protected Collection> nodePlugins() { @@ -62,15 +61,17 @@ protected void createTestRepository(String name) { .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); } - 
@BeforeClass - public static void setUpStorage() { - storage.set(MockStorage.newStorageClient(BUCKET, GoogleCloudStorageBlobStoreRepositoryTests.class.getName())); + @AfterClass + public static void wipeRepository() { + blobs.clear(); } public static class MockGoogleCloudStoragePlugin extends GoogleCloudStoragePlugin { + public MockGoogleCloudStoragePlugin(final Settings settings) { super(settings); } + @Override protected GoogleCloudStorageService createStorageService(Environment environment) { return new MockGoogleCloudStorageService(environment, getClientsSettings()); @@ -85,9 +86,7 @@ public static class MockGoogleCloudStorageService extends GoogleCloudStorageServ @Override public Storage createClient(String clientName) { - // The actual impl might open a connection. So check we have permission when this call is made. - AccessController.checkPermission(new SocketPermission("*", "connect")); - return storage.get(); + return new MockStorage(BUCKET, blobs); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 00c0538d198bd..5e25307805235 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -24,12 +24,13 @@ import org.elasticsearch.repositories.ESBlobStoreTestCase; import java.util.Locale; +import java.util.concurrent.ConcurrentHashMap; public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { @Override protected BlobStore newBlobStore() { String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, MockStorage.newStorageClient(bucket, getTestName())); + return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucket, new MockStorage(bucket, new ConcurrentHashMap<>())); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java index 35606d724cc4c..31c85d35f3fe8 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java @@ -22,7 +22,7 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.repositories.gcs.GoogleCloudStorageTestServer.Response; diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index a04dae294975a..325cea132beb6 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -19,74 +19,289 @@ package org.elasticsearch.repositories.gcs; +import com.google.api.client.googleapis.json.GoogleJsonError; +import 
com.google.api.client.googleapis.json.GoogleJsonResponseException;
+import com.google.api.client.http.AbstractInputStreamContent;
+import com.google.api.client.http.HttpHeaders;
+import com.google.api.client.http.HttpMethods;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestInitializer;
+import com.google.api.client.http.HttpResponseException;
 import com.google.api.client.http.LowLevelHttpRequest;
 import com.google.api.client.http.LowLevelHttpResponse;
-import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.http.MultipartContent;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.testing.http.MockHttpTransport;
 import com.google.api.client.testing.http.MockLowLevelHttpRequest;
 import com.google.api.client.testing.http.MockLowLevelHttpResponse;
 import com.google.api.services.storage.Storage;
+import com.google.api.services.storage.model.Bucket;
+import com.google.api.services.storage.model.StorageObject;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.rest.RestStatus;
 
+import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.util.Map;
+import java.io.InputStream;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.mockito.Mockito.mock;
 
 /**
- * {@link MockStorage} is a utility class that provides {@link Storage} clients that works
- * against an embedded {@link GoogleCloudStorageTestServer}.
+ * {@link MockStorage} mocks a {@link Storage} client by storing all the blobs
+ * in a given concurrent map.
  */
-class MockStorage extends com.google.api.client.testing.http.MockHttpTransport {
+class MockStorage extends Storage {
 
-    /**
-     * Embedded test server that emulates a Google Cloud Storage service
-     **/
-    private final GoogleCloudStorageTestServer server = new GoogleCloudStorageTestServer();
+    /* A custom HTTP header name used to propagate the name of the blobs to delete in batch requests */
+    private static final String DELETION_HEADER = "x-blob-to-delete";
 
-    private MockStorage() {
+    private final String bucketName;
+    private final ConcurrentMap<String, byte[]> blobs;
+
+    MockStorage(final String bucket, final ConcurrentMap<String, byte[]> blobs) {
+        super(new MockedHttpTransport(blobs), mock(JsonFactory.class), mock(HttpRequestInitializer.class));
+        this.bucketName = bucket;
+        this.blobs = blobs;
     }
 
     @Override
-    public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
-        return new MockLowLevelHttpRequest() {
-            @Override
-            public LowLevelHttpResponse execute() throws IOException {
-                return convert(server.handle(method, url, getHeaders(), getContentAsBytes()));
-            }
+    public Buckets buckets() {
+        return new MockBuckets();
+    }
 
-            /** Returns the LowLevelHttpRequest body as an array of bytes **/
-            byte[] getContentAsBytes() throws IOException {
-                ByteArrayOutputStream out = new ByteArrayOutputStream();
-                if (getStreamingContent() != null) {
-                    getStreamingContent().writeTo(out);
+    @Override
+    public Objects objects() {
+        return new MockObjects();
+    }
+
+    class MockBuckets extends Buckets {
+
+        @Override
+        public Get get(String getBucket) {
+            return new Get(getBucket) {
+                @Override
+                public Bucket execute() {
+                    if (bucketName.equals(getBucket())) {
+                        Bucket bucket = new Bucket();
+                        bucket.setId(bucketName);
+                        return bucket;
+                    } else {
+                        return null;
+                    }
                 }
-                return out.toByteArray();
-            }
-        };
+            };
+        }
     }
 
-    private static MockLowLevelHttpResponse convert(final GoogleCloudStorageTestServer.Response response) {
-        final MockLowLevelHttpResponse lowLevelHttpResponse = new MockLowLevelHttpResponse();
-        for (Map.Entry<String, String> header : response.headers.entrySet()) {
-            lowLevelHttpResponse.addHeader(header.getKey(), header.getValue());
+    class MockObjects extends Objects {
+
+        @Override
+        public Get get(String getBucket, String getObject) {
+            return new Get(getBucket, getObject) {
+                @Override
+                public StorageObject execute() throws IOException {
+                    if (bucketName.equals(getBucket()) == false) {
+                        throw newBucketNotFoundException(getBucket());
+                    }
+                    if (blobs.containsKey(getObject()) == false) {
+                        throw newObjectNotFoundException(getObject());
+                    }
+
+                    StorageObject storageObject = new StorageObject();
+                    storageObject.setId(getObject());
+                    return storageObject;
+                }
+
+                @Override
+                public InputStream executeMediaAsInputStream() throws IOException {
+                    if (bucketName.equals(getBucket()) == false) {
+                        throw newBucketNotFoundException(getBucket());
+                    }
+                    if (blobs.containsKey(getObject()) == false) {
+                        throw newObjectNotFoundException(getObject());
+                    }
+                    return new ByteArrayInputStream(blobs.get(getObject()));
+                }
+            };
+        }
+
+        @Override
+        public Insert insert(String insertBucket, StorageObject insertObject, AbstractInputStreamContent insertStream) {
+            return new Insert(insertBucket, insertObject) {
+                @Override
+                public StorageObject execute() throws IOException {
+                    if (bucketName.equals(getBucket()) == false) {
+                        throw newBucketNotFoundException(getBucket());
+                    }
+
+                    ByteArrayOutputStream out = new ByteArrayOutputStream();
+                    Streams.copy(insertStream.getInputStream(), out);
+                    blobs.put(getName(), out.toByteArray());
+                    return null;
+                }
+            };
+        }
+
+        @Override
+        public List list(String listBucket) {
+            return new List(listBucket) {
+                @Override
+                public com.google.api.services.storage.model.Objects execute() throws IOException {
+                    if (bucketName.equals(getBucket()) == false) {
+                        throw newBucketNotFoundException(getBucket());
+                    }
+
+                    final com.google.api.services.storage.model.Objects objects = new com.google.api.services.storage.model.Objects();
+
+                    final java.util.List<StorageObject> storageObjects = new ArrayList<>();
+                    for (Map.Entry<String, byte[]> blob : blobs.entrySet()) {
+                        if (getPrefix() == null || blob.getKey().startsWith(getPrefix())) {
+                            StorageObject storageObject = new StorageObject();
+                            storageObject.setId(blob.getKey());
+                            storageObject.setName(blob.getKey());
+                            storageObject.setSize(BigInteger.valueOf((long) blob.getValue().length));
+                            storageObjects.add(storageObject);
+                        }
+                    }
+
+                    objects.setItems(storageObjects);
+                    return objects;
+                }
+            };
         }
-        lowLevelHttpResponse.setContentType(response.contentType);
-        lowLevelHttpResponse.setStatusCode(response.status.getStatus());
-        lowLevelHttpResponse.setReasonPhrase(response.status.toString());
-        if (response.body != null) {
-            lowLevelHttpResponse.setContent(response.body);
-            lowLevelHttpResponse.setContentLength(response.body.length);
+
+        @Override
+        public Delete delete(String deleteBucket, String deleteObject) {
+            return new Delete(deleteBucket, deleteObject) {
+                @Override
+                public Void execute() throws IOException {
+                    if (bucketName.equals(getBucket()) == false) {
+                        throw newBucketNotFoundException(getBucket());
+                    }
+
+                    if (blobs.containsKey(getObject()) == false) {
+                        throw newObjectNotFoundException(getObject());
+                    }
+
+                    blobs.remove(getObject());
+                    return null;
+                }
+
+                @Override
+                public HttpRequest buildHttpRequest() throws IOException {
+                    HttpRequest httpRequest = super.buildHttpRequest();
+                    httpRequest.getHeaders().put(DELETION_HEADER, getObject());
+                    return httpRequest;
+                }
+            };
+        }
+
+        @Override
+        public Copy copy(String srcBucket, String srcObject, String destBucket, String destObject, StorageObject content) {
+            return new Copy(srcBucket, srcObject, destBucket, destObject, content) {
+                @Override
+                public StorageObject execute() throws IOException {
+                    if (bucketName.equals(getSourceBucket()) == false) {
+                        throw newBucketNotFoundException(getSourceBucket());
+                    }
+                    if (bucketName.equals(getDestinationBucket()) == false) {
+                        throw newBucketNotFoundException(getDestinationBucket());
+                    }
+
+                    final byte[] bytes = blobs.get(getSourceObject());
+                    if (bytes == null) {
+                        throw newObjectNotFoundException(getSourceObject());
+                    }
+                    blobs.put(getDestinationObject(), bytes);
+
+                    StorageObject storageObject = new StorageObject();
+                    storageObject.setId(getDestinationObject());
+                    return storageObject;
+                }
+            };
         }
-        return lowLevelHttpResponse;
+    }
+
+    private static GoogleJsonResponseException newBucketNotFoundException(final String bucket) {
+        HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Bucket not found: " + bucket, new HttpHeaders());
+        return new GoogleJsonResponseException(builder, new GoogleJsonError());
+    }
+
+    private static GoogleJsonResponseException newObjectNotFoundException(final String object) {
+        HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Object not found: " + object, new HttpHeaders());
+        return new GoogleJsonResponseException(builder, new GoogleJsonError());
     }
 
     /**
-     * Instanciates a mocked Storage client for tests.
+     * {@link MockedHttpTransport} extends the existing testing transport to analyze the content
+     * of {@link com.google.api.client.googleapis.batch.BatchRequest} and delete the appropriate
+     * blobs. We use this because {@link Storage#batch()} is final and there is no other way to
+     * extend batch requests for testing purposes.
      */
-    public static Storage newStorageClient(final String bucket, final String applicationName) {
-        MockStorage mockStorage = new MockStorage();
-        mockStorage.server.createBucket(bucket);
+    static class MockedHttpTransport extends MockHttpTransport {
+
+        private final ConcurrentMap<String, byte[]> blobs;
+
+        MockedHttpTransport(final ConcurrentMap<String, byte[]> blobs) {
+            this.blobs = blobs;
+        }
 
-        return new Storage.Builder(mockStorage, JacksonFactory.getDefaultInstance(), null)
-            .setApplicationName(applicationName)
-            .build();
+        @Override
+        public LowLevelHttpRequest buildRequest(final String method, final String url) throws IOException {
+            // We analyze the content of the Batch request to detect our custom HTTP header,
+            // and extract from it the name of the blob to delete. Then we reply a simple
+            // batch response so that the client parser is happy.
+            //
+            // See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch for the
+            // format of the batch request body.
+ if (HttpMethods.POST.equals(method) && url.endsWith("/batch")) { + return new MockLowLevelHttpRequest() { + @Override + public LowLevelHttpResponse execute() throws IOException { + final String contentType = new MultipartContent().getType(); + + final StringBuilder builder = new StringBuilder(); + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + getStreamingContent().writeTo(out); + + Streams.readAllLines(new ByteArrayInputStream(out.toByteArray()), line -> { + if (line != null && line.startsWith(DELETION_HEADER)) { + builder.append("--__END_OF_PART__\r\n"); + builder.append("Content-Type: application/http").append("\r\n"); + builder.append("\r\n"); + builder.append("HTTP/1.1 "); + + final String blobName = line.substring(line.indexOf(':') + 1).trim(); + if (blobs.containsKey(blobName)) { + builder.append(RestStatus.OK.getStatus()); + blobs.remove(blobName); + } else { + builder.append(RestStatus.NOT_FOUND.getStatus()); + } + builder.append("\r\n"); + builder.append("Content-Type: application/json; charset=UTF-8").append("\r\n"); + builder.append("Content-Length: 0").append("\r\n"); + builder.append("\r\n"); + } + }); + builder.append("\r\n"); + builder.append("--__END_OF_PART__--"); + } + + MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); + response.setStatusCode(200); + response.setContent(builder.toString()); + response.setContentType(contentType); + return response; + } + }; + } else { + return super.buildRequest(method, url); + } + } } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 51bb6f2024cd4..09d9782aa91f8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -151,8 +151,7 @@ class S3Repository extends BlobStoreRepository { /** * Constructs an s3 backed repository */ - S3Repository(RepositoryMetaData metadata, Settings settings, - NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException { + S3Repository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) { super(metadata, settings, namedXContentRegistry); String bucket = BUCKET_SETTING.get(metadata.settings()); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java index a090fdd5281fd..caa1c0b467e52 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java @@ -20,14 +20,14 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonServiceException; import com.amazonaws.SdkClientException; import com.amazonaws.services.s3.AbstractAmazonS3; import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CopyObjectRequest; import com.amazonaws.services.s3.model.CopyObjectResult; import com.amazonaws.services.s3.model.DeleteObjectRequest; -import com.amazonaws.services.s3.model.GetObjectMetadataRequest; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.DeleteObjectsResult; import 
com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.ListObjectsRequest; import com.amazonaws.services.s3.model.ObjectListing; @@ -37,197 +37,163 @@ import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; import com.amazonaws.services.s3.model.S3ObjectSummary; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.net.InetAddress; -import java.net.Socket; import java.util.ArrayList; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.Objects; +import java.util.concurrent.ConcurrentMap; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; class MockAmazonS3 extends AbstractAmazonS3 { - private final int mockSocketPort; - - private Map blobs = new ConcurrentHashMap<>(); - - // in ESBlobStoreContainerTestCase.java, the maximum - // length of the input data is 100 bytes - private byte[] byteCounter = new byte[100]; - - - MockAmazonS3(int mockSocketPort) { - this.mockSocketPort = mockSocketPort; - } - - // Simulate a socket connection to check that SocketAccess.doPrivileged() is used correctly. - // Any method of AmazonS3 might potentially open a socket to the S3 service. Firstly, a call - // to any method of AmazonS3 has to be wrapped by SocketAccess.doPrivileged(). - // Secondly, each method on the stack from doPrivileged to opening the socket has to be - // located in a jar that is provided by the plugin. - // Thirdly, a SocketPermission has to be configured in plugin-security.policy. 
-    // By opening a socket in each method of MockAmazonS3 it is ensured that in production AmazonS3
-    // is able to open a socket to the S3 Service without causing a SecurityException
-    private void simulateS3SocketConnection() {
-        try (Socket socket = new Socket(InetAddress.getByName("127.0.0.1"), mockSocketPort)) {
-            assertTrue(socket.isConnected()); // NOOP to keep static analysis happy
-        } catch (IOException e) {
-            throw new UncheckedIOException(e);
-        }
+    private final ConcurrentMap<String, byte[]> blobs;
+    private final String bucket;
+    private final boolean serverSideEncryption;
+    private final String cannedACL;
+    private final String storageClass;
+
+    MockAmazonS3(final ConcurrentMap<String, byte[]> blobs,
+                 final String bucket,
+                 final boolean serverSideEncryption,
+                 final String cannedACL,
+                 final String storageClass) {
+        this.blobs = Objects.requireNonNull(blobs);
+        this.bucket = Objects.requireNonNull(bucket);
+        this.serverSideEncryption = serverSideEncryption;
+        this.cannedACL = cannedACL;
+        this.storageClass = storageClass;
     }
 
     @Override
-    public boolean doesBucketExist(String bucket) {
-        return true;
+    public boolean doesBucketExist(final String bucket) {
+        return this.bucket.equalsIgnoreCase(bucket);
     }
 
     @Override
-    public boolean doesObjectExist(String bucketName, String objectName) throws AmazonServiceException, SdkClientException {
-        simulateS3SocketConnection();
+    public boolean doesObjectExist(final String bucketName, final String objectName) throws SdkClientException {
+        assertThat(bucketName, equalTo(bucket));
         return blobs.containsKey(objectName);
     }
 
     @Override
-    public ObjectMetadata getObjectMetadata(
-            GetObjectMetadataRequest getObjectMetadataRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String blobName = getObjectMetadataRequest.getKey();
-
-        if (!blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] does not exist.");
-        }
-
-        return new ObjectMetadata(); // nothing is done with it
-    }
-
-    @Override
-    public PutObjectResult putObject(PutObjectRequest putObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String blobName = putObjectRequest.getKey();
-
-        if (blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] already exists.");
+    public PutObjectResult putObject(final PutObjectRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+        assertThat(request.getMetadata().getSSEAlgorithm(), serverSideEncryption ? equalTo("AES256") : nullValue());
+        assertThat(request.getCannedAcl(), notNullValue());
+        assertThat(request.getCannedAcl().toString(), cannedACL != null ? equalTo(cannedACL) : equalTo("private"));
+        assertThat(request.getStorageClass(), storageClass != null ? equalTo(storageClass) : equalTo("STANDARD"));
+
+        final String blobName = request.getKey();
+        final ByteArrayOutputStream out = new ByteArrayOutputStream();
+        try {
+            Streams.copy(request.getInputStream(), out);
+            blobs.put(blobName, out.toByteArray());
+        } catch (IOException e) {
+            throw new AmazonClientException(e);
         }
-
-        blobs.put(blobName, putObjectRequest.getInputStream());
         return new PutObjectResult();
     }
 
     @Override
-    public S3Object getObject(GetObjectRequest getObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        // in ESBlobStoreContainerTestCase.java, the prefix is empty,
-        // so the key and blobName are equivalent to each other
-        String blobName = getObjectRequest.getKey();
-
-        if (!blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] does not exist.");
+    public S3Object getObject(final GetObjectRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final String blobName = request.getKey();
+        final byte[] content = blobs.get(blobName);
+        if (content == null) {
+            AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist.");
+            exception.setStatusCode(404);
+            throw exception;
         }
 
-        // the HTTP request attribute is irrelevant for reading
-        S3ObjectInputStream stream = new S3ObjectInputStream(
-                blobs.get(blobName), null, false);
+        ObjectMetadata metadata = new ObjectMetadata();
+        metadata.setContentLength(content.length);
+
         S3Object s3Object = new S3Object();
-        s3Object.setObjectContent(stream);
+        s3Object.setObjectContent(new S3ObjectInputStream(new ByteArrayInputStream(content), null, false));
+        s3Object.setKey(blobName);
+        s3Object.setObjectMetadata(metadata);
+
         return s3Object;
     }
 
     @Override
-    public ObjectListing listObjects(ListObjectsRequest listObjectsRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        MockObjectListing list = new MockObjectListing();
-        list.setTruncated(false);
-
-        String blobName;
-        String prefix = listObjectsRequest.getPrefix();
-
-        ArrayList<S3ObjectSummary> mockObjectSummaries = new ArrayList<>();
-
-        for (Map.Entry<String, InputStream> blob : blobs.entrySet()) {
-            blobName = blob.getKey();
-            S3ObjectSummary objectSummary = new S3ObjectSummary();
-
-            if (prefix.isEmpty() || blobName.startsWith(prefix)) {
-                objectSummary.setKey(blobName);
-
-                try {
-                    objectSummary.setSize(getSize(blob.getValue()));
-                } catch (IOException e) {
-                    throw new AmazonS3Exception("Object listing " +
-                            "failed for blob [" + blob.getKey() + "]");
-                }
-
-                mockObjectSummaries.add(objectSummary);
+    public ObjectListing listObjects(final ListObjectsRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final ObjectListing listing = new ObjectListing();
+        listing.setBucketName(request.getBucketName());
+        listing.setPrefix(request.getPrefix());
+
+        for (Map.Entry<String, byte[]> blob : blobs.entrySet()) {
+            if (Strings.isEmpty(request.getPrefix()) || blob.getKey().startsWith(request.getPrefix())) {
+                S3ObjectSummary summary = new S3ObjectSummary();
+                summary.setBucketName(request.getBucketName());
+                summary.setKey(blob.getKey());
+                summary.setSize(blob.getValue().length);
+                listing.getObjectSummaries().add(summary);
             }
         }
-
-        list.setObjectSummaries(mockObjectSummaries);
-        return list;
+        return listing;
     }
 
     @Override
-    public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String sourceBlobName = copyObjectRequest.getSourceKey();
-        String targetBlobName = copyObjectRequest.getDestinationKey();
-
-        if (!blobs.containsKey(sourceBlobName)) {
-            throw new AmazonS3Exception("Source blob [" +
-                    sourceBlobName + "] does not exist.");
-        }
+    public CopyObjectResult copyObject(final CopyObjectRequest request) throws AmazonClientException {
+        assertThat(request.getSourceBucketName(), equalTo(bucket));
+        assertThat(request.getDestinationBucketName(), equalTo(bucket));
+
+        final String sourceBlobName = request.getSourceKey();
 
-        if (blobs.containsKey(targetBlobName)) {
-            throw new AmazonS3Exception("Target blob [" +
-                    targetBlobName + "] already exists.");
+        final byte[] content = blobs.get(sourceBlobName);
+        if (content == null) {
+            AmazonS3Exception exception = new AmazonS3Exception("[" + sourceBlobName + "] does not exist.");
+            exception.setStatusCode(404);
+            throw exception;
         }
 
-        blobs.put(targetBlobName, blobs.get(sourceBlobName));
-        return new CopyObjectResult(); // nothing is done with it
+        blobs.put(request.getDestinationKey(), content);
+        return new CopyObjectResult();
     }
 
     @Override
-    public void deleteObject(DeleteObjectRequest deleteObjectRequest)
-            throws AmazonClientException, AmazonServiceException {
-        simulateS3SocketConnection();
-        String blobName = deleteObjectRequest.getKey();
-
-        if (!blobs.containsKey(blobName)) {
-            throw new AmazonS3Exception("[" + blobName + "] does not exist.");
+    public void deleteObject(final DeleteObjectRequest request) throws AmazonClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final String blobName = request.getKey();
+        if (blobs.remove(blobName) == null) {
+            AmazonS3Exception exception = new AmazonS3Exception("[" + blobName + "] does not exist.");
+            exception.setStatusCode(404);
+            throw exception;
         }
-
-        blobs.remove(blobName);
     }
 
-    private int getSize(InputStream stream) throws IOException {
-        int size = stream.read(byteCounter);
-        stream.reset(); // in case we ever need the size again
-        return size;
-    }
-
-    private class MockObjectListing extends ObjectListing {
-        // the objectSummaries attribute in ObjectListing.java
-        // is read-only, but we need to be able to write to it,
-        // so we create a mock of it to work around this
-        private List<S3ObjectSummary> mockObjectSummaries;
-
-        @Override
-        public List<S3ObjectSummary> getObjectSummaries() {
-            return mockObjectSummaries;
-        }
-
-        private void setObjectSummaries(List<S3ObjectSummary> objectSummaries) {
-            mockObjectSummaries = objectSummaries;
+    @Override
+    public DeleteObjectsResult deleteObjects(DeleteObjectsRequest request) throws SdkClientException {
+        assertThat(request.getBucketName(), equalTo(bucket));
+
+        final List<DeleteObjectsResult.DeletedObject> deletions = new ArrayList<>();
+        for (DeleteObjectsRequest.KeyVersion key : request.getKeys()) {
+            if (blobs.remove(key.getKey()) == null) {
+                AmazonS3Exception exception = new AmazonS3Exception("[" + key + "] does not exist.");
+                exception.setStatusCode(404);
+                throw exception;
+            } else {
+                DeleteObjectsResult.DeletedObject deletion = new DeleteObjectsResult.DeletedObject();
+                deletion.setKey(key.getKey());
+                deletions.add(deletion);
+            }
         }
+        return new DeleteObjectsResult(deletions);
     }
 }
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java
index 5998540e7a8fa..453ef3213f0b6 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java
+++
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -37,26 +37,19 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.mockito.ArgumentCaptor; import java.io.ByteArrayInputStream; import java.io.IOException; -import java.net.InetAddress; -import java.net.ServerSocket; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Locale; import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.repositories.s3.S3BlobStoreTests.randomMockS3BlobStore; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doNothing; @@ -67,36 +60,11 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase { - private static ServerSocket mockS3ServerSocket; - - private static Thread mockS3AcceptorThread; - - // Opens a MockSocket to simulate connections to S3 checking that SocketPermissions are set up correctly. - // See MockAmazonS3.simulateS3SocketConnection. - @BeforeClass - public static void openMockSocket() throws IOException { - mockS3ServerSocket = new MockServerSocket(0, 50, InetAddress.getByName("127.0.0.1")); - mockS3AcceptorThread = new Thread(() -> { - while (!mockS3ServerSocket.isClosed()) { - try { - // Accept connections from MockAmazonS3. 
- mockS3ServerSocket.accept(); - } catch (IOException e) { - } - } - }); - mockS3AcceptorThread.start(); + protected BlobStore newBlobStore() { + return randomMockS3BlobStore(); } - protected BlobStore newBlobStore() throws IOException { - MockAmazonS3 client = new MockAmazonS3(mockS3ServerSocket.getLocalPort()); - String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - - return new S3BlobStore(Settings.EMPTY, client, bucket, false, - new ByteSizeValue(10, ByteSizeUnit.MB), "public-read-write", "standard"); - } - - public void testExecuteSingleUploadBlobSizeTooLarge() throws IOException { + public void testExecuteSingleUploadBlobSizeTooLarge() { final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(6, 10)); final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); @@ -106,7 +74,7 @@ public void testExecuteSingleUploadBlobSizeTooLarge() throws IOException { assertEquals("Upload request size [" + blobSize + "] can't be larger than 5gb", e.getMessage()); } - public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() throws IOException { + public void testExecuteSingleUploadBlobSizeLargerThanBufferSize() { final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bufferSizeInBytes()).thenReturn(ByteSizeUnit.MB.toBytes(1)); @@ -168,7 +136,7 @@ public void testExecuteSingleUpload() throws IOException { } } - public void testExecuteMultipartUploadBlobSizeTooLarge() throws IOException { + public void testExecuteMultipartUploadBlobSizeTooLarge() { final long blobSize = ByteSizeUnit.TB.toBytes(randomIntBetween(6, 10)); final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); @@ -179,7 +147,7 @@ public void testExecuteMultipartUploadBlobSizeTooLarge() throws IOException { assertEquals("Multipart upload request size [" + blobSize + "] can't be larger than 5tb", e.getMessage()); } - public void testExecuteMultipartUploadBlobSizeTooSmall() throws IOException { + public void testExecuteMultipartUploadBlobSizeTooSmall() { final long blobSize = ByteSizeUnit.MB.toBytes(randomIntBetween(1, 4)); final S3BlobStore blobStore = mock(S3BlobStore.class); final S3BlobContainer blobContainer = new S3BlobContainer(mock(BlobPath.class), blobStore); @@ -291,7 +259,7 @@ public void testExecuteMultipartUpload() throws IOException { assertEquals(expectedEtags, actualETags); } - public void testExecuteMultipartUploadAborted() throws IOException { + public void testExecuteMultipartUploadAborted() { final String bucketName = randomAlphaOfLengthBetween(1, 10); final String blobName = randomAlphaOfLengthBetween(1, 10); final BlobPath blobPath = new BlobPath(); @@ -418,12 +386,4 @@ private static void assertNumberOfMultiparts(final int expectedParts, final long assertEquals("Expected number of parts [" + expectedParts + "] but got [" + result.v1() + "]", expectedParts, (long) result.v1()); assertEquals("Expected remaining [" + expectedRemaining + "] but got [" + result.v2() + "]", expectedRemaining, (long) result.v2()); } - - @AfterClass - public static void closeMockSocket() throws IOException, InterruptedException { - mockS3ServerSocket.close(); - mockS3AcceptorThread.join(); - mockS3AcceptorThread = null; - mockS3ServerSocket = null; - } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java 
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java new file mode 100644 index 0000000000000..e3e89c41514de --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.CannedAccessControlList; +import com.amazonaws.services.s3.model.StorageClass; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.util.Collection; +import java.util.Collections; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { + + private static final ConcurrentMap<String, byte[]> blobs = new ConcurrentHashMap<>(); + private static String bucket; + private static String client; + private static ByteSizeValue bufferSize; + private static boolean serverSideEncryption; + private static String cannedACL; + private static String storageClass; + + @BeforeClass + public static void setUpRepositorySettings() { + bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + client = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + bufferSize = new ByteSizeValue(randomIntBetween(5, 50), ByteSizeUnit.MB); + serverSideEncryption = randomBoolean(); + if (randomBoolean()) { + cannedACL = randomFrom(CannedAccessControlList.values()).toString(); + } + if (randomBoolean()) { + storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString(); + } + } + + @AfterClass + public static void wipeRepository() { + blobs.clear(); + } + + @Override + protected void createTestRepository(final String name) { + assertAcked(client().admin().cluster().preparePutRepository(name) + .setType(S3Repository.TYPE) + .setSettings(Settings.builder() + .put(S3Repository.BUCKET_SETTING.getKey(), bucket) + .put(InternalAwsS3Service.CLIENT_NAME.getKey(), client) + .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) + .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption) + .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL) + .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass))); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(TestS3RepositoryPlugin.class); + } + + public static class TestS3RepositoryPlugin extends S3RepositoryPlugin { + + public TestS3RepositoryPlugin(final Settings settings) { + super(settings); + } + + @Override + public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) { + return Collections.singletonMap(S3Repository.TYPE, (metadata) -> + new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings(), emptyMap()) { + @Override + public synchronized AmazonS3 client(final Settings repositorySettings) { + return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass); + } + })); + } + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java index 17bea5239fe7e..4a23e4efa9a29 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java @@ -19,18 +19,29 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.StorageClass; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.repositories.s3.S3BlobStore; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.repositories.ESBlobStoreTestCase; -import java.io.IOException; +import java.util.Locale; +import java.util.concurrent.ConcurrentHashMap; import static org.hamcrest.Matchers.equalTo; -public class S3BlobStoreTests extends ESTestCase { - public void testInitCannedACL() throws IOException { +public class S3BlobStoreTests extends ESBlobStoreTestCase { + + @Override + protected BlobStore newBlobStore() { + return randomMockS3BlobStore(); + } + + public void testInitCannedACL() { String[] aclList = new String[]{ "private", "public-read", "public-read-write", "authenticated-read", "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control"}; @@ -52,16 +63,12 @@ public void testInitCannedACL() { } } - public void testInvalidCannedACL() throws IOException { - try { - S3BlobStore.initCannedACL("test_invalid"); - fail("CannedACL should fail"); - } catch (BlobStoreException ex) { - assertThat(ex.getMessage(), equalTo("cannedACL is not valid: [test_invalid]")); - } + public void testInvalidCannedACL() { + BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initCannedACL("test_invalid")); + assertThat(ex.getMessage(), equalTo("cannedACL is not valid: [test_invalid]")); } - public void testInitStorageClass() throws IOException { + public void testInitStorageClass() { // it should default to `standard` assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard)); @@ -72,25 +79,43 @@ public void testInitStorageClass() throws IOException { assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy)); } - public void testCaseInsensitiveStorageClass() throws IOException { + public void testCaseInsensitiveStorageClass() { assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard)); assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess)); assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy)); } - public void testInvalidStorageClass() throws IOException { - try { - S3BlobStore.initStorageClass("whatever"); - } catch(BlobStoreException ex) { - assertThat(ex.getMessage(), equalTo("`whatever` is not a valid S3 Storage Class.")); - } + public void testInvalidStorageClass() { + BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("whatever")); + assertThat(ex.getMessage(), equalTo("`whatever` is not a valid S3 Storage Class.")); } - public void testRejectGlacierStorageClass() throws IOException { - try { - S3BlobStore.initStorageClass("glacier"); - } catch(BlobStoreException ex) { - assertThat(ex.getMessage(), equalTo("Glacier storage class is not supported")); + public void testRejectGlacierStorageClass() { + BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("glacier")); + assertThat(ex.getMessage(), equalTo("Glacier storage class is not supported")); + } + + /** + * Creates a new {@link S3BlobStore} with random settings. + *
<p>
+ * The blobstore uses a {@link MockAmazonS3} client. + */ + public static S3BlobStore randomMockS3BlobStore() { + String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + ByteSizeValue bufferSize = new ByteSizeValue(randomIntBetween(5, 100), ByteSizeUnit.MB); + boolean serverSideEncryption = randomBoolean(); + + String cannedACL = null; + if (randomBoolean()) { + cannedACL = randomFrom(CannedAccessControlList.values()).toString(); } + + String storageClass = null; + if (randomBoolean()) { + storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString(); + } + + AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass); + return new S3BlobStore(Settings.EMPTY, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); } } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 2589f3a51f714..e50e0c45c8dc9 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -687,6 +687,7 @@ public void testEmptyShard() throws IOException { * Tests recovery of an index with or without a translog and the * statistics we gather about that. */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29544") public void testRecovery() throws IOException { int count; boolean shouldHaveTranslog; @@ -698,6 +699,9 @@ public void testRecovery() throws IOException { shouldHaveTranslog = randomBoolean(); indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + + // make sure all recoveries are done + ensureNoInitializingShards(); // Explicitly flush so we're sure to have a bunch of documents in the Lucene index client().performRequest("POST", "/_flush"); if (shouldHaveTranslog) { diff --git a/qa/reindex-from-old/build.gradle b/qa/reindex-from-old/build.gradle index c9388c42bf54a..c4b4927a4a2b1 100644 --- a/qa/reindex-from-old/build.gradle +++ b/qa/reindex-from-old/build.gradle @@ -24,8 +24,11 @@ should be able to use the standard launching mechanism which is more flexible and reliable. """ + import org.apache.tools.ant.taskdefs.condition.Os +import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -51,9 +54,8 @@ dependencies { es090 'org.elasticsearch:elasticsearch:0.90.13@zip' } -if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9 || Os.isFamily(Os.FAMILY_WINDOWS)) { - /* We can't run the dependencies with Java 9 so for now we'll skip the whole - * thing. We can't get the pid files in windows so we skip that as well.... 
*/ +if (Os.isFamily(Os.FAMILY_WINDOWS)) { + // we can't get the pid files in windows so we skip that integTest.enabled = false } else { /* Set up tasks to unzip and run the old versions of ES before running the @@ -75,7 +77,7 @@ if (project.runtimeJavaVersion >= JavaVersion.VERSION_1_9 || Os.isFamily(Os.FAMI dependsOn unzip executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', project.runtimeJavaHome + env 'JAVA_HOME', "${-> getJavaHome(project, 7, "JAVA7_HOME must be set to run reindex-from-old")}" args 'oldes.OldElasticsearch', baseDir, unzip.temporaryDir, diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java index 16dbbc6f8cbab..ec993262a4d59 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java @@ -33,7 +33,7 @@ public class IngestDocumentMustacheIT extends AbstractScriptTestCase { public void testAccessMetaDataViaTemplate() { Map<String, Object> document = new HashMap<>(); document.put("foo", "bar"); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 bar")); @@ -48,7 +48,7 @@ public void testAccessMapMetaDataViaTemplate() { innerObject.put("baz", "hello baz"); innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); @@ -67,7 +67,7 @@ public void testAccessListMetaDataViaTemplate() { list.add(value); list.add(null); document.put("list2", list); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 foo {field=value}")); } @@ -77,7 +77,7 @@ public void testAccessIngestMetadataViaTemplate() { Map<String, Object> ingestMap = new HashMap<>(); ingestMap.put("timestamp", "bogus_timestamp"); document.put("_ingest", ingestMap); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); ingestDocument.setFieldValue(compile("ingest_timestamp"), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}",
scriptService)); assertThat(ingestDocument.getFieldValue("ingest_timestamp", String.class), diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java index a80b693851fc1..e7005080ea88e 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java @@ -64,7 +64,7 @@ public void testValueSourceWithTemplates() { } public void testAccessSourceViaTemplate() { - IngestDocument ingestDocument = new IngestDocument("marvel", "type", "id", null, null, null, null, new HashMap<>()); + IngestDocument ingestDocument = new IngestDocument("marvel", "type", "id", null, null, null, new HashMap<>()); assertThat(ingestDocument.hasField("marvel"), is(false)); ingestDocument.setFieldValue(compile("{{_index}}"), ValueSource.wrap("{{_index}}", scriptService)); assertThat(ingestDocument.getFieldValue("marvel", String.class), equalTo("marvel")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 2ff171bf528e2..c30ee70e2eb82 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -37,10 +37,6 @@ "type" : "string", "description" : "Default document type for items which don't provide one" }, - "fields": { - "type": "list", - "description" : "Default comma-separated list of fields to return in the response for updates, can be overridden on each sub-request" - }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or default list of fields to return, can be overridden on each sub-request" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index a58598b3bb396..574206a0dc3ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -4,7 +4,7 @@ "methods": ["POST", "PUT"], "url": { "path": "/{index}/{type}", - "paths": ["/{index}/{type}", "/{index}/{type}/{id}"], + "paths": ["/{index}/{type}", "/{index}/{type}/{id}", "/{index}/_doc/{id}", "/{index}/_doc"], "parts": { "id": { "type" : "string", @@ -17,7 +17,6 @@ }, "type": { "type" : "string", - "required" : true, "description" : "The type of the document" } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json index f876df36f882b..2ff9d3f68d9d9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json @@ -13,6 +13,10 @@ } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether a type should be expected in the body of the mappings." + }, "wait_for_active_shards": { "type" : "string", "description" : "Set the number of active shards to wait for before the operation returns." 
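The `include_type_name` parameter added to the `indices.create`, `indices.get_mapping`, and `indices.put_mapping` specs above is what the typeless-mapping YAML tests further down (`indices.put_mapping/20_no_types.yml`) exercise over REST. As a rough sketch of the same flow from Java, assuming the low-level REST client (a version that ships the `Request` helper), a node on `localhost:9200`, and a made-up index with a placeholder `foo` field:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class TypelessCreateIndexSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // With include_type_name=false the mappings body holds "properties"
            // directly, with no intermediate type name in between.
            Request createIndex = new Request("PUT", "/index");
            createIndex.addParameter("include_type_name", "false");
            createIndex.setJsonEntity("{\"mappings\":{\"properties\":{\"foo\":{\"type\":\"keyword\"}}}}");
            Response response = restClient.performRequest(createIndex);
            System.out.println(response.getStatusLine());
        }
    }
}
```

Combining `include_type_name=false` with an explicit type in the path is rejected with an `illegal_argument_exception`, which is exactly what the "PUT mapping with a type and include_type_name: false" test below asserts.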
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json index c3c0622844bb1..ae54c7c10e677 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json @@ -16,6 +16,10 @@ } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether to add the type name to the response" + }, "ignore_unavailable": { "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index c6b547914ef79..4efb615329639 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -4,7 +4,7 @@ "methods": ["PUT", "POST"], "url": { "path": "/{index}/{type}/_mapping", - "paths": ["/{index}/{type}/_mapping", "/{index}/_mapping/{type}", "/_mapping/{type}", "/{index}/{type}/_mappings", "/{index}/_mappings/{type}", "/_mappings/{type}"], + "paths": ["/{index}/{type}/_mapping", "/{index}/_mapping/{type}", "/_mapping/{type}", "/{index}/{type}/_mappings", "/{index}/_mappings/{type}", "/_mappings/{type}", "{index}/_mappings", "{index}/_mapping"], "parts": { "index": { "type" : "list", @@ -12,11 +12,14 @@ }, "type": { "type" : "string", - "required" : true, "description" : "The name of the document type" } }, "params": { + "include_type_name": { + "type" : "string", + "description" : "Whether a type should be expected in the body of the mappings." + }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json index 7c9cf627530ef..3055cb8e32e2e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json @@ -16,6 +16,10 @@ "type": "time", "description": "Specify timeout for connection to master" }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + }, "preserve_existing": { "type": "boolean", "description": "Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`" @@ -34,10 +38,10 @@ "default": "open", "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
}, - "flat_settings": { - "type": "boolean", - "description": "Return settings in flat format (default: false)" - } + "flat_settings": { + "type": "boolean", + "description": "Return settings in flat format (default: false)" + } } }, "body": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json index 51798c92babf6..5c9cebf741135 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json @@ -1,26 +1,36 @@ -{ - "rank_eval": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html", - "methods": ["POST"], - "url": { - "path": "/_rank_eval", - "paths": ["/_rank_eval", "/{index}/_rank_eval", "/{index}/{type}/_rank_eval"], +{ + "rank_eval": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_rank_eval", + "paths": ["/_rank_eval", "/{index}/_rank_eval"], "parts": { "index": { "type": "list", "description" : "A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of document types to search; leave empty to perform the operation on all types" } }, - "params": {} + "params": { + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + } + } }, - "body": { - "description": "The search definition using the Query DSL and the prototype for the eval request.", - "required": true - } - } + "body": { + "description": "The ranking evaluation search definition, including search requests, document ratings and ranking metric definition.", + "required": true + } + } } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 5e1dcf72e9519..ffa99cc9dc312 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -27,10 +27,6 @@ "type": "string", "description": "Sets the number of shard copies that must be active before proceeding with the update operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" }, - "fields": { - "type": "list", - "description": "A comma-separated list of fields to return in the response" - }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yml deleted file mode 100644 index cb0dfcfe78cf7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/50_parent.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -"Parent": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - - - do: - catch: /routing_missing_exception/ - create: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - create: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 5 - stored_fields: [_parent, _routing] - - - match: { _id: "1"} - - match: { _parent: "5"} - - match: { _routing: "5"} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yml deleted file mode 100644 index 5f352ac90c3c7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/55_parent_with_routing.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - create: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - stored_fields: [_parent, _routing] - - - match: { _id: "1"} - - match: { _parent: "5"} - - match: { _routing: "4"} - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - parent: 5 - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 4 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml index deb5e1e6850ab..e461691ace200 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml @@ -18,7 +18,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -36,7 +35,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 2 }} @@ -56,7 +54,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -76,7 +73,6 @@ - do: search: index: create_60_refresh_1 - type: test body: query: { term: { _id: create_60_refresh_id1 }} - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/40_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/40_parent.yml deleted file mode 100644 index 82fc8a325d614..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/40_parent.yml +++ /dev/null @@ -1,36 +0,0 @@ ---- -"Parent": - - do: - indices.create: - index: test_1 - body: - settings: - number_of_shards: 5 - 
number_of_routing_shards: 5 - mappings: - test: - _parent: { type: "foo" } - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - - do: - catch: missing - delete: - index: test_1 - type: test - id: 1 - parent: 1 - - - do: - delete: - index: test_1 - type: test - id: 1 - parent: 5 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/42_missing_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/42_missing_parent.yml deleted file mode 100644 index d72c5a83d599f..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/42_missing_parent.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -"Delete on all shards when parent not specified": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - - do: - cluster.health: - wait_for_status: yellow - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - - do: - catch: /routing_missing_exception/ - delete: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/45_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/45_parent_with_routing.yml deleted file mode 100644 index 8b11e82b75459..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/45_parent_with_routing.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: { foo: bar } - - - do: - catch: missing - delete: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 1 - - - do: - delete: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml index f86c7250a37ca..ad27bb68601ae 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml @@ -37,7 +37,6 @@ - do: search: index: test_1 - type: test body: query: { terms: { _id: [1,3] }} @@ -52,7 +51,6 @@ - do: search: index: test_1 - type: test body: query: { terms: { _id: [1,3] }} @@ -72,7 +70,6 @@ - do: search: index: test_1 - type: test body: query: { terms: { _id: [1,3] }} @@ -92,7 +89,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} - match: { hits.total: 1 } @@ -107,7 +103,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} - match: { hits.total: 0 } @@ -126,7 +121,6 @@ - do: search: index: delete_50_refresh_1 - type: test body: query: { term: { _id: delete_50_refresh_id1 }} - match: { hits.total: 1 } @@ -142,7 +136,6 @@ - do: search: index: delete_50_refresh_1 - type: test body: query: { term: { _id: delete_50_refresh_id1 }} - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml deleted file mode 100644 index 4c92605756a37..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/30_parent.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -setup: - - do: - indices.create: - index: test_1 - 
body: - mappings: - test: - _parent: { type: "foo" } - ---- -"Parent": - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - - do: - exists: - index: test_1 - type: test - id: 1 - parent: 5 - - - is_true: '' - ---- -"Parent omitted": - - - do: - catch: bad_request - exists: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/55_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/exists/55_parent_with_routing.yml deleted file mode 100644 index bf617a23260f7..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/exists/55_parent_with_routing.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: { foo: bar } - - - is_true: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - - - is_true: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - parent: 5 - - - is_false: '' - - - do: - exists: - index: test_1 - type: test - id: 1 - routing: 4 - - - is_true: '' - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml deleted file mode 100644 index 04f578b88d6e6..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/30_parent.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -setup: - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 中文 - body: { foo: bar } - ---- -"Parent": - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 中文 - stored_fields: [_parent, _routing] - - - match: { _id: "1"} - - match: { _parent: 中文 } - - match: { _routing: 中文} - ---- -"Parent omitted": - - do: - catch: bad_request - get: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yml deleted file mode 100644 index db71d16423009..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/55_parent_with_routing.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - stored_fields: [_parent, _routing] - - - match: { _id: "1"} - - match: { _parent: "5"} - - match: { _routing: "4"} - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - parent: 5 - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 4 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml deleted file mode 100644 index fe589c9823472..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/30_parent.yml +++ 
/dev/null @@ -1,40 +0,0 @@ ---- -setup: - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - ---- -"Parent": - - do: - get_source: - index: test_1 - type: test - id: 1 - parent: 5 - - - match: { '': {foo: bar}} - ---- -"Parent omitted": - - - do: - catch: bad_request - get_source: - index: test_1 - type: test - id: 1 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/55_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/55_parent_with_routing.yml deleted file mode 100644 index 38ca9d5f22d15..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get_source/55_parent_with_routing.yml +++ /dev/null @@ -1,52 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: { foo: bar } - - - do: - get_source: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - - - match: { '': {foo: bar}} - - - do: - catch: missing - get_source: - index: test_1 - type: test - id: 1 - parent: 5 - - - do: - get_source: - index: test_1 - type: test - id: 1 - routing: 4 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yml deleted file mode 100644 index 89077dac4ae8e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/50_parent.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -"Parent": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - - - do: - catch: /routing_missing_exception/ - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 5 - stored_fields: [_parent, _routing] - - - match: { _id: "1"} - - match: { _parent: "5"} - - match: { _routing: "5"} - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yml deleted file mode 100644 index db71d16423009..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/55_parent_with_routing.yml +++ /dev/null @@ -1,55 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - stored_fields: [_parent, _routing] - - - match: { _id: "1"} - - match: { _parent: "5"} - - match: { _routing: "4"} - - - do: - catch: missing - get: - index: test_1 - type: test - id: 1 - parent: 5 - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 4 - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml index 5d20406abeef0..cd78a4e4282fe 100644 
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml @@ -19,7 +19,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -37,7 +36,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 2 }} @@ -57,7 +55,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -77,7 +74,6 @@ - do: search: index: index_60_refresh_1 - type: test body: query: { term: { _id: index_60_refresh_id1 }} - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index 659435ae19615..f80f6b1096b27 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -21,3 +21,34 @@ indices.stats: {level: shards} - is_true: indices.testing.shards.0.0.commit.user_data.sync_id + +--- +"Flush stats": + - skip: + version: " - 6.2.99" + reason: periodic flush stats is introduced in 6.3.0 + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + index.translog.flush_threshold_size: 160b + - do: + indices.flush: + index: test + - do: + indices.stats: { index: test } + - match: { indices.test.primaries.flush.periodic: 0 } + - match: { indices.test.primaries.flush.total: 1 } + - do: + index: + index: test + type: doc + id: 1 + body: { "message": "a long message to make a periodic flush happen after this index operation" } + - do: + indices.stats: { index: test } + # periodic flush is async + - gte: { indices.test.primaries.flush.periodic: 0 } + - gte: { indices.test.primaries.flush.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml new file mode 100644 index 0000000000000..40effe01b080f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml @@ -0,0 +1,120 @@ +--- +"Create indices and manage mappings without types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + body: + mappings: + properties: + foo: + type: keyword + + - do: + indices.get_mapping: + index: index + include_type_name: false + + - match: { index.mappings.properties.foo.type: "keyword" } + + - do: + indices.put_mapping: + index: index + include_type_name: false + body: + properties: + bar: + type: float + + - do: + indices.get_mapping: + index: index + include_type_name: false + + - match: { index.mappings.properties.foo.type: "keyword" } + - match: { index.mappings.properties.bar.type: "float" } + +# Explicit id + - do: + index: + index: index + id: 1 + body: { foo: bar } + +# Implicit id + - do: + index: + index: index + body: { foo: bar } + +# Bulk with explicit id + - do: + bulk: + index: index + body: | + { "index": { "_id": "2" } } + { "doc": { "foo": "baz" } } + +# Bulk with implicit id + - do: + bulk: + index: index + body: | + { "index": { } } + { "doc": { "foo": "baz" } } + + - do: + indices.refresh: + index: index + + - do: + count: + index: index + + - match: { count: 4 } + +--- +"PUT mapping with a type and include_type_name: false": + + - skip: + version: " - 6.99.99" + reason: include_type_name 
was introduced in 7.0.0 + + - do: + indices.create: + index: index + + - do: + catch: /illegal_argument_exception/ + indices.put_mapping: + index: index + type: _doc + include_type_name: false + body: + properties: + bar: + type: float + +--- +"Empty index with the include_type_name=false option": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: + index: index + include_type_name: false + + - do: + indices.get_mapping: + index: index + include_type_name: false + + - match: { index.mappings: {} } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml index 1d22d4a4c1f53..a797edcfa4ee3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml @@ -70,7 +70,6 @@ - do: search: index: logs_search - type: test - match: { hits.total: 1 } - match: { hits.hits.0._index: "logs-000002"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index aec3c41672ddc..550b868ff4988 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -93,7 +93,6 @@ - do: search: index: test - type: test body: sort: ["rank"] size: 1 @@ -105,7 +104,6 @@ - do: search: index: test - type: test body: sort: ["rank"] query: {"range": { "rank": { "from": 0 } } } @@ -128,7 +126,6 @@ - do: search: index: test - type: test body: sort: _doc @@ -146,7 +143,6 @@ - do: search: index: test - type: test body: sort: ["rank"] query: {"range": { "rank": { "from": 0 } } } @@ -163,7 +159,6 @@ catch: /disabling \[track_total_hits\] is not allowed in a scroll context/ search: index: test - type: test scroll: 1m body: sort: ["rank"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yml deleted file mode 100644 index ad064df69ce55..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/30_parent.yml +++ /dev/null @@ -1,56 +0,0 @@ ---- -"Parent": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 4 - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 2 - parent: 5 - body: { foo: bar } - - - do: - mget: - index: test_1 - type: test - body: - docs: - - { _id: 1 } - - { _id: 1, parent: 5, stored_fields: [ _parent, _routing ] } - - { _id: 1, parent: 4, stored_fields: [ _parent, _routing ] } - - { _id: 2, parent: 5, stored_fields: [ _parent, _routing ] } - - - is_false: docs.0.found - - is_false: docs.1.found - - - is_true: docs.2.found - - match: { docs.2._index: test_1 } - - match: { docs.2._type: test } - - match: { docs.2._id: "1" } - - match: { docs.2._parent: "4" } - - match: { docs.2._routing: "4" } - - - is_true: docs.3.found - - match: { docs.3._index: test_1 } - - match: { docs.3._type: test } - - match: { docs.3._id: "2" } - - match: { docs.3._parent: "5" } - - match: { docs.3._routing: "5" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yml deleted file mode 100644 index 1fa1ce2cddb9a..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/55_parent_with_routing.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -"Parent": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 4 - routing: 5 - body: { foo: bar } - - - do: - mget: - index: test_1 - type: test - stored_fields: [ _routing , _parent] - body: - docs: - - { _id: 1 } - - { _id: 1, parent: 4 } - - { _id: 1, parent: 4, routing: 5 } - - - is_false: docs.0.found - - is_false: docs.1.found - - - is_true: docs.2.found - - match: { docs.2._index: test_1 } - - match: { docs.2._type: test } - - match: { docs.2._id: "1" } - - match: { docs.2._parent: "4" } - - match: { docs.2._routing: "5" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml index 31049b07e21e6..ee577f6a228f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml @@ -32,7 +32,6 @@ - do: search: index: test_1 - type: test body: query: more_like_this: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml index 415a38f00c10e..42b6619a5f8e0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml @@ -37,7 +37,6 @@ - do: search: index: test_1 - type: test body: query: more_like_this: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml index 01cd372e8cf27..f3d641081e834 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml @@ -37,7 +37,6 @@ - do: search: index: test_1 - type: test body: query: more_like_this: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml index 47bcbdb83c47c..845cbc54bc7bb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/12_slices.yml @@ -41,10 +41,6 @@ setup: --- "Sliced scroll": - - skip: - version: " - 5.3.0" - reason: Prior version uses a random seed per node to compute the hash of the keys. 
- - do: search: index: test_sliced_scroll diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml index aff30d17de167..6ad8166a6a8a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/160_extended_stats_metric.yml @@ -281,7 +281,7 @@ setup: sigma: -1 - do: - catch: /parsing_exception/ + catch: /x_content_parse_exception/ search: body: aggs: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml index a708ff19d7e34..fe8c33926d04d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml @@ -67,14 +67,12 @@ - do: search: index: goodbad - type: doc - match: {hits.total: 7} - do: search: index: goodbad - type: doc body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_terms": {"significant_terms": {"field": "text"}}}}}} - match: {aggregations.class.buckets.0.sig_terms.buckets.0.key: "bad"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml index bfbf171e8cc34..6f368463aa0c8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml @@ -72,14 +72,12 @@ - do: search: index: goodbad - type: doc - match: {hits.total: 7} - do: search: index: goodbad - type: doc body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text"}}}}}} - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} @@ -159,14 +157,12 @@ - do: search: index: goodbad - type: doc - match: {hits.total: 7} - do: search: index: goodbad - type: doc body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text", "filter_duplicate_text": true}}}}}} - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 1ae9c48e59c1d..a7998a0b2f96f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -64,7 +64,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group } sort: [{ sort: desc }] @@ -100,7 +99,6 @@ setup: - do: search: index: test - type: test body: from: 2 collapse: { field: numeric_group } @@ -125,7 +123,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } } sort: [{ sort: desc }] @@ -169,7 +166,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group, max_concurrent_group_searches: 10, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } } sort: [{ sort: desc }] @@ -215,7 +211,6 
@@ setup: catch: /cannot use \`collapse\` in a scroll context/ search: index: test - type: test scroll: 1s body: collapse: { field: numeric_group } @@ -231,7 +226,6 @@ setup: catch: /cannot use \`collapse\` in conjunction with \`search_after\`/ search: index: test - type: test body: collapse: { field: numeric_group } search_after: [6] @@ -248,7 +242,6 @@ setup: catch: /cannot use \`collapse\` in conjunction with \`rescore\`/ search: index: test - type: test body: collapse: { field: numeric_group } rescore: @@ -269,7 +262,6 @@ setup: - do: search: index: test - type: test body: size: 0 collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} } @@ -288,7 +280,6 @@ setup: - do: search: index: test - type: test body: collapse: field: numeric_group @@ -345,7 +336,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group, inner_hits: { name: sub_hits, version: true, size: 2, sort: [{ sort: asc }] } } sort: [{ sort: desc }] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml index f3380f513966d..868754d5930e0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml @@ -581,21 +581,6 @@ setup: - match: {hits.total: 4} ---- -"Test exists query on _uid field": - - skip: - version: " - 6.0.99" - reason: exists on _uid not supported prior to 6.1.0 - - do: - search: - index: test - body: - query: - exists: - field: _uid - - - match: {hits.total: 4} - --- "Test exists query on _index field": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml index 52fbd19185335..359d5d80c6801 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml @@ -29,7 +29,6 @@ setup: - do: search: index: _all - type: test body: query: match: @@ -40,7 +39,6 @@ setup: - do: search: index: test_1 - type: test body: query: match: @@ -54,7 +52,6 @@ setup: - do: search: index: test_2 - type: test body: query: match: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 920f55ae1be5d..3392adb50ac19 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -36,7 +36,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: @@ -54,7 +53,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: @@ -73,7 +71,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: @@ -92,7 +89,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml deleted file mode 100644 index e65f80d705cb2..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/50_parent.yml +++ /dev/null @@ -1,75 +0,0 @@ ---- -setup: - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } ---- -"Parent": - - - do: - catch: 
/routing_missing_exception/ - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: baz } - upsert: { foo: bar } - - - do: - update: - index: test_1 - type: test - id: 1 - parent: 5 - body: - doc: { foo: baz } - upsert: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - parent: 5 - stored_fields: [_parent, _routing] - - - match: { _parent: "5"} - - match: { _routing: "5"} - - - do: - update: - index: test_1 - type: test - id: 1 - parent: 5 - _source: foo - body: - doc: { foo: baz } - - - match: { get._source.foo: baz } - ---- -"Parent omitted": - - - do: - index: - index: test_1 - type: test - id: 1 - parent: 5 - body: { foo: bar } - - - do: - catch: bad_request - update: - index: test_1 - type: test - id: 1 - body: - doc: { foo: baz } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yml deleted file mode 100644 index e75eddff9a87e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/55_parent_with_routing.yml +++ /dev/null @@ -1,64 +0,0 @@ ---- -"Parent with routing": - - - do: - indices.create: - index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - settings: - number_of_shards: 5 - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - - do: - update: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - body: - doc: { foo: baz } - upsert: { foo: bar } - - - do: - get: - index: test_1 - type: test - id: 1 - routing: 4 - parent: 5 - stored_fields: [_parent, _routing] - - - match: { _parent: "5"} - - match: { _routing: "4"} - - - do: - catch: missing - update: - index: test_1 - type: test - id: 1 - parent: 5 - body: - doc: { foo: baz } - - - do: - update: - index: test_1 - type: test - id: 1 - parent: 5 - routing: 4 - _source: foo - body: - doc: { foo: baz } - - - match: { get._source.foo: baz } - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml index 62d8fd125ff90..8ac1568a1275a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml @@ -21,7 +21,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -41,7 +40,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 2 }} @@ -71,7 +69,6 @@ - do: search: index: test_1 - type: test body: query: { term: { cat: dog }} @@ -91,7 +88,6 @@ - do: search: index: update_60_refresh_1 - type: test body: query: { term: { _id: update_60_refresh_id1 }} - match: { hits.total: 1 } @@ -109,7 +105,6 @@ - do: search: index: update_60_refresh_1 - type: test body: query: { match: { test: asdf } } - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml index 7a6a58e12c0ff..f7791d0986399 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml @@ -8,10 +8,6 @@ - do: indices.create: index: test_1 - body: - mappings: - test: - _parent: { type: "foo" } - do: update: @@ -19,12 +15,11 @@ type: test id: 1 parent: 5 - fields: [ _parent, _routing ] + fields: [ _routing ] body: doc: { foo: baz } upsert: { foo: 
bar } - - match: { get._parent: "5" } - match: { get._routing: "5" } - do: @@ -33,6 +28,6 @@ type: test id: 1 parent: 5 - stored_fields: [ _parent, _routing ] + stored_fields: [ _routing ] diff --git a/server/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/server/cli/src/main/java/org/elasticsearch/cli/Terminal.java index 85abd61677445..d9923def6ca0a 100644 --- a/server/cli/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/server/cli/src/main/java/org/elasticsearch/cli/Terminal.java @@ -163,7 +163,11 @@ public String readText(String text) { getWriter().print(text); BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset())); try { - return reader.readLine(); + final String line = reader.readLine(); + if (line == null) { + throw new IllegalStateException("unable to read from standard input; is standard input open and a tty attached?"); + } + return line; } catch (IOException ioe) { throw new RuntimeException(ioe); } diff --git a/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index c167b717385d5..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a731424734fd976b409f1963ba88471caccc18aa \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.3.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..5a50f9dd77f5e --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.3.0.jar.sha1 @@ -0,0 +1 @@ +4325a5cdf8d3fa23f326cd86a2297fee2bc844f5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index cdaec87d35b28..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f8ad8c3f8c404803aa81a43ac6f732e19c00935 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.3.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..309f301ad8c81 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.3.0.jar.sha1 @@ -0,0 +1 @@ +3b618a21a924cb35ac1f27d3ca47d9ed04f43588 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index ecb3bb28e238c..0000000000000 --- a/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19b1a1fff6bb077e0660e4f0666807e24dd26865 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.3.0.jar.sha1 b/server/licenses/lucene-core-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..e12c932b38dd0 --- /dev/null +++ b/server/licenses/lucene-core-7.3.0.jar.sha1 @@ -0,0 +1 @@ +040e2de30c5e6bad868b144e371730200719ceb3 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 03f9bf1a4c87e..0000000000000 --- a/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94dd26d685ae981905b775780e6c824f723b14af \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.3.0.jar.sha1 
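
The Terminal#readText change above guards against reading from a closed or detached standard input: BufferedReader#readLine returns null at end-of-stream, and failing fast beats handing a null "answer" to callers. A minimal, dependency-free sketch of the same guard; the message is taken from the hunk, and only the class name StdinPrompt is invented for illustration:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;

final class StdinPrompt {
    // readLine() returns null at end-of-stream (no tty, closed stdin, piped
    // input exhausted); surface that immediately instead of returning null.
    static String readText(String prompt) throws IOException {
        System.out.print(prompt);
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
        final String line = reader.readLine();
        if (line == null) {
            throw new IllegalStateException("unable to read from standard input; is standard input open and a tty attached?");
        }
        return line;
    }

    public static void main(String[] args) throws IOException {
        System.out.println("you typed: " + readText("enter a value: "));
    }
}
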
b/server/licenses/lucene-grouping-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..703384a64de9a --- /dev/null +++ b/server/licenses/lucene-grouping-7.3.0.jar.sha1 @@ -0,0 +1 @@ +20a5c472a8be9bec7aa40472791389e875b9e1f2 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 32327ca414ddb..0000000000000 --- a/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9783a0bb56fb8bbd17280d3def97a656999f6a88 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.3.0.jar.sha1 b/server/licenses/lucene-highlighter-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..6e38e2560636f --- /dev/null +++ b/server/licenses/lucene-highlighter-7.3.0.jar.sha1 @@ -0,0 +1 @@ +1f92c7d3d9bc2765fe6195bcc4fcb160d11175cc \ No newline at end of file diff --git a/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 6b521d7de7fe1..0000000000000 --- a/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01eda74d798af85f846ebd74f53ec7a16e6e2ba1 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.3.0.jar.sha1 b/server/licenses/lucene-join-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..d7213d76a62aa --- /dev/null +++ b/server/licenses/lucene-join-7.3.0.jar.sha1 @@ -0,0 +1 @@ +da4af75a7e4fe7843fbfa4b58e6a238b6b706d64 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 6bfaf1c715f89..0000000000000 --- a/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -29b8b6324722dc6dda784731e3e918de9715422c \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.3.0.jar.sha1 b/server/licenses/lucene-memory-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..6bb4a4d832d54 --- /dev/null +++ b/server/licenses/lucene-memory-7.3.0.jar.sha1 @@ -0,0 +1 @@ +fc45b02a5086ec454e6d6ae81fc2cbe7be1c0902 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 74d01520b6479..0000000000000 --- a/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1ae49522164a721d67459e59792db6f4dff70fc \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.3.0.jar.sha1 b/server/licenses/lucene-misc-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..43c777150a3e1 --- /dev/null +++ b/server/licenses/lucene-misc-7.3.0.jar.sha1 @@ -0,0 +1 @@ +b6a2418a94b84c29c4b9fcfe4381f2cc1aa4c214 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 172a57bed49fe..0000000000000 --- a/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87595367717ddc9fbf95bbf649216a5d7954d9d7 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.3.0.jar.sha1 b/server/licenses/lucene-queries-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..b0ef2b4d0eb84 --- /dev/null +++ 
b/server/licenses/lucene-queries-7.3.0.jar.sha1 @@ -0,0 +1 @@ +6292a5579a6ab3423ceca60d2ea41cd86481e7c0 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index ac6aec921a30c..0000000000000 --- a/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5befbb58ef76c79fc8afebbca781b01320b8ffad \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.3.0.jar.sha1 b/server/licenses/lucene-queryparser-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..87a1d74498d82 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.3.0.jar.sha1 @@ -0,0 +1 @@ +95b2563e5337377dde2eb987b3fce144be5e7a77 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 412b072e09d2e..0000000000000 --- a/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d7aa72ccec38ef902b149da36548fb227eeb58a \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.3.0.jar.sha1 b/server/licenses/lucene-sandbox-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..605263a2296ff --- /dev/null +++ b/server/licenses/lucene-sandbox-7.3.0.jar.sha1 @@ -0,0 +1 @@ +1efd2fa7cba1e359e3fbb8b4c11cab37024b2178 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 5c8d749cf978b..0000000000000 --- a/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ac1755a69f14c53f7846ef7d9b405d44caf53091 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.3.0.jar.sha1 b/server/licenses/lucene-spatial-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..4fcd32b5d29bb --- /dev/null +++ b/server/licenses/lucene-spatial-7.3.0.jar.sha1 @@ -0,0 +1 @@ +93512c2160bdc3e602141329e5945a91918b6752 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 09e57350f1cdd..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d2fa5db0ce9fb5a1b4e9f18d818b14e082ef5a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.3.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..0f078420cdb19 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.3.0.jar.sha1 @@ -0,0 +1 @@ +47090d8ddf99f6bbb64ee8ab7a76c3cd3165b88f \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index e59ab0d054d0d..0000000000000 --- a/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -99aefdef8178e54f93b743452c5d36bf7e8b3a2d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.3.0.jar.sha1 b/server/licenses/lucene-spatial3d-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..268ed39a78405 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.3.0.jar.sha1 @@ -0,0 +1 @@ 
+ed8f07d67445d5acde6597996461640b2d92fa08 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 deleted file mode 100644 index 805298afb193e..0000000000000 --- a/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6257a8a1860ec5f57439c420637d5f20bab124ae \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.3.0.jar.sha1 b/server/licenses/lucene-suggest-7.3.0.jar.sha1 new file mode 100644 index 0000000000000..798238ce58bc1 --- /dev/null +++ b/server/licenses/lucene-suggest-7.3.0.jar.sha1 @@ -0,0 +1 @@ +6034ccf6b27c659ab7a2678680bae8390fbfc40a \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 09db7089ff629..fa0796cafe184 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -71,13 +71,6 @@ public interface DocWriteRequest extends IndicesRequest { */ String routing(); - - /** - * Get the parent for this request - * @return the Parent - */ - String parent(); - /** * Get the document version for this request * @return the document version diff --git a/server/src/main/java/org/elasticsearch/action/GenericAction.java b/server/src/main/java/org/elasticsearch/action/GenericAction.java index 7b54f2f6836fb..6220a1b2062bf 100644 --- a/server/src/main/java/org/elasticsearch/action/GenericAction.java +++ b/server/src/main/java/org/elasticsearch/action/GenericAction.java @@ -57,7 +57,7 @@ public TransportRequestOptions transportOptions(Settings settings) { @Override public boolean equals(Object o) { - return name.equals(((GenericAction) o).name()); + return o instanceof GenericAction && name.equals(((GenericAction) o).name()); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 25a62d03f0b80..50d3bc8535704 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -64,7 +64,7 @@ public ClusterHealthRequest(StreamInput in) throws IOException { indices[i] = in.readString(); } } - timeout = new TimeValue(in); + timeout = in.readTimeValue(); if (in.readBoolean()) { waitForStatus = ClusterHealthStatus.fromValue(in.readByte()); } @@ -90,7 +90,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(index); } } - timeout.writeTo(out); + out.writeTimeValue(timeout); if (waitForStatus == null) { out.writeBoolean(false); } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java index 74779711c73a9..1fca07fb02479 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -184,7 +184,7 @@ public void readFrom(StreamInput in) throws IOException { timedOut = in.readBoolean(); numberOfInFlightFetch = in.readInt(); delayedUnassignedShards= in.readInt(); - taskMaxWaitingTime = new 
TimeValue(in); + taskMaxWaitingTime = in.readTimeValue(); } @Override @@ -197,7 +197,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(timedOut); out.writeInt(numberOfInFlightFetch); out.writeInt(delayedUnassignedShards); - taskMaxWaitingTime.writeTo(out); + out.writeTimeValue(taskMaxWaitingTime); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 60b03c710af8e..e4bd5b9128ec5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -99,7 +99,7 @@ public void readFrom(StreamInput in) throws IOException { threads = in.readInt(); ignoreIdleThreads = in.readBoolean(); type = in.readString(); - interval = new TimeValue(in); + interval = in.readTimeValue(); snapshots = in.readInt(); } @@ -109,7 +109,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(threads); out.writeBoolean(ignoreIdleThreads); out.writeString(type); - interval.writeTo(out); + out.writeTimeValue(interval); out.writeInt(snapshots); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java index 07d40b5ffcaa0..b8eb33edc82de 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java @@ -105,7 +105,7 @@ public ActionRequestValidationException validate() { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); taskId = TaskId.readFromStream(in); - timeout = in.readOptionalWriteable(TimeValue::new); + timeout = in.readOptionalTimeValue(); waitForCompletion = in.readBoolean(); } @@ -113,7 +113,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); taskId.writeTo(out); - out.writeOptionalWriteable(timeout); + out.writeOptionalTimeValue(timeout); out.writeBoolean(waitForCompletion); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 686bf8a74b85d..594564b681562 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -28,27 +28,34 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -import 
static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * Request for an update index settings action */ -public class UpdateSettingsRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { +public class UpdateSettingsRequest extends AcknowledgedRequest + implements IndicesRequest.Replaceable, ToXContentObject { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); private Settings settings = EMPTY_SETTINGS; private boolean preserveExisting = false; + private boolean flatSettings = false; public UpdateSettingsRequest() { } @@ -68,6 +75,29 @@ public UpdateSettingsRequest(Settings settings, String... indices) { this.settings = settings; } + /** + * Sets the value of "flat_settings". + * Used only by the high-level REST client. + * + * @param flatSettings + * value of "flat_settings" flag to be set + * @return this request + */ + public UpdateSettingsRequest flatSettings(boolean flatSettings) { + this.flatSettings = flatSettings; + return this; + } + + /** + * Return settings in flat format. + * Used only by the high-level REST client. + * + * @return true if settings need to be returned in flat format; false otherwise. + */ + public boolean flatSettings() { + return flatSettings; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -178,4 +208,55 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(settings, out); out.writeBoolean(preserveExisting); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + settings.toXContent(builder, params); + builder.endObject(); + return builder; + } + + public UpdateSettingsRequest fromXContent(XContentParser parser) throws IOException { + Map settings = new HashMap<>(); + Map bodySettings = parser.map(); + Object innerBodySettings = bodySettings.get("settings"); + // clean up in case the body is wrapped with "settings" : { ... 
} + if (innerBodySettings instanceof Map) { + @SuppressWarnings("unchecked") + Map innerBodySettingsMap = (Map) innerBodySettings; + settings.putAll(innerBodySettingsMap); + } else { + settings.putAll(bodySettings); + } + return this.settings(settings); + } + + @Override + public String toString() { + return "indices : " + Arrays.toString(indices) + "," + Strings.toString(this); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + UpdateSettingsRequest that = (UpdateSettingsRequest) o; + return masterNodeTimeout.equals(that.masterNodeTimeout) + && timeout.equals(that.timeout) + && Objects.equals(settings, that.settings) + && Objects.equals(indicesOptions, that.indicesOptions) + && Objects.equals(preserveExisting, that.preserveExisting) + && Arrays.equals(indices, that.indices); + } + + @Override + public int hashCode() { + return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, Arrays.hashCode(indices)); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java index b1475843aac5f..79116eb8cf5a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,6 +32,13 @@ */ public class UpdateSettingsResponse extends AcknowledgedResponse { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "update_index_settings", true, args -> new UpdateSettingsResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } + UpdateSettingsResponse() { } @@ -48,4 +57,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + public static UpdateSettingsResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 39a185741db92..fdafb3b2b805e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -299,7 +299,7 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu */ public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload, XContentType xContentType) throws Exception { - bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true, xContentType); + bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true, xContentType); executeIfNeeded(); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java 
b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 8a8353c731b94..ebc095b1670b3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -36,8 +36,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -66,8 +64,6 @@ * @see org.elasticsearch.client.Client#bulk(BulkRequest) */ public class BulkRequest extends ActionRequest implements CompositeIndicesRequest, WriteRequest { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(BulkRequest.class)); private static final int REQUEST_OVERHEAD = 50; @@ -75,13 +71,11 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); - private static final ParseField PARENT = new ParseField("parent"); private static final ParseField OP_TYPE = new ParseField("op_type"); private static final ParseField VERSION = new ParseField("version"); private static final ParseField VERSION_TYPE = new ParseField("version_type"); private static final ParseField RETRY_ON_CONFLICT = new ParseField("retry_on_conflict"); private static final ParseField PIPELINE = new ParseField("pipeline"); - private static final ParseField FIELDS = new ParseField("fields"); private static final ParseField SOURCE = new ParseField("_source"); /** @@ -278,7 +272,7 @@ public BulkRequest add(byte[] data, int from, int length, @Nullable String defau */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, XContentType xContentType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, null, true, xContentType); + return add(data, defaultIndex, defaultType, null, null, null, null, true, xContentType); } /** @@ -286,12 +280,13 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null */ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex, XContentType xContentType) throws IOException { - return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex, xContentType); + return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex, xContentType); } - public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String - defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String - defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException { + public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, + @Nullable String defaultRouting, @Nullable FetchSourceContext defaultFetchSourceContext, + @Nullable String defaultPipeline, @Nullable Object payload, boolean 
allowExplicitIndex, + XContentType xContentType) throws IOException { XContent xContent = xContentType.xContent(); int line = 0; int from = 0; @@ -333,9 +328,7 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null String type = defaultType; String id = null; String routing = defaultRouting; - String parent = null; FetchSourceContext fetchSourceContext = defaultFetchSourceContext; - String[] fields = defaultFields; String opType = null; long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; @@ -363,8 +356,6 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null id = parser.text(); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { routing = parser.text(); - } else if (PARENT.match(currentFieldName, parser.getDeprecationHandler())) { - parent = parser.text(); } else if (OP_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { opType = parser.text(); } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { @@ -375,21 +366,14 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null retryOnConflict = parser.intValue(); } else if (PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { pipeline = parser.text(); - } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { - throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected"); } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else { throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { - if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { - DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead"); - List values = parser.list(); - fields = values.toArray(new String[values.size()]); - } else { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); - } + throw new IllegalArgumentException("Malformed action/metadata line [" + line + + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else if (token != XContentParser.Token.VALUE_NULL) { @@ -402,7 +386,7 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null } if ("delete".equals(action)) { - add(new DeleteRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType), payload); + add(new DeleteRequest(index, type, id).routing(routing).version(version).versionType(versionType), payload); } else { nextMarker = findNextMarker(marker, from, data, length); if (nextMarker == -1) { @@ -414,23 +398,22 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null // of index request. 
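
To summarize the parsing changes above: with parent and fields dropped, the action/metadata line of a bulk body accepts only the keys declared by the remaining ParseField constants, and any array-valued parameter is now rejected outright. A small, dependency-free sketch; the class BulkMetadataKeys and its literal key set are illustrative, transcribed from the constants in this file rather than taken from an Elasticsearch API:

import java.util.Set;

public class BulkMetadataKeys {
    // Keys the bulk action/metadata parser still recognizes after this change.
    static final Set<String> ACCEPTED = Set.of(
            "_index", "_type", "_id", "routing", "op_type",
            "version", "version_type", "retry_on_conflict", "pipeline", "_source");

    public static void main(String[] args) {
        System.out.println(ACCEPTED.contains("parent")); // false: routing replaces it
        System.out.println(ACCEPTED.contains("fields")); // false: _source replaces it
    }
}
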
if ("index".equals(action)) { if (opType == null) { - internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) + internalAdd(new IndexRequest(index, type, id).routing(routing).version(version).versionType(versionType) .setPipeline(pipeline) .source(sliceTrimmingCarriageReturn(data, from, nextMarker,xContentType), xContentType), payload); } else { - internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) + internalAdd(new IndexRequest(index, type, id).routing(routing).version(version).versionType(versionType) .create("create".equals(opType)).setPipeline(pipeline) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload); } } else if ("create".equals(action)) { - internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) + internalAdd(new IndexRequest(index, type, id).routing(routing).version(version).versionType(versionType) .create(true).setPipeline(pipeline) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload); } else if ("update".equals(action)) { - UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict) + UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).retryOnConflict(retryOnConflict) .version(version).versionType(versionType) - .routing(routing) - .parent(parent); + .routing(routing); // EMPTY is safe here because we never call namedObject try (InputStream dataStream = sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType).streamInput(); XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, @@ -440,10 +423,6 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null if (fetchSourceContext != null) { updateRequest.fetchSource(fetchSourceContext); } - if (fields != null) { - updateRequest.fields(fields); - } - IndexRequest upsertRequest = updateRequest.upsertRequest(); if (upsertRequest != null) { upsertRequest.version(version); @@ -593,7 +572,7 @@ public void readFrom(StreamInput in) throws IOException { requests.add(DocWriteRequest.readDocumentRequest(in)); } refreshPolicy = RefreshPolicy.readFrom(in); - timeout = new TimeValue(in); + timeout = in.readTimeValue(); } @Override @@ -605,7 +584,7 @@ public void writeTo(StreamOutput out) throws IOException { DocWriteRequest.writeDocumentRequest(out, request); } refreshPolicy.writeTo(out); - timeout.writeTo(out); + out.writeTimeValue(timeout); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 5a3544377155c..f8fd4fa924b30 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -299,7 +299,7 @@ protected void doRun() throws Exception { TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest); break; case DELETE: - docWriteRequest.routing(metaData.resolveIndexRouting(docWriteRequest.parent(), docWriteRequest.routing(), docWriteRequest.index())); + docWriteRequest.routing(metaData.resolveIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); // check if routing is required, if so, throw 
error if routing wasn't specified if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), docWriteRequest.type())) { throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id()); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 7221118d2ef50..f9b27a1e62040 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -291,8 +291,7 @@ static BulkItemResultHolder processUpdateResponse(final UpdateRequest updateRequ indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getPrimaryTerm(), indexResponse.getVersion(), indexResponse.getResult()); - if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) || - (updateRequest.fields() != null && updateRequest.fields().length > 0)) { + if (updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) { final BytesReference indexSourceAsBytes = updateIndexRequest.source(); final Tuple> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true, updateIndexRequest.getContentType()); @@ -513,7 +512,7 @@ private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse final SourceToParse sourceToParse = SourceToParse.source(shardId.getIndexName(), indexRequest.type(), indexRequest.id(), indexRequest.source(), indexRequest.getContentType()) - .routing(indexRequest.routing()).parent(indexRequest.parent()); + .routing(indexRequest.routing()); return replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(), indexRequest.versionType().versionTypeForReplicationAndRecovery(), indexRequest.getAutoGeneratedTimestamp(), indexRequest.isRetry(), sourceToParse, update -> { @@ -539,7 +538,7 @@ static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, Ind MappingUpdatePerformer mappingUpdater) throws Exception { final SourceToParse sourceToParse = SourceToParse.source(request.index(), request.type(), request.id(), request.source(), request.getContentType()) - .routing(request.routing()).parent(request.parent()); + .routing(request.routing()); try { // if a mapping update is required to index this request, issue a mapping update on the master, and abort the // current indexing operation so that it can be retried with the updated mapping from the master diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 50d1f4cb8e9c7..879e8e665cd44 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.delete; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.DocWriteRequest; @@ -51,8 +52,6 @@ public class DeleteRequest extends ReplicatedWriteRequest impleme private String id; @Nullable private String routing; - @Nullable - private String parent; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -130,22 +129,6 @@ public DeleteRequest id(String id) { return this; 
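
With the fields code path removed from processUpdateResponse above, the only remaining way to have the updated document echoed back in the response is fetchSource. A hedged usage sketch against the public UpdateRequest API; the index, type, and id values are placeholders borrowed from the test fixtures earlier in this diff:

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.XContentType;

public class UpdateWithSource {
    public static UpdateRequest build() {
        // fetchSource(true) replaces the removed fields(...) style of asking
        // for the post-update document in the response.
        return new UpdateRequest("test", "doc", "1")
                .doc("{\"foo\":\"baz\"}", XContentType.JSON)
                .fetchSource(true);
    }
}
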
} - /** - * @return The parent for this request. - */ - @Override - public String parent() { - return parent; - } - - /** - * Sets the parent id of this document. - */ - public DeleteRequest parent(String parent) { - this.parent = parent; - return this; - } - /** * Controls the shard routing of the request. Using this value to hash the shard * and not the id. @@ -202,7 +185,9 @@ public void readFrom(StreamInput in) throws IOException { type = in.readString(); id = in.readString(); routing = in.readOptionalString(); - parent = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readOptionalString(); // _parent + } version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); } @@ -213,7 +198,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeString(id); out.writeOptionalString(routing()); - out.writeOptionalString(parent()); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(null); // _parent + } out.writeLong(version); out.writeByte(versionType.getValue()); } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java index b9b0f95f8de90..9060af8e17c8c 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequestBuilder.java @@ -55,15 +55,6 @@ public DeleteRequestBuilder setId(String id) { return this; } - /** - * Sets the parent id of this document. Will simply set the routing to this value, as it is only - * used for routing with delete requests. - */ - public DeleteRequestBuilder setParent(String parent) { - request.parent(parent); - return this; - } - /** * Controls the shard routing of the delete request. Using this value to hash the shard * and not the id. diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index ea5dda45279e6..05100e5880983 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.get; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; @@ -48,7 +49,6 @@ public class GetRequest extends SingleShardRequest implements Realti private String type; private String id; private String routing; - private String parent; private String preference; private String[] storedFields; @@ -126,21 +126,6 @@ public GetRequest id(String id) { return this; } - /** - * @return The parent for this request. - */ - public String parent() { - return parent; - } - - /** - * Sets the parent id of this document. - */ - public GetRequest parent(String parent) { - this.parent = parent; - return this; - } - /** * Controls the shard routing of the request. Using this value to hash the shard * and not the id. 
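
The readFrom/writeTo hunks above keep the wire format compatible with pre-7.0 nodes: the removed _parent slot is still consumed when reading from, and written as null to, versions before V_7_0_0_alpha1, so both sides keep agreeing on the stream layout. A dependency-free sketch of that pattern; the version constant and the optional-string encoding below are simplified stand-ins, not the real StreamInput/StreamOutput format:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class ParentFieldCompat {
    // Stand-in for Version.V_7_0_0_alpha1; the real class compares version ids.
    static final int V_7_0_0_ALPHA1 = 7000001;

    // Mirrors writeOptionalString: a presence flag, then the value if present.
    static void writeOptionalString(DataOutputStream out, String s) throws IOException {
        out.writeBoolean(s != null);
        if (s != null) {
            out.writeUTF(s);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    static void writeDeleteRouting(DataOutputStream out, int remoteVersion, String routing) throws IOException {
        writeOptionalString(out, routing);
        if (remoteVersion < V_7_0_0_ALPHA1) {
            writeOptionalString(out, null); // _parent slot kept for old nodes
        }
    }

    static String readDeleteRouting(DataInputStream in, int remoteVersion) throws IOException {
        String routing = readOptionalString(in);
        if (remoteVersion < V_7_0_0_ALPHA1) {
            readOptionalString(in); // discard the legacy _parent slot
        }
        return routing;
    }
}
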
@@ -260,7 +245,9 @@ public void readFrom(StreamInput in) throws IOException { type = in.readString(); id = in.readString(); routing = in.readOptionalString(); - parent = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readOptionalString(); + } preference = in.readOptionalString(); refresh = in.readBoolean(); storedFields = in.readOptionalStringArray(); @@ -277,7 +264,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeString(id); out.writeOptionalString(routing); - out.writeOptionalString(parent); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(null); + } out.writeOptionalString(preference); out.writeBoolean(refresh); diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java index 1ca8dbde65200..9f59d3ecaef60 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequestBuilder.java @@ -56,15 +56,6 @@ public GetRequestBuilder setId(String id) { return this; } - /** - * Sets the parent id of this document. Will simply set the routing to this value, as it is only - * used for routing with delete requests. - */ - public GetRequestBuilder setParent(String parent) { - request.parent(parent); - return this; - } - /** * Controls the shard routing of the request. Using this value to hash the shard * and not the id. diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 31e5e2dfff20a..b93c8a7f037c8 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -58,7 +59,6 @@ public class MultiGetRequest extends ActionRequest private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); - private static final ParseField PARENT = new ParseField("parent"); private static final ParseField VERSION = new ParseField("version"); private static final ParseField VERSION_TYPE = new ParseField("version_type"); private static final ParseField FIELDS = new ParseField("fields"); @@ -74,7 +74,6 @@ public static class Item implements Streamable, IndicesRequest, ToXContentObject private String type; private String id; private String routing; - private String parent; private String[] storedFields; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; @@ -141,18 +140,6 @@ public String routing() { return this.routing; } - public Item parent(String parent) { - this.parent = parent; - return this; - } - - /** - * @return The parent for this request. - */ - public String parent() { - return parent; - } - public Item storedFields(String... 
fields) { this.storedFields = fields; return this; @@ -204,7 +191,9 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalString(); id = in.readString(); routing = in.readOptionalString(); - parent = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readOptionalString(); // _parent + } storedFields = in.readOptionalStringArray(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); @@ -218,7 +207,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(type); out.writeString(id); out.writeOptionalString(routing); - out.writeOptionalString(parent); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(null); // _parent + } out.writeOptionalStringArray(storedFields); out.writeLong(version); out.writeByte(versionType.getValue()); @@ -233,7 +224,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(TYPE.getPreferredName(), type); builder.field(ID.getPreferredName(), id); builder.field(ROUTING.getPreferredName(), routing); - builder.field(PARENT.getPreferredName(), parent); builder.field(STORED_FIELDS.getPreferredName(), storedFields); builder.field(VERSION.getPreferredName(), version); builder.field(VERSION_TYPE.getPreferredName(), VersionType.toString(versionType)); @@ -256,7 +246,6 @@ public boolean equals(Object o) { if (!id.equals(item.id)) return false; if (!index.equals(item.index)) return false; if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false; - if (parent != null ? !parent.equals(item.parent) : item.parent != null) return false; if (type != null ? !type.equals(item.type) : item.type != null) return false; if (versionType != item.versionType) return false; @@ -269,7 +258,6 @@ public int hashCode() { result = 31 * result + (type != null ? type.hashCode() : 0); result = 31 * result + id.hashCode(); result = 31 * result + (routing != null ? routing.hashCode() : 0); - result = 31 * result + (parent != null ? parent.hashCode() : 0); result = 31 * result + (storedFields != null ? 
Arrays.hashCode(storedFields) : 0); result = 31 * result + Long.hashCode(version); result = 31 * result + versionType.hashCode(); @@ -407,7 +395,6 @@ private static void parseDocuments(XContentParser parser, List items, @Nul String type = defaultType; String id = null; String routing = defaultRouting; - String parent = null; List storedFields = null; long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; @@ -429,8 +416,6 @@ private static void parseDocuments(XContentParser parser, List items, @Nul id = parser.text(); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { routing = parser.text(); - } else if (PARENT.match(currentFieldName, parser.getDeprecationHandler())) { - parent = parser.text(); } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { throw new ParsingException(parser.getTokenLocation(), "Unsupported field [fields] used, expected [stored_fields] instead"); @@ -510,7 +495,7 @@ private static void parseDocuments(XContentParser parser, List items, @Nul } else { aFields = defaultFields; } - items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType) + items.add(new Item(index, type, id).routing(routing).storedFields(aFields).version(version).versionType(versionType) .fetchSourceContext(fetchSourceContext == FetchSourceContext.FETCH_SOURCE ? defaultFetchSource : fetchSourceContext)); } } diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 599a6655e02e8..0aeacb38ffa56 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -69,7 +69,7 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { // update the routing (request#index here is possibly an alias) - request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index())); + request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index())); // Fail fast on the node that received the request. if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) { throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java index bea65283cc034..31ca0e0322b6b 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportMultiGetAction.java @@ -68,7 +68,7 @@ protected void doExecute(final MultiGetRequest request, final ActionListener implement private String id; @Nullable private String routing; - @Nullable - private String parent; private BytesReference source; @@ -254,19 +252,6 @@ public String routing() { return this.routing; } - /** - * Sets the parent id of this document. 
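
As the TransportGetAction hunk above shows, resolveIndexRouting now takes only the request routing and the index name. A simplified, dependency-free sketch of the resolution order under that signature; the real MetaData implementation also handles alias-level routing conflicts and required-routing checks, which this sketch deliberately omits:

import java.util.Map;

class IndexRoutingResolver {
    // alias -> routing value; a simplification of MetaData's alias metadata.
    private final Map<String, String> aliasRouting;

    IndexRoutingResolver(Map<String, String> aliasRouting) {
        this.aliasRouting = aliasRouting;
    }

    // With the parent parameter gone, resolution only weighs an explicit
    // routing value against alias-level routing; null means route by _id.
    String resolve(String requestRouting, String index) {
        if (requestRouting != null) {
            return requestRouting;
        }
        return aliasRouting.get(index);
    }
}
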
- */ - public IndexRequest parent(String parent) { - this.parent = parent; - return this; - } - - @Override - public String parent() { - return this.parent; - } - /** * Sets the ingest pipeline to be executed before indexing the document */ @@ -490,14 +475,6 @@ public void process(Version indexCreatedVersion, @Nullable MappingMetaData mappi if (mappingMd.routing().required() && routing == null) { throw new RoutingMissingException(concreteIndex, type, id); } - - if (parent != null && !mappingMd.hasParentField()) { - throw new IllegalArgumentException("can't specify parent if no parent field has been configured"); - } - } else { - if (parent != null) { - throw new IllegalArgumentException("can't specify parent if no parent field has been configured"); - } } if ("".equals(id)) { @@ -520,7 +497,7 @@ public void process(Version indexCreatedVersion, @Nullable MappingMetaData mappi /* resolve the routing if needed */ public void resolveRouting(MetaData metaData) { - routing(metaData.resolveIndexRouting(parent, routing, index)); + routing(metaData.resolveIndexRouting(routing, index)); } @Override @@ -529,10 +506,12 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalString(); id = in.readOptionalString(); routing = in.readOptionalString(); - parent = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readOptionalString(); // _parent + } if (in.getVersion().before(Version.V_6_0_0_alpha1)) { in.readOptionalString(); // timestamp - in.readOptionalWriteable(TimeValue::new); // ttl + in.readOptionalTimeValue(); // ttl } source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); @@ -554,7 +533,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(type); out.writeOptionalString(id); out.writeOptionalString(routing); - out.writeOptionalString(parent); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(null); // _parent + } if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // Serialize a fake timestamp. 5.x expect this value to be set by the #process method so we can't use null. // On the other hand, indices created on 5.x do not index the timestamp field. Therefore passing a 0 (or any value) for diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java index 88b094a33f521..1f7d5e0bca89a 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequestBuilder.java @@ -71,15 +71,6 @@ public IndexRequestBuilder setRouting(String routing) { return this; } - /** - * Sets the parent id of this document. If routing is not set, automatically set it as the - * routing as well. - */ - public IndexRequestBuilder setParent(String parent) { - request.parent(parent); - return this; - } - /** * Sets the source. 
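
The serialization hunks above and below replace ad-hoc new TimeValue(in) / timeout.writeTo(out) pairs, and readOptionalWriteable(TimeValue::new), with dedicated readTimeValue/writeTimeValue stream methods, centralizing the encoding in one place. A dependency-free sketch of what such a codec can look like; the on-wire layout here (a long duration plus a unit byte) is an assumption for illustration, not the documented StreamOutput format:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.concurrent.TimeUnit;

final class TimeValueCodec {
    static final class Value {
        final long duration;
        final TimeUnit unit;
        Value(long duration, TimeUnit unit) {
            this.duration = duration;
            this.unit = unit;
        }
    }

    // One writer for every call site instead of each request serializing the
    // value itself.
    static void write(DataOutputStream out, long duration, TimeUnit unit) throws IOException {
        out.writeLong(duration);
        out.writeByte(unit.ordinal());
    }

    static Value read(DataInputStream in) throws IOException {
        long duration = in.readLong();
        TimeUnit unit = TimeUnit.values()[in.readByte()];
        return new Value(duration, unit);
    }
}
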
*/ diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index aeb4b47719dd6..3aa697b8e997c 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -193,8 +193,6 @@ private static List parseDocs(Map config) { dataMap, MetaData.ID.getFieldName(), "_id"); String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, MetaData.ROUTING.getFieldName()); - String parent = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, - dataMap, MetaData.PARENT.getFieldName()); Long version = null; if (dataMap.containsKey(MetaData.VERSION.getFieldName())) { version = (Long) ConfigurationUtils.readObject(null, null, dataMap, MetaData.VERSION.getFieldName()); @@ -205,7 +203,7 @@ private static List parseDocs(Map config) { MetaData.VERSION_TYPE.getFieldName())); } IngestDocument ingestDocument = - new IngestDocument(index, type, id, routing, parent, version, versionType, document); + new IngestDocument(index, type, id, routing, version, versionType, document); ingestDocumentList.add(ingestDocument); } return ingestDocumentList; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 8df82279cb2f9..a7be0f41ffbae 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -224,7 +224,9 @@ public String[] types() { /** * The document types to execute the search against. Defaults to be executed against * all types. + * @deprecated Types are going away, prefer filtering on a type. */ + @Deprecated public SearchRequest types(String... types) { Objects.requireNonNull(types, "types must not be null"); for (String type : types) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 5342b55d5421b..1ddecf133150e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -61,7 +61,9 @@ public SearchRequestBuilder setIndices(String... indices) { /** * The document types to execute the search against. Defaults to be executed against * all types. + * @deprecated Types are going away, prefer filtering on a field. */ + @Deprecated public SearchRequestBuilder setTypes(String... types) { request.types(types); return this; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 900955b7b7d1e..eca9e9e05ea5b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -31,7 +32,8 @@ * Abstract class that allows to mark action requests that support acknowledgements. * Facilitates consistency across different api. 
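
Matching the types()/setTypes() deprecation above, and the removal of type: from the YAML search tests earlier in this diff, a search is now built without a type, filtering on a field when one is needed. A usage sketch against the public search API; the goodbad index and class field are borrowed from the sig_terms test fixtures above:

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class TypelessSearch {
    public static SearchRequest build() {
        // Previously this would also call .types("doc"); now the request is
        // typeless and selection happens through an ordinary field filter.
        SearchRequest request = new SearchRequest("goodbad");
        request.source(new SearchSourceBuilder()
                .query(QueryBuilders.termQuery("class", "good")));
        return request;
    }
}
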
*/ -public abstract class AcknowledgedRequest> extends MasterNodeRequest implements AckedRequest { +public abstract class AcknowledgedRequest> extends MasterNodeRequest + implements AckedRequest { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); @@ -78,13 +80,13 @@ public TimeValue ackTimeout() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - timeout = new TimeValue(in); + timeout = in.readTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - timeout.writeTo(out); + out.writeTimeValue(timeout); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 2bad309f1cc3b..bcc0139934ec5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; +import java.util.Objects; /** * A based request for master based operation. @@ -40,13 +41,13 @@ protected MasterNodeRequest() { protected MasterNodeRequest(StreamInput in) throws IOException { super(in); - masterNodeTimeout = new TimeValue(in); + masterNodeTimeout = in.readTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - masterNodeTimeout.writeTo(out); + out.writeTimeValue(masterNodeTimeout); } /** @@ -74,6 +75,7 @@ public void readFrom(StreamInput in) throws IOException { // TODO(talevy): throw exception once all MasterNodeRequest // subclasses have been migrated to Writeable Readers super.readFrom(in); - masterNodeTimeout = new TimeValue(in); + masterNodeTimeout = in.readTimeValue(); } + } diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java index 2f8490cc87208..dac584a614429 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesRequest.java @@ -107,7 +107,7 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); nodesIds = in.readStringArray(); concreteNodes = in.readOptionalArray(DiscoveryNode::new, DiscoveryNode[]::new); - timeout = in.readOptionalWriteable(TimeValue::new); + timeout = in.readOptionalTimeValue(); } @Override @@ -115,6 +115,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(nodesIds); out.writeOptionalArray(concreteNodes); - out.writeOptionalWriteable(timeout); + out.writeOptionalTimeValue(timeout); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 81584a7bb6467..3dc222f9e3a90 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -187,7 +187,7 @@ public void readFrom(StreamInput in) throws IOException { shardId = null; } waitForActiveShards = ActiveShardCount.readFrom(in); - timeout = new TimeValue(in); + timeout = in.readTimeValue(); index = in.readString(); routedBasedOnClusterVersion = 
in.readVLong(); } @@ -202,7 +202,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } waitForActiveShards.writeTo(out); - timeout.writeTo(out); + out.writeTimeValue(timeout); out.writeString(index); out.writeVLong(routedBasedOnClusterVersion); } diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index 791617231b51b..2490110927f98 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -118,7 +118,7 @@ public void readFrom(StreamInput in) throws IOException { } else { shardId = null; } - timeout = new TimeValue(in); + timeout = in.readTimeValue(); concreteIndex = in.readOptionalString(); } @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(index); out.writeOptionalStreamable(shardId); - timeout.writeTo(out); + out.writeTimeValue(timeout); out.writeOptionalString(concreteIndex); } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java index e912eebb4fb39..cbfdfc294c581 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java @@ -144,7 +144,7 @@ public void readFrom(StreamInput in) throws IOException { parentTaskId = TaskId.readFromStream(in); nodes = in.readStringArray(); actions = in.readStringArray(); - timeout = in.readOptionalWriteable(TimeValue::new); + timeout = in.readOptionalTimeValue(); } @Override @@ -154,7 +154,7 @@ public void writeTo(StreamOutput out) throws IOException { parentTaskId.writeTo(out); out.writeStringArrayNullable(nodes); out.writeStringArrayNullable(actions); - out.writeOptionalWriteable(timeout); + out.writeOptionalTimeValue(timeout); } public boolean match(Task task) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index e75f510d80c02..053eb6939da97 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -65,7 +65,6 @@ public class TermVectorsRequest extends SingleShardRequest i private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); - private static final ParseField PARENT = new ParseField("parent"); private static final ParseField VERSION = new ParseField("version"); private static final ParseField VERSION_TYPE = new ParseField("version_type"); private static final ParseField FIELDS = new ParseField("fields"); @@ -87,8 +86,6 @@ public class TermVectorsRequest extends SingleShardRequest i private String routing; - private String parent; - private VersionType versionType = VersionType.INTERNAL; private long version = Versions.MATCH_ANY; @@ -185,7 +182,6 @@ public TermVectorsRequest(TermVectorsRequest other) { this.flagsEnum = other.getFlags().clone(); this.preference = 
other.preference(); this.routing = other.routing(); - this.parent = other.parent(); if (other.selectedFields != null) { this.selectedFields = new HashSet<>(other.selectedFields); } @@ -204,7 +200,6 @@ public TermVectorsRequest(MultiGetRequest.Item item) { this.type = item.type(); this.selectedFields(item.storedFields()); this.routing(item.routing()); - this.parent(item.parent()); } public EnumSet getFlags() { @@ -293,18 +288,6 @@ public TermVectorsRequest routing(String routing) { return this; } - public String parent() { - return parent; - } - - /** - * Sets the parent id of this document. - */ - public TermVectorsRequest parent(String parent) { - this.parent = parent; - return this; - } - public String preference() { return this.preference; } @@ -522,7 +505,10 @@ public void readFrom(StreamInput in) throws IOException { } } routing = in.readOptionalString(); - parent = in.readOptionalString(); + + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readOptionalString(); // _parent + } preference = in.readOptionalString(); long flags = in.readVLong(); @@ -565,7 +551,9 @@ public void writeTo(StreamOutput out) throws IOException { } } out.writeOptionalString(routing); - out.writeOptionalString(parent); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(null); // _parent + } out.writeOptionalString(preference); long longFlags = 0; for (Flag flag : flagsEnum) { @@ -650,8 +638,6 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser)); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.routing = parser.text(); - } else if (PARENT.match(currentFieldName, parser.getDeprecationHandler())) { - termVectorsRequest.parent = parser.text(); } else if (VERSION.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.version = parser.longValue(); } else if (VERSION_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java index 47bd09b100857..34ce90156d119 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequestBuilder.java @@ -88,15 +88,6 @@ public TermVectorsRequestBuilder setRouting(String routing) { return this; } - /** - * Sets the parent id of this document. Will simply set the routing to this value, as it is only - * used for routing with delete requests. - */ - public TermVectorsRequestBuilder setParent(String parent) { - request.parent(parent); - return this; - } - /** * Sets the preference to execute the search. Defaults to randomize across shards. 
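The removal of `parent` from TermVectorsRequest above follows the same wire-compatibility pattern as IndexRequest and UpdateRequest elsewhere in this change: the field is gone from the request object, but its optional-string slot is preserved on streams to and from pre-7.0 nodes. A minimal sketch of the pattern, with an illustrative class name rather than the PR's exact code:

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Sketch only: a request that dropped a field but must still interoperate
// with older nodes that expect the field on the wire.
class ParentlessRequest {
    private String routing;

    void readFrom(StreamInput in) throws IOException {
        routing = in.readOptionalString();
        if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
            in.readOptionalString(); // consume the _parent slot and discard it
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalString(routing);
        if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
            out.writeOptionalString(null); // older peers still expect the slot
        }
    }
}

On the read side the value is simply discarded, so an older node that still sends a parent id gets no wire-level error; the mapping-level validation went away together with the field.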
Can be set to * _local to prefer local shards or a custom value, which guarantees that the same order diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java index fc44ba64a9822..1d164087ed023 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsAction.java @@ -64,7 +64,7 @@ protected void doExecute(final MultiTermVectorsRequest request, final ActionList Map shardRequests = new HashMap<>(); for (int i = 0; i < request.requests.size(); i++) { TermVectorsRequest termVectorsRequest = request.requests.get(i); - termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.parent(), termVectorsRequest.routing(), termVectorsRequest.index())); + termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.routing(), termVectorsRequest.index())); if (!clusterState.metaData().hasConcreteIndex(termVectorsRequest.index())) { responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorsRequest.index(), termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index())))); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index 289f40f1a34a8..a259f5b828a05 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -78,7 +78,7 @@ protected boolean resolveIndex(TermVectorsRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { // update the routing (request#index here is possibly an alias or a parent) - request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index())); + request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index())); // Fail fast on the node that received the request. 
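With the parent fallback gone, resolveIndexRouting reconciles only two inputs. A simplified sketch of the remaining precedence, leaving out the conflict and multi-index alias checks the real method performs:

// Simplified sketch, not the ES method: alias-level index routing, when
// present, overrides the routing supplied on the request; otherwise the
// request routing passes through unchanged. There is no parent fallback.
static String resolveRouting(String aliasIndexRouting, String requestRouting) {
    return aliasIndexRouting != null ? aliasIndexRouting : requestRouting;
}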
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) { throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index a9d0e305f14ca..242dfe635ec91 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -105,7 +105,7 @@ protected void resolveRequest(ClusterState state, UpdateRequest request) { } public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) { - request.routing((metaData.resolveIndexRouting(request.parent(), request.routing(), request.index()))); + request.routing((metaData.resolveIndexRouting(request.routing(), request.index()))); // Fail fast on the node that received the request, rather than failing when translating on the index or delete request. if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) { throw new RoutingMissingException(concreteIndex, request.type(), request.id()); @@ -180,8 +180,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< bulkAction.execute(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse( ActionListener.wrap(response -> { UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || - (request.fields() != null && request.fields().length > 0)) { + if (request.fetchSource() != null && request.fetchSource().fetchSource()) { Tuple> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType()); update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index ab10aa710cce6..4c5accbb4ccb5 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; @@ -40,7 +39,6 @@ import org.elasticsearch.index.engine.DocumentMissingException; import org.elasticsearch.index.engine.DocumentSourceMissingException; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; @@ -50,7 +48,7 @@ import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; -import 
java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.function.LongSupplier; @@ -152,7 +150,7 @@ Result prepareUpsert(ShardId shardId, UpdateRequest request, final GetResult get indexRequest.index(request.index()) .type(request.type()).id(request.id()).setRefreshPolicy(request.getRefreshPolicy()).routing(request.routing()) - .parent(request.parent()).timeout(request.timeout()).waitForActiveShards(request.waitForActiveShards()) + .timeout(request.timeout()).waitForActiveShards(request.waitForActiveShards()) // it has to be a "create!" .create(true); @@ -191,20 +189,6 @@ static String calculateRouting(GetResult getResult, @Nullable IndexRequest updat } } - /** - * Calculate a parent value to be used, either the included index request's parent, or retrieved document's parent when defined. - */ - @Nullable - static String calculateParent(GetResult getResult, @Nullable IndexRequest updateIndexRequest) { - if (updateIndexRequest != null && updateIndexRequest.parent() != null) { - return updateIndexRequest.parent(); - } else if (getResult.getFields().containsKey(ParentFieldMapper.NAME)) { - return getResult.field(ParentFieldMapper.NAME).getValue().toString(); - } else { - return null; - } - } - /** * Prepare the request for merging the existing document with a new one, can optionally detect a noop change. Returns a {@code Result} * containing a new {@code IndexRequest} to be executed on the primary and replicas. @@ -213,7 +197,6 @@ Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu final long updateVersion = calculateUpdateVersion(request, getResult); final IndexRequest currentRequest = request.doc(); final String routing = calculateRouting(getResult, currentRequest); - final String parent = calculateParent(getResult, currentRequest); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); final Map updatedSourceAsMap = sourceAndContent.v2(); @@ -230,7 +213,7 @@ Result prepareUpdateIndexRequest(ShardId shardId, UpdateRequest request, GetResu return new Result(update, DocWriteResponse.Result.NOOP, updatedSourceAsMap, updateSourceContentType); } else { final IndexRequest finalIndexRequest = Requests.indexRequest(request.index()) - .type(request.type()).id(request.id()).routing(routing).parent(parent) + .type(request.type()).id(request.id()).routing(routing) .source(updatedSourceAsMap, updateSourceContentType).version(updateVersion).versionType(request.versionType()) .waitForActiveShards(request.waitForActiveShards()).timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()); @@ -247,7 +230,6 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes final long updateVersion = calculateUpdateVersion(request, getResult); final IndexRequest currentRequest = request.doc(); final String routing = calculateRouting(getResult, currentRequest); - final String parent = calculateParent(getResult, currentRequest); final Tuple> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); final XContentType updateSourceContentType = sourceAndContent.v1(); final Map sourceAsMap = sourceAndContent.v2(); @@ -259,7 +241,6 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes ctx.put(ContextFields.ID, getResult.getId()); ctx.put(ContextFields.VERSION, getResult.getVersion()); ctx.put(ContextFields.ROUTING, routing); - 
ctx.put(ContextFields.PARENT, parent); ctx.put(ContextFields.SOURCE, sourceAsMap); ctx.put(ContextFields.NOW, nowInMillis.getAsLong()); @@ -272,14 +253,14 @@ Result prepareUpdateScriptRequest(ShardId shardId, UpdateRequest request, GetRes switch (operation) { case INDEX: final IndexRequest indexRequest = Requests.indexRequest(request.index()) - .type(request.type()).id(request.id()).routing(routing).parent(parent) + .type(request.type()).id(request.id()).routing(routing) .source(updatedSourceAsMap, updateSourceContentType).version(updateVersion).versionType(request.versionType()) .waitForActiveShards(request.waitForActiveShards()).timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); case DELETE: DeleteRequest deleteRequest = Requests.deleteRequest(request.index()) - .type(request.type()).id(request.id()).routing(routing).parent(parent) + .type(request.type()).id(request.id()).routing(routing) .version(updateVersion).versionType(request.versionType()).waitForActiveShards(request.waitForActiveShards()) .timeout(request.timeout()).setRefreshPolicy(request.getRefreshPolicy()); return new Result(deleteRequest, DocWriteResponse.Result.DELETED, updatedSourceAsMap, updateSourceContentType); @@ -310,61 +291,33 @@ private Map executeScript(Script script, Map ctx /** * Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in a update response. - * For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in a update response */ public static GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) { - if ((request.fields() == null || request.fields().length == 0) && - (request.fetchSource() == null || request.fetchSource().fetchSource() == false)) { + if (request.fetchSource() == null || request.fetchSource().fetchSource() == false) { return null; } - SourceLookup sourceLookup = new SourceLookup(); - sourceLookup.setSource(source); - boolean sourceRequested = false; - Map fields = null; - if (request.fields() != null && request.fields().length > 0) { - for (String field : request.fields()) { - if (field.equals("_source")) { - sourceRequested = true; - continue; - } - Object value = sourceLookup.extractValue(field); - if (value != null) { - if (fields == null) { - fields = new HashMap<>(2); - } - DocumentField documentField = fields.get(field); - if (documentField == null) { - documentField = new DocumentField(field, new ArrayList<>(2)); - fields.put(field, documentField); - } - documentField.getValues().add(value); - } - } - } BytesReference sourceFilteredAsBytes = sourceAsBytes; - if (request.fetchSource() != null && request.fetchSource().fetchSource()) { - sourceRequested = true; - if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) { - Object value = sourceLookup.filter(request.fetchSource()); - try { - final int initialCapacity = Math.min(1024, sourceAsBytes.length()); - BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); - try (XContentBuilder builder = new XContentBuilder(sourceContentType.xContent(), streamOutput)) { - builder.value(value); - sourceFilteredAsBytes = BytesReference.bytes(builder); - } - } catch (IOException e) { - throw new ElasticsearchException("Error 
filtering source", e); + if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) { + SourceLookup sourceLookup = new SourceLookup(); + sourceLookup.setSource(source); + Object value = sourceLookup.filter(request.fetchSource()); + try { + final int initialCapacity = Math.min(1024, sourceAsBytes.length()); + BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); + try (XContentBuilder builder = new XContentBuilder(sourceContentType.xContent(), streamOutput)) { + builder.value(value); + sourceFilteredAsBytes = BytesReference.bytes(builder); } + } catch (IOException e) { + throw new ElasticsearchException("Error filtering source", e); } } // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType) - return new GetResult(concreteIndex, request.type(), request.id(), version, true, - sourceRequested ? sourceFilteredAsBytes : null, fields); + return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceFilteredAsBytes, Collections.emptyMap()); } public static class Result { @@ -452,6 +405,5 @@ public static class ContextFields { public static final String ID = "_id"; public static final String VERSION = "_version"; public static final String ROUTING = "_routing"; - public static final String PARENT = "_parent"; } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 2dcd35dfb36b9..3f74f7311c202 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.update; -import java.util.Arrays; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -28,11 +28,14 @@ import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -46,28 +49,55 @@ import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; -import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; public class UpdateRequest extends InstanceShardOperationRequest implements DocWriteRequest, WriteRequest, ToXContentObject { + private static ObjectParser PARSER; + + private static final ParseField SCRIPT_FIELD = new ParseField("script"); + private static final ParseField SCRIPTED_UPSERT_FIELD = new ParseField("scripted_upsert"); + private static final ParseField UPSERT_FIELD = new ParseField("upsert"); + 
private static final ParseField DOC_FIELD = new ParseField("doc"); + private static final ParseField DOC_AS_UPSERT_FIELD = new ParseField("doc_as_upsert"); + private static final ParseField DETECT_NOOP_FIELD = new ParseField("detect_noop"); + private static final ParseField SOURCE_FIELD = new ParseField("_source"); + + static { + PARSER = new ObjectParser<>(UpdateRequest.class.getSimpleName()); + PARSER.declareField((request, script) -> request.script = script, + (parser, context) -> Script.parse(parser), SCRIPT_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING); + PARSER.declareBoolean(UpdateRequest::scriptedUpsert, SCRIPTED_UPSERT_FIELD); + PARSER.declareObject((request, builder) -> request.safeUpsertRequest().source(builder), + (parser, context) -> { + XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); + builder.copyCurrentStructure(parser); + return builder; + }, UPSERT_FIELD); + PARSER.declareObject((request, builder) -> request.safeDoc().source(builder), + (parser, context) -> { + XContentBuilder docBuilder = XContentFactory.contentBuilder(parser.contentType()); + docBuilder.copyCurrentStructure(parser); + return docBuilder; + }, DOC_FIELD); + PARSER.declareBoolean(UpdateRequest::docAsUpsert, DOC_AS_UPSERT_FIELD); + PARSER.declareBoolean(UpdateRequest::detectNoop, DETECT_NOOP_FIELD); + PARSER.declareField(UpdateRequest::fetchSource, + (parser, context) -> FetchSourceContext.fromXContent(parser), SOURCE_FIELD, + ObjectParser.ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING); + } private String type; private String id; @Nullable private String routing; - @Nullable - private String parent; - @Nullable Script script; - private String[] fields; private FetchSourceContext fetchSourceContext; private long version = Versions.MATCH_ANY; @@ -194,18 +224,6 @@ public String routing() { return this.routing; } - /** - * The parent id is used for the upsert request. - */ - public UpdateRequest parent(String parent) { - this.parent = parent; - return this; - } - - public String parent() { - return parent; - } - public ShardId getShardId() { return this.shardId; } @@ -378,16 +396,6 @@ public UpdateRequest script(String script, @Nullable String scriptLang, ScriptTy return this; } - /** - * Explicitly specify the fields that will be returned. By default, nothing is returned. - * @deprecated Use {@link UpdateRequest#fetchSource(String[], String[])} instead - */ - @Deprecated - public UpdateRequest fields(String... fields) { - this.fields = fields; - return this; - } - /** * Indicate that _source should be returned with every hit, with an * "include" and/or "exclude" set which can include simple wildcard @@ -402,7 +410,9 @@ public UpdateRequest fields(String... fields) { */ public UpdateRequest fetchSource(@Nullable String include, @Nullable String exclude) { FetchSourceContext context = this.fetchSourceContext == null ? FetchSourceContext.FETCH_SOURCE : this.fetchSourceContext; - this.fetchSourceContext = new FetchSourceContext(context.fetchSource(), new String[] {include}, new String[]{exclude}); + String[] includes = include == null ? Strings.EMPTY_ARRAY : new String[]{include}; + String[] excludes = exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}; + this.fetchSourceContext = new FetchSourceContext(context.fetchSource(), includes, excludes); return this; } @@ -441,16 +451,6 @@ public UpdateRequest fetchSource(FetchSourceContext context) { return this; } - - /** - * Get the fields to be returned. 
- * @deprecated Use {@link UpdateRequest#fetchSource()} instead - */ - @Deprecated - public String[] fields() { - return fields; - } - /** * Gets the {@link FetchSourceContext} which defines how the _source should * be fetched. @@ -720,49 +720,7 @@ public boolean detectNoop() { } public UpdateRequest fromXContent(XContentParser parser) throws IOException { - Script script = null; - XContentParser.Token token = parser.nextToken(); - if (token == null) { - return this; - } - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if ("script".equals(currentFieldName)) { - script = Script.parse(parser); - } else if ("scripted_upsert".equals(currentFieldName)) { - scriptedUpsert = parser.booleanValue(); - } else if ("upsert".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); - builder.copyCurrentStructure(parser); - safeUpsertRequest().source(builder); - } else if ("doc".equals(currentFieldName)) { - XContentBuilder docBuilder = XContentFactory.contentBuilder(parser.contentType()); - docBuilder.copyCurrentStructure(parser); - safeDoc().source(docBuilder); - } else if ("doc_as_upsert".equals(currentFieldName)) { - docAsUpsert(parser.booleanValue()); - } else if ("detect_noop".equals(currentFieldName)) { - detectNoop(parser.booleanValue()); - } else if ("fields".equals(currentFieldName)) { - List fields = null; - if (token == XContentParser.Token.START_ARRAY) { - fields = (List) parser.list(); - } else if (token.isValue()) { - fields = Collections.singletonList(parser.text()); - } - if (fields != null) { - fields(fields.toArray(new String[fields.size()])); - } - } else if ("_source".equals(currentFieldName)) { - fetchSourceContext = FetchSourceContext.fromXContent(parser); - } - } - if (script != null) { - this.script = script; - } - return this; + return PARSER.parse(parser, this, null); } public boolean docAsUpsert() { @@ -790,7 +748,9 @@ public void readFrom(StreamInput in) throws IOException { type = in.readString(); id = in.readString(); routing = in.readOptionalString(); - parent = in.readOptionalString(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readOptionalString(); // _parent + } if (in.readBoolean()) { script = new Script(in); } @@ -800,7 +760,12 @@ public void readFrom(StreamInput in) throws IOException { doc = new IndexRequest(); doc.readFrom(in); } - fields = in.readOptionalStringArray(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + String[] fields = in.readOptionalStringArray(); + if (fields != null) { + throw new IllegalArgumentException("[fields] is no longer supported"); + } + } fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { upsertRequest = new IndexRequest(); @@ -820,7 +785,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeString(id); out.writeOptionalString(routing); - out.writeOptionalString(parent); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalString(null); // _parent + } + boolean hasScript = script != null; out.writeBoolean(hasScript); if (hasScript) { @@ -838,7 +806,9 @@ public void writeTo(StreamOutput out) throws IOException { doc.id(id); doc.writeTo(out); } - out.writeOptionalStringArray(fields); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeOptionalStringArray(null); + } 
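For callers, the removed `fields` option on update requests maps onto source filtering, which survives the rewrite to the declarative PARSER above. A hedged migration sketch; the index, type, id, and document values are illustrative:

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentType;

// Before: new UpdateRequest(...).fields("counter") -- now rejected on the wire.
// After: ask for the filtered _source instead.
UpdateRequest request = new UpdateRequest("test", "type1", "1")
        .doc("{\"counter\":1}", XContentType.JSON)
        .fetchSource(new String[]{"counter"}, Strings.EMPTY_ARRAY);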
out.writeOptionalWriteable(fetchSourceContext); if (upsertRequest == null) { out.writeBoolean(false); @@ -888,9 +858,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (detectNoop == false) { builder.field("detect_noop", detectNoop); } - if (fields != null) { - builder.array("fields", fields); - } if (fetchSourceContext != null) { builder.field("_source", fetchSourceContext); } @@ -916,9 +883,6 @@ public String toString() { } res.append(", scripted_upsert[").append(scriptedUpsert).append("]"); res.append(", detect_noop[").append(detectNoop).append("]"); - if (fields != null) { - res.append(", fields[").append(Arrays.toString(fields)).append("]"); - } return res.append("}").toString(); } } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 5ba187013e79f..74935adbbb283 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -26,20 +26,15 @@ import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; -import org.elasticsearch.rest.action.document.RestUpdateAction; import org.elasticsearch.script.Script; import java.util.Map; public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder implements WriteRequestBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class)); public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) { super(client, action, new UpdateRequest()); @@ -74,11 +69,6 @@ public UpdateRequestBuilder setRouting(String routing) { return this; } - public UpdateRequestBuilder setParent(String parent) { - request.parent(parent); - return this; - } - /** * The script to execute. Note, make sure not to send different script each times and instead * use script params if possible with the same (automatically compiled) script. @@ -92,17 +82,6 @@ public UpdateRequestBuilder setScript(Script script) { return this; } - /** - * Explicitly specify the fields that will be returned. By default, nothing is returned. - * @deprecated Use {@link UpdateRequestBuilder#setFetchSource(String[], String[])} instead - */ - @Deprecated - public UpdateRequestBuilder setFields(String... fields) { - DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead"); - request.fields(fields); - return this; - } - /** * Indicate that _source should be returned with every hit, with an * "include" and/or "exclude" set which can include simple wildcard diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index 2c61653f61ce0..adb2f509b999e 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -477,4 +477,14 @@ public interface Client extends ElasticsearchClient, Releasable { * issued from it. 
*/ Client filterWithHeader(Map headers); + + /** + * Returns a client to a remote cluster with the given cluster alias. + * + * @throws IllegalArgumentException if the given clusterAlias doesn't exist + * @throws UnsupportedOperationException if this functionality is not available on this client. + */ + default Client getRemoteClusterClient(String clusterAlias) { + throw new UnsupportedOperationException("this client doesn't support remote cluster connections"); + } } diff --git a/server/src/main/java/org/elasticsearch/client/FilterClient.java b/server/src/main/java/org/elasticsearch/client/FilterClient.java index 23d3c2c3d0c2f..92f6817b74b67 100644 --- a/server/src/main/java/org/elasticsearch/client/FilterClient.java +++ b/server/src/main/java/org/elasticsearch/client/FilterClient.java @@ -73,4 +73,9 @@ protected localNodeId; + private RemoteClusterService remoteClusterService; public NodeClient(Settings settings, ThreadPool threadPool) { super(settings, threadPool); } - public void initialize(Map actions, Supplier localNodeId) { + public void initialize(Map actions, Supplier localNodeId, + RemoteClusterService remoteClusterService) { this.actions = actions; this.localNodeId = localNodeId; + this.remoteClusterService = remoteClusterService; } @Override @@ -117,4 +121,9 @@ > TransportAction transportAction(GenericAction withoutType) { } else { this.routing = Routing.EMPTY; } - if (withoutType.containsKey("_parent")) { - this.hasParentField = true; - } else { - this.hasParentField = false; - } } void updateDefaultMapping(MappingMetaData defaultMapping) { @@ -149,10 +142,6 @@ public CompressedXContent source() { return this.source; } - public boolean hasParentField() { - return hasParentField; - } - /** * Converts the serialized compressed form of the mappings into a parsed map. */ @@ -189,7 +178,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString("now"); // 5.x default out.writeOptionalBoolean(null); } - out.writeBoolean(hasParentField()); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeBoolean(false); // hasParentField + } } @Override @@ -229,7 +220,9 @@ public MappingMetaData(StreamInput in) throws IOException { in.readOptionalString(); // defaultTimestamp in.readOptionalBoolean(); // ignoreMissing } - hasParentField = in.readBoolean(); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + in.readBoolean(); // hasParentField + } } public static Diff readDiffFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index a569bb9a36e29..b18c82712b37e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -474,14 +474,14 @@ public String[] getConcreteAllClosedIndices() { */ // TODO: This can be moved to IndexNameExpressionResolver too, but this means that we will support wildcards and other expressions // in the index,bulk,update and delete apis. 
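A usage sketch for the new remote-cluster hook, assuming a client whose NodeClient was initialized with a RemoteClusterService; the alias "my_remote" and the index pattern are hypothetical, not part of this change:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;

// Clients without remote-cluster support throw UnsupportedOperationException,
// per the default method above; NodeClient overrides it.
Client remote = localClient.getRemoteClusterClient("my_remote");
remote.prepareSearch("logs-*")
        .setSize(10)
        .execute(ActionListener.wrap(
                response -> System.out.println(response.getHits().getTotalHits()),
                Throwable::printStackTrace));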
- public String resolveIndexRouting(@Nullable String parent, @Nullable String routing, String aliasOrIndex) { + public String resolveIndexRouting(@Nullable String routing, String aliasOrIndex) { if (aliasOrIndex == null) { - return routingOrParent(parent, routing); + return routing; } AliasOrIndex result = getAliasAndIndexLookup().get(aliasOrIndex); if (result == null || result.isAlias() == false) { - return routingOrParent(parent, routing); + return routing; } AliasOrIndex.Alias alias = (AliasOrIndex.Alias) result; if (result.getIndices().size() > 1) { @@ -500,7 +500,7 @@ public String resolveIndexRouting(@Nullable String parent, @Nullable String rout // Alias routing overrides the parent routing (if any). return aliasMd.indexRouting(); } - return routingOrParent(parent, routing); + return routing; } private void rejectSingleIndexOperation(String aliasOrIndex, AliasOrIndex result) { @@ -512,13 +512,6 @@ private void rejectSingleIndexOperation(String aliasOrIndex, AliasOrIndex result throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexNames) + "], can't execute a single index op"); } - private String routingOrParent(@Nullable String parent, @Nullable String routing) { - if (routing == null) { - return parent; - } - return routing; - } - public boolean hasIndex(String index) { return indices.containsKey(index); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 829504c154e41..fbeca652a97ff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -265,24 +265,6 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt if (existingMapper != null) { // first, simulate: just call merge and ignore the result existingMapper.merge(newMapper.mapping()); - } else { - // TODO: can we find a better place for this validation? - // The reason this validation is here is that the mapper service doesn't learn about - // new types all at once , which can create a false error. - - // For example in MapperService we can't distinguish between a create index api call - // and a put mapping api call, so we don't which type did exist before. - // Also the order of the mappings may be backwards. 
- if (newMapper.parentFieldMapper().active()) { - for (ObjectCursor mapping : indexMetaData.getMappings().values()) { - String parentType = newMapper.parentFieldMapper().type(); - if (parentType.equals(mapping.value.type()) && - mapperService.getParentTypes().contains(parentType) == false) { - throw new IllegalArgumentException("can't add a _parent field that points to an " + - "already existing type, that isn't already a parent"); - } - } - } } } if (mappingType == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java b/server/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java index ef07ef9b1ea54..ad6803de089d2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/AllocationId.java @@ -166,7 +166,7 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (o == null) { + if (o == null || getClass() != o.getClass()) { return false; } AllocationId that = (AllocationId) o; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 2f9771a05527d..193dfa4b2eb23 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -557,7 +557,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return Arrays.equals(attributes, ((AttributesKey) obj).attributes); + return obj instanceof AttributesKey && Arrays.equals(attributes, ((AttributesKey) obj).attributes); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java b/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java index bb45ca66956f8..391ef8c67c81e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/PlainShardIterator.java @@ -51,6 +51,7 @@ public ShardId shardId() { @Override public boolean equals(Object o) { if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; ShardIterator that = (ShardIterator) o; return shardId.equals(that.shardId()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index b53629be37e7b..0ddf3ef1529b3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -66,7 +66,7 @@ public DiskThresholdMonitor(Settings settings, Supplier clusterSta private void warnAboutDiskIfNeeded(DiskUsage usage) { // Check absolute disk values if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes()) { - logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will marked read-only", + logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will be marked read-only", diskThresholdSettings.getFreeBytesThresholdFloodStage(), usage); } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) { logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node", 
@@ -78,7 +78,7 @@ private void warnAboutDiskIfNeeded(DiskUsage usage) { // Check percentage disk values if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdFloodStage()) { - logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will marked read-only", + logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will be marked read-only", Strings.format1Decimals(100.0 - diskThresholdSettings.getFreeDiskThresholdFloodStage(), "%"), usage); } else if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) { logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node", diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java index 898294264274e..f2df6d3196dd0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/NodeVersionAllocationDecider.java @@ -55,13 +55,13 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } } else { // relocating primary, only migrate to newer host - return isVersionCompatible(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation); + return isVersionCompatibleRelocatePrimary(allocation.routingNodes(), shardRouting.currentNodeId(), node, allocation); } } else { final ShardRouting primary = allocation.routingNodes().activePrimary(shardRouting.shardId()); // check that active primary has a newer version so that peer recovery works if (primary != null) { - return isVersionCompatible(allocation.routingNodes(), primary.currentNodeId(), node, allocation); + return isVersionCompatibleAllocatingReplica(allocation.routingNodes(), primary.currentNodeId(), node, allocation); } else { // ReplicaAfterPrimaryActiveAllocationDecider should prevent this case from occurring return allocation.decision(Decision.YES, NAME, "no active primary shard yet"); @@ -69,30 +69,45 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } } - private Decision isVersionCompatible(final RoutingNodes routingNodes, final String sourceNodeId, final RoutingNode target, - RoutingAllocation allocation) { + private Decision isVersionCompatibleRelocatePrimary(final RoutingNodes routingNodes, final String sourceNodeId, + final RoutingNode target, final RoutingAllocation allocation) { + final RoutingNode source = routingNodes.node(sourceNodeId); + if (target.node().getVersion().onOrAfter(source.node().getVersion())) { + return allocation.decision(Decision.YES, NAME, + "can relocate primary shard from a node with version [%s] to a node with equal-or-newer version [%s]", + source.node().getVersion(), target.node().getVersion()); + } else { + return allocation.decision(Decision.NO, NAME, + "cannot relocate primary shard from a node with version [%s] to a node with older version [%s]", + source.node().getVersion(), target.node().getVersion()); + } + } + + private Decision isVersionCompatibleAllocatingReplica(final RoutingNodes routingNodes, final String sourceNodeId, + final RoutingNode target, final RoutingAllocation allocation) { final RoutingNode source = routingNodes.node(sourceNodeId); if (target.node().getVersion().onOrAfter(source.node().getVersion())) { /* we can 
allocate if we can recover from a node that is younger or on the same version * if the primary is already running on a newer version that won't work due to possible * differences in the lucene index format etc.*/ - return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than source node version [%s]", - target.node().getVersion(), source.node().getVersion()); + return allocation.decision(Decision.YES, NAME, + "can allocate replica shard to a node with version [%s] since this is equal-or-newer than the primary version [%s]", + target.node().getVersion(), source.node().getVersion()); } else { - return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the source node version [%s] and may " + - "not support codecs or postings formats for a newer Lucene version", - target.node().getVersion(), source.node().getVersion()); + return allocation.decision(Decision.NO, NAME, + "cannot allocate replica shard to a node with version [%s] since this is older than the primary version [%s]", + target.node().getVersion(), source.node().getVersion()); } } - private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, final RoutingNode target, RoutingAllocation allocation) { + private Decision isVersionCompatible(SnapshotRecoverySource recoverySource, final RoutingNode target, + final RoutingAllocation allocation) { if (target.node().getVersion().onOrAfter(recoverySource.version())) { /* we can allocate if we can restore from a snapshot that is older or on the same version */ - return allocation.decision(Decision.YES, NAME, "target node version [%s] is the same or newer than snapshot version [%s]", + return allocation.decision(Decision.YES, NAME, "node version [%s] is the same or newer than snapshot version [%s]", target.node().getVersion(), recoverySource.version()); } else { - return allocation.decision(Decision.NO, NAME, "target node version [%s] is older than the snapshot version [%s] and may " + - "not support codecs or postings formats for a newer Lucene version", + return allocation.decision(Decision.NO, NAME, "node version [%s] is older than the snapshot version [%s]", target.node().getVersion(), recoverySource.version()); } } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index df2e7a123a3ae..f960664306f08 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; -import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.internal.io.Streams; import java.io.BufferedInputStream; import java.io.FileNotFoundException; @@ -128,7 +128,7 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t } final Path file = path.resolve(blobName); try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) { - Streams.copy(inputStream, outputStream, new byte[blobStore.bufferSizeInBytes()]); + Streams.copy(inputStream, outputStream); } IOUtils.fsync(file, false); IOUtils.fsync(path, true); diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java 
b/server/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java index 332d9024e997f..3b1202fe66f42 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressorFactory.java @@ -21,11 +21,11 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index acfb8970e684c..d2ca936740e27 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -72,15 +72,19 @@ public static final long longEncode(final double lon, final double lat, final in /** * Encode from geohash string to the geohash based long format (lon/lat interleaved, 4 least significant bits = level) */ - public static final long longEncode(final String hash) { - int level = hash.length()-1; + private static long longEncode(final String hash, int length) { + int level = length - 1; long b; long l = 0L; for(char c : hash.toCharArray()) { b = (long)(BASE_32_STRING.indexOf(c)); l |= (b<<(level--*5)); + if (level < 0) { + // We cannot handle more than 12 levels + break; + } } - return (l<<4)|hash.length(); + return (l << 4) | length; } /** @@ -173,6 +177,10 @@ public static final long mortonEncode(final String hash) { for(char c : hash.toCharArray()) { b = (long)(BASE_32_STRING.indexOf(c)); l |= (b<<((level--*5) + MORTON_OFFSET)); + if (level < 0) { + // We cannot handle more than 12 levels + break; + } } return BitUtil.flipFlop(l); } @@ -200,13 +208,14 @@ private static char encode(int x, int y) { public static Rectangle bbox(final String geohash) { // bottom left is the coordinate GeoPoint bottomLeft = GeoPoint.fromGeohash(geohash); - long ghLong = longEncode(geohash); + int len = Math.min(12, geohash.length()); + long ghLong = longEncode(geohash, len); // shift away the level ghLong >>>= 4; // deinterleave and add 1 to lat and lon to get topRight long lat = BitUtil.deinterleave(ghLong >>> 1) + 1; long lon = BitUtil.deinterleave(ghLong) + 1; - GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | geohash.length()); + GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | len); return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon()); } diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index 655b259c81074..ce0098ea9722f 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -24,10 +24,10 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.DistanceUnit; import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; import org.elasticsearch.index.fielddata.MultiGeoPointValues; @@ -459,6 +459,51 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina } } + /** + * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". + * + * The precision is expressed as a number between 1 and 12 and indicates the length of the geohash + * used to represent geo points. + * + * @param parser {@link XContentParser} to parse the value from + * @return int representing precision + */ + public static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException { + XContentParser.Token token = parser.currentToken(); + if (token.equals(XContentParser.Token.VALUE_NUMBER)) { + return XContentMapValues.nodeIntegerValue(parser.intValue()); + } else { + String precision = parser.text(); + try { + // we want to treat simple integer strings as precision levels, not distances + return XContentMapValues.nodeIntegerValue(precision); + } catch (NumberFormatException e) { + // try to parse as a distance value + final int parsedPrecision = GeoUtils.geoHashLevelsForPrecision(precision); + try { + return checkPrecisionRange(parsedPrecision); + } catch (IllegalArgumentException e2) { + // this happens when the distance is too small, so precision > 12; we'd like to see the original string + throw new IllegalArgumentException("precision too high [" + precision + "]", e2); + } + } + } + } + + /** + * Checks that the precision is within the range supported by Elasticsearch, between 1 and 12. + * + * Returns the precision value if it is in the range and throws an IllegalArgumentException if it + * is outside the range. + */ + public static int checkPrecisionRange(int precision) { + if ((precision < 1) || (precision > 12)) { + throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision + + ". Must be between 1 and 12."); + } + return precision; + } + /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */ public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) { if (Math.abs(centerLat) == MAX_LAT) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 6706006a0a008..3988d4a0f2ba7 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -65,6 +66,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.IntFunction; import java.util.function.Supplier; @@ -82,6 +84,26 @@ * on {@link StreamInput}.
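The two precision-parsing paths added to GeoUtils above can be exercised directly; a hedged sketch with illustrative values:

import org.elasticsearch.common.geo.GeoUtils;

// Integer input is validated against the 1..12 geohash range; a distance
// string is converted to the geohash level needed to achieve that precision.
int level = GeoUtils.checkPrecisionRange(5);                   // -> 5
int fromDistance = GeoUtils.geoHashLevelsForPrecision("100m"); // distance -> level
GeoUtils.checkPrecisionRange(13);                              // throws IllegalArgumentException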
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 6706006a0a008..3988d4a0f2ba7 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -65,6 +66,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.IntFunction; import java.util.function.Supplier; @@ -82,6 +84,26 @@ * on {@link StreamInput}. */ public abstract class StreamInput extends InputStream { + + private static final Map<Byte, TimeUnit> BYTE_TIME_UNIT_MAP; + + static { + final Map<Byte, TimeUnit> byteTimeUnitMap = new HashMap<>(); + byteTimeUnitMap.put((byte)0, TimeUnit.NANOSECONDS); + byteTimeUnitMap.put((byte)1, TimeUnit.MICROSECONDS); + byteTimeUnitMap.put((byte)2, TimeUnit.MILLISECONDS); + byteTimeUnitMap.put((byte)3, TimeUnit.SECONDS); + byteTimeUnitMap.put((byte)4, TimeUnit.MINUTES); + byteTimeUnitMap.put((byte)5, TimeUnit.HOURS); + byteTimeUnitMap.put((byte)6, TimeUnit.DAYS); + + for (TimeUnit value : TimeUnit.values()) { + assert byteTimeUnitMap.containsValue(value) : value; + } + + BYTE_TIME_UNIT_MAP = Collections.unmodifiableMap(byteTimeUnitMap); + } + private Version version = Version.CURRENT; /** @@ -971,4 +993,24 @@ private int readArraySize() throws IOException { * be a no-op depending on the underlying implementation if the information of the remaining bytes is not present. */ protected abstract void ensureCanReadBytes(int length) throws EOFException; + + /** + * Read a {@link TimeValue} from the stream + */ + public TimeValue readTimeValue() throws IOException { + long duration = readZLong(); + TimeUnit timeUnit = BYTE_TIME_UNIT_MAP.get(readByte()); + return new TimeValue(duration, timeUnit); + } + + /** + * Read an optional {@link TimeValue} from the stream, returning null if no TimeValue was written. + */ + public @Nullable TimeValue readOptionalTimeValue() throws IOException { + if (readBoolean()) { + return readTimeValue(); + } else { + return null; + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 5f27f74956f2d..277ed63ef0ade 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.Writeable.Writer; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.joda.time.DateTimeZone; import org.joda.time.ReadableInstant; @@ -54,11 +55,13 @@ import java.time.ZonedDateTime; import java.util.Collections; import java.util.Date; +import java.util.EnumMap; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.function.IntFunction; /** @@ -74,6 +77,25 @@ */ public abstract class StreamOutput extends OutputStream { + private static final Map<TimeUnit, Byte> TIME_UNIT_BYTE_MAP; + + static { + final Map<TimeUnit, Byte> timeUnitByteMap = new EnumMap<>(TimeUnit.class); + timeUnitByteMap.put(TimeUnit.NANOSECONDS, (byte)0); + timeUnitByteMap.put(TimeUnit.MICROSECONDS, (byte)1); + timeUnitByteMap.put(TimeUnit.MILLISECONDS, (byte)2); + timeUnitByteMap.put(TimeUnit.SECONDS, (byte)3); + timeUnitByteMap.put(TimeUnit.MINUTES, (byte)4); + timeUnitByteMap.put(TimeUnit.HOURS, (byte)5); + timeUnitByteMap.put(TimeUnit.DAYS, (byte)6); + + for (TimeUnit value : TimeUnit.values()) { + assert timeUnitByteMap.containsKey(value) : value; + } + + TIME_UNIT_BYTE_MAP = Collections.unmodifiableMap(timeUnitByteMap); + } + private Version version = Version.CURRENT; /** @@ -973,4 +995,24 @@ public <E extends Enum<E>> void writeEnum(E enumValue) throws IOException { writeVInt(enumValue.ordinal()); } + /** + * Write a {@link 
TimeValue} to the stream + */ + public void writeTimeValue(TimeValue timeValue) throws IOException { + writeZLong(timeValue.duration()); + writeByte(TIME_UNIT_BYTE_MAP.get(timeValue.timeUnit())); + } + + /** + * Write an optional {@link TimeValue} to the stream. + */ + public void writeOptionalTimeValue(@Nullable TimeValue timeValue) throws IOException { + if (timeValue == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeTimeValue(timeValue); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index d004c798996c9..22aa336e460dd 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -99,18 +99,13 @@ public static Query not(Query q) { .build(); } - private static boolean isNegativeQuery(Query q) { + static boolean isNegativeQuery(Query q) { if (!(q instanceof BooleanQuery)) { return false; } List clauses = ((BooleanQuery) q).clauses(); - if (clauses.isEmpty()) { - return false; - } - for (BooleanClause clause : clauses) { - if (!clause.isProhibited()) return false; - } - return true; + return clauses.isEmpty() == false && + clauses.stream().allMatch(BooleanClause::isProhibited); } public static Query fixNegativeQueryIfNeeded(Query q) { @@ -120,7 +115,7 @@ public static Query fixNegativeQueryIfNeeded(Query q) { for (BooleanClause clause : bq) { builder.add(clause); } - builder.add(newMatchAllQuery(), BooleanClause.Occur.MUST); + builder.add(newMatchAllQuery(), BooleanClause.Occur.FILTER); return builder.build(); } return q; diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index ced99fc806527..45eb3cf45efa2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -245,6 +245,8 @@ public void apply(Settings value, Settings current, Settings previous) { HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH, HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE, + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT, + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT, HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index d18858c2547ec..bd6bba7b784cd 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -58,7 +58,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final Set> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY, - IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING, MergeSchedulerConfig.AUTO_THROTTLE_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, @@ -207,9 +206,6 @@ public boolean isPrivateSetting(String key) { case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY: 
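The new readTimeValue/writeTimeValue pair above forms a symmetric wire format for TimeValue; a minimal round-trip sketch (assuming the methods land as shown):

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeTimeValue(TimeValue.timeValueSeconds(30)); // zlong duration followed by a unit byte
    out.writeOptionalTimeValue(null);                   // a single false boolean
    StreamInput in = out.bytes().streamInput();
    TimeValue timeout = in.readTimeValue();             // 30s, with the unit preserved
    TimeValue missing = in.readOptionalTimeValue();     // null

Carrying the unit as its own byte means a value written as "30s" is rehydrated with the same unit rather than being normalized into a different one on the receiving side.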
case IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY: case IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY: - case IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY: - // this was settable in 5.x but not anymore in 6.x so we have to preserve the value ie. make it read-only - // this can be removed in later versions return true; default: return IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getRawKey().match(key); diff --git a/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java b/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java index d447bc0567d5e..acc2cbbfa57ee 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/LocaleUtils.java @@ -75,16 +75,6 @@ public static Locale parse(String localeStr) { return locale; } - /** - * Parse the string describing a locale into a {@link Locale} object - * for 5.x indices. - */ - @Deprecated - public static Locale parse5x(String localeStr) { - final String[] parts = localeStr.split("_", -1); - return parseParts(parts); - } - private static Locale parseParts(String[] parts) { switch (parts.length) { case 3: diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 8f950c5434bd7..901c6425d7131 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -23,10 +23,16 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.http.HttpTransportSettings; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; import java.io.Closeable; import java.io.IOException; @@ -39,13 +45,14 @@ import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; import java.util.concurrent.RunnableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.nio.charset.StandardCharsets; + /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -81,6 +88,8 @@ public final class ThreadContext implements Closeable, Writeable { private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; private final ContextThreadLocal threadLocal; + private final int maxWarningHeaderCount; + private final long maxWarningHeaderSize; /** * Creates a new ThreadContext instance @@ -98,6 +107,8 @@ public ThreadContext(Settings settings) { this.defaultHeader = Collections.unmodifiableMap(defaultHeader); } threadLocal = new ContextThreadLocal(); + this.maxWarningHeaderCount = 
SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings); + this.maxWarningHeaderSize = SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes(); } @Override @@ -282,7 +293,7 @@ public void addResponseHeader(final String key, final String value) { * @param uniqueValue the function that produces de-duplication values */ public void addResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) { - threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue)); + threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize)); } /** @@ -359,7 +370,7 @@ private static final class ThreadContextStruct { private final Map<String, Object> transientHeaders; private final Map<String, List<String>> responseHeaders; private final boolean isSystemContext; - + private long warningHeadersSize; // cache the current total size of warning headers so it need not be recomputed for every new warning header private ThreadContextStruct(StreamInput in) throws IOException { final int numRequest = in.readVInt(); Map<String, String> requestHeaders = numRequest == 0 ? Collections.emptyMap() : new HashMap<>(numRequest); @@ -371,6 +382,7 @@ private ThreadContextStruct(StreamInput in) throws IOException { this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString); this.transientHeaders = Collections.emptyMap(); isSystemContext = false; // we never serialize this; it's a transient flag + this.warningHeadersSize = 0L; } private ThreadContextStruct setSystemContext() { @@ -387,6 +399,18 @@ private ThreadContextStruct(Map<String, String> requestHeaders, this.responseHeaders = responseHeaders; this.transientHeaders = transientHeaders; this.isSystemContext = isSystemContext; + this.warningHeadersSize = 0L; + } + + private ThreadContextStruct(Map<String, String> requestHeaders, + Map<String, List<String>> responseHeaders, + Map<String, Object> transientHeaders, boolean isSystemContext, + long warningHeadersSize) { + this.requestHeaders = requestHeaders; + this.responseHeaders = responseHeaders; + this.transientHeaders = transientHeaders; + this.isSystemContext = isSystemContext; + this.warningHeadersSize = warningHeadersSize; + } /** @@ -440,30 +464,58 @@ private ThreadContextStruct putResponseHeaders(Map<String, List<String>> headers return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); } - private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue) { + private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue, + final int maxWarningHeaderCount, final long maxWarningHeaderSize) { assert value != null; + long newWarningHeaderSize = warningHeadersSize; + // check whether another warning header can be added without exceeding the size limit + if (key.equals("Warning") && (maxWarningHeaderSize != -1)) { // a size of -1 means unbounded, so only enforce the limit otherwise + if (warningHeadersSize > maxWarningHeaderSize) { // the maximum size was already reached earlier + final String message = "Dropping a warning header, as their total size reached the maximum allowed of [" + + maxWarningHeaderSize + "] bytes set in [" + + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!"; + ESLoggerFactory.getLogger(ThreadContext.class).warn(message); + return this; + } + newWarningHeaderSize += "Warning".getBytes(StandardCharsets.UTF_8).length + value.getBytes(StandardCharsets.UTF_8).length; + if (newWarningHeaderSize > maxWarningHeaderSize) {
maxWarningHeaderSize + "] bytes set in [" + + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!"; + ESLoggerFactory.getLogger(ThreadContext.class).warn(message); + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); + } + } final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); final List existingValues = newResponseHeaders.get(key); - if (existingValues != null) { final Set existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet()); assert existingValues.size() == existingUniqueValues.size(); if (existingUniqueValues.contains(uniqueValue.apply(value))) { return this; } - final List newValues = new ArrayList<>(existingValues); newValues.add(value); - newResponseHeaders.put(key, Collections.unmodifiableList(newValues)); } else { newResponseHeaders.put(key, Collections.singletonList(value)); } - return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); + //check if we can add another warning header - if max count within limits + if ((key.equals("Warning")) && (maxWarningHeaderCount != -1)) { //if count is NOT unbounded, check its limits + final int warningHeaderCount = newResponseHeaders.containsKey("Warning") ? newResponseHeaders.get("Warning").size() : 0; + if (warningHeaderCount > maxWarningHeaderCount) { + final String message = "Dropping a warning header, as their total count reached the maximum allowed of [" + + maxWarningHeaderCount + "] set in [" + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT.getKey() + "]!"; + ESLoggerFactory.getLogger(ThreadContext.class).warn(message); + return this; + } + } + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); } + private ThreadContextStruct putTransient(String key, Object value) { Map newTransient = new HashMap<>(this.transientHeaders); if (newTransient.putIfAbsent(key, value) != null) { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParserHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParserHelper.java new file mode 100644 index 0000000000000..b40b981981901 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParserHelper.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
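Together, the size and count checks above cap the Warning headers a single context can accumulate; a behavioural sketch (assumed values; both settings default to -1, i.e. unbounded):

    Settings settings = Settings.builder()
            .put("http.max_warning_header_count", 2)
            .put("http.max_warning_header_size", "4kb")
            .build();
    ThreadContext threadContext = new ThreadContext(settings);
    threadContext.addResponseHeader("Warning", "299 Elasticsearch-6.3.0 \"first deprecation\"");
    threadContext.addResponseHeader("Warning", "299 Elasticsearch-6.3.0 \"second deprecation\"");
    // a third distinct value pushes the count past the limit: it is dropped and a warning is logged
    threadContext.addResponseHeader("Warning", "299 Elasticsearch-6.3.0 \"third deprecation\"");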
diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParserHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParserHelper.java new file mode 100644 index 0000000000000..b40b981981901 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ObjectParserHelper.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.function.BiConsumer; + +/** + * This class provides helpers for {@link ObjectParser} that allow dealing with + * classes outside of the xcontent dependencies. + */ +public final class ObjectParserHelper<Value, Context> { + + /** + * Helper to declare an object that will be parsed into a {@link BytesReference} + */ + public void declareRawObject(final AbstractObjectParser<Value, Context> parser, + final BiConsumer<Value, BytesReference> consumer, + final ParseField field) { + final CheckedFunction<XContentParser, BytesReference, IOException> bytesParser = p -> { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.copyCurrentStructure(p); + return BytesReference.bytes(builder); + } + }; + parser.declareField(consumer, bytesParser, field, ValueType.OBJECT); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index 42089d2392395..38abe90ad46dc 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -61,6 +61,7 @@ public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() { writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); writers.put(MutableDateTime.class, XContentBuilder::timeValue); writers.put(DateTime.class, XContentBuilder::timeValue); + writers.put(TimeValue.class, (b, v) -> b.value(v.toString())); writers.put(BytesReference.class, (b, v) -> { if (v == null) { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 64d51c2b5c4b3..e9ac1deec0ab4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -627,7 +627,7 @@ static class UnicastPingRequest extends TransportRequest { UnicastPingRequest(StreamInput in) throws IOException { super(in); id = in.readInt(); - timeout = new TimeValue(in); + timeout = in.readTimeValue(); pingResponse = new PingResponse(in); } @@ -640,7 +640,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeInt(id); - timeout.writeTo(out); + out.writeTimeValue(timeout); pingResponse.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 064406f0d389d..98451e0c304b9 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.function.Function; import static java.util.Collections.emptyList; @@ -93,6 +92,10 @@ public final class HttpTransportSettings { Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); public static 
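A hypothetical caller (names invented here for illustration) can use the new helper to capture an arbitrary JSON object field as raw bytes rather than binding it to a concrete class:

    // assumes a value class exposing: void setSource(BytesReference source)
    ObjectParser<MyResponse, Void> parser = new ObjectParser<>("my_response", MyResponse::new);
    new ObjectParserHelper<MyResponse, Void>()
            .declareRawObject(parser, MyResponse::setSource, new ParseField("source"));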
final Setting SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_WARNING_HEADER_COUNT = + Setting.intSetting("http.max_warning_header_count", -1, -1, Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_WARNING_HEADER_SIZE = + Setting.byteSizeSetting("http.max_warning_header_size", new ByteSizeValue(-1), Property.NodeScope); public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope); // don't reset cookies by default, since I don't think we really need to diff --git a/server/src/main/java/org/elasticsearch/index/Index.java b/server/src/main/java/org/elasticsearch/index/Index.java index 41cb90d5ba838..ac5a2763644fa 100644 --- a/server/src/main/java/org/elasticsearch/index/Index.java +++ b/server/src/main/java/org/elasticsearch/index/Index.java @@ -87,7 +87,7 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (o == null) { + if (o == null || getClass() != o.getClass()) { return false; } Index index1 = (Index) o; diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 6c6f05623c355..12ded42033e8c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -70,8 +70,6 @@ public final class IndexSettings { (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope); public static final Setting INDEX_WARMER_ENABLED_SETTING = Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope); - public static final Setting INDEX_TTL_DISABLE_PURGE_SETTING = - Setting.boolSetting("index.ttl.disable_purge", false, Property.Dynamic, Property.IndexScope); public static final Setting INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> { switch(s) { case "false": @@ -185,7 +183,7 @@ public final class IndexSettings { public static final Setting INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), /* - * An empty translog occupies 43 bytes on disk. If the flush threshold is below this, the flush thread + * An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread * can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing. * However, small thresholds are useful for testing so we do not add a large lower bound here. */ @@ -220,7 +218,7 @@ public final class IndexSettings { "index.translog.generation_threshold_size", new ByteSizeValue(64, ByteSizeUnit.MB), /* - * An empty translog occupies 43 bytes on disk. If the generation threshold is + * An empty translog occupies 55 bytes on disk. If the generation threshold is * below this, the flush thread can get stuck in an infinite loop repeatedly * rolling the generation as every new generation will already exceed the * generation threshold. 
However, small thresholds are useful for testing so we @@ -256,20 +254,6 @@ public final class IndexSettings { public static final Setting MAX_REGEX_LENGTH_SETTING = Setting.intSetting("index.max_regex_length", 1000, 1, Property.Dynamic, Property.IndexScope); - public static final String INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY = "index.mapping.single_type"; - private static final Setting INDEX_MAPPING_SINGLE_TYPE_SETTING; // private - should not be registered - static { - Function defValue = settings -> { - boolean singleType = true; - if (settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) != null) { - singleType = Version.indexCreated(settings).onOrAfter(Version.V_6_0_0_alpha1); - } - return Boolean.valueOf(singleType).toString(); - }; - INDEX_MAPPING_SINGLE_TYPE_SETTING = Setting.boolSetting(INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, defValue, Property.IndexScope, - Property.Final); - } - private final Index index; private final Version version; private final Logger logger; @@ -306,7 +290,6 @@ public final class IndexSettings { private volatile int maxTokenCount; private volatile int maxNgramDiff; private volatile int maxShingleDiff; - private volatile boolean TTLPurgeDisabled; private volatile TimeValue searchIdleAfter; private volatile int maxAnalyzedOffset; private volatile int maxTermsCount; @@ -325,11 +308,6 @@ public final class IndexSettings { */ private volatile int maxRegexLength; - /** - * Whether the index is required to have at most one type. - */ - private final boolean singleType; - /** * Returns the default search fields for this index. */ @@ -422,7 +400,6 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti maxTokenCount = scopedSettings.get(MAX_TOKEN_COUNT_SETTING); maxNgramDiff = scopedSettings.get(MAX_NGRAM_DIFF_SETTING); maxShingleDiff = scopedSettings.get(MAX_SHINGLE_DIFF_SETTING); - TTLPurgeDisabled = scopedSettings.get(INDEX_TTL_DISABLE_PURGE_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); @@ -431,11 +408,6 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti this.mergePolicyConfig = new MergePolicyConfig(logger, this); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); - singleType = INDEX_MAPPING_SINGLE_TYPE_SETTING.get(indexMetaData.getSettings()); // get this from metadata - it's not registered - if ((singleType || version.before(Version.V_6_0_0_alpha1)) == false) { - throw new AssertionError(index.toString() + "multiple types are only allowed on pre 6.x indices but version is: [" - + version + "]"); - } scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed); @@ -450,7 +422,6 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti mergeSchedulerConfig::setMaxThreadAndMergeCount); scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability); - scopedSettings.addSettingsUpdateConsumer(INDEX_TTL_DISABLE_PURGE_SETTING, this::setTTLPurgeDisabled); 
scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow); scopedSettings.addSettingsUpdateConsumer(MAX_INNER_RESULT_WINDOW_SETTING, this::setMaxInnerResultWindow); scopedSettings.addSettingsUpdateConsumer(MAX_ADJACENCY_MATRIX_FILTERS_SETTING, this::setMaxAdjacencyMatrixFilters); @@ -570,11 +541,6 @@ public IndexMetaData getIndexMetaData() { */ public int getNumberOfReplicas() { return settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); } - /** - * Returns whether the index enforces at most one type. - */ - public boolean isSingleType() { return singleType; } - /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. @@ -799,18 +765,6 @@ public MergePolicy getMergePolicy() { return mergePolicyConfig.getMergePolicy(); } - /** - * Returns true if the TTL purge is disabled for this index. Default is false - */ - public boolean isTTLPurgeDisabled() { - return TTLPurgeDisabled; - } - - private void setTTLPurgeDisabled(boolean ttlPurgeDisabled) { - this.TTLPurgeDisabled = ttlPurgeDisabled; - } - - public T getValue(Setting setting) { return scopedSettings.get(setting); } diff --git a/server/src/main/java/org/elasticsearch/index/VersionType.java b/server/src/main/java/org/elasticsearch/index/VersionType.java index 6a8214cb0b8ec..b350252dc9c41 100644 --- a/server/src/main/java/org/elasticsearch/index/VersionType.java +++ b/server/src/main/java/org/elasticsearch/index/VersionType.java @@ -38,6 +38,9 @@ public String explainConflictForWrites(long currentVersion, long expectedVersion if (expectedVersion == Versions.MATCH_DELETED) { return "document already exists (current version [" + currentVersion + "])"; } + if (currentVersion == Versions.NOT_FOUND) { + return "document does not exist (expected version [" + expectedVersion + "])"; + } return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; } @@ -48,6 +51,9 @@ public boolean isVersionConflictForReads(long currentVersion, long expectedVersi @Override public String explainConflictForReads(long currentVersion, long expectedVersion) { + if (currentVersion == Versions.NOT_FOUND) { + return "document does not exist (expected version [" + expectedVersion + "])"; + } return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; } @@ -123,6 +129,9 @@ public boolean isVersionConflictForReads(long currentVersion, long expectedVersi @Override public String explainConflictForReads(long currentVersion, long expectedVersion) { + if (currentVersion == Versions.NOT_FOUND) { + return "document does not exist (expected version [" + expectedVersion + "])"; + } return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; } @@ -178,6 +187,9 @@ public boolean isVersionConflictForReads(long currentVersion, long expectedVersi @Override public String explainConflictForReads(long currentVersion, long expectedVersion) { + if (currentVersion == Versions.NOT_FOUND) { + return "document does not exist (expected version [" + expectedVersion + "])"; + } return "current version [" + currentVersion + "] is different than the one provided [" + expectedVersion + "]"; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 
6cc8c4197dcd5..fab8cba468b56 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1066,14 +1066,13 @@ public Index(Term uid, ParsedDocument doc, long seqNo, long primaryTerm, long ve this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp; } - public Index(Term uid, ParsedDocument doc) { - this(uid, doc, Versions.MATCH_ANY); + public Index(Term uid, long primaryTerm, ParsedDocument doc) { + this(uid, primaryTerm, doc, Versions.MATCH_ANY); } // TEST ONLY - Index(Term uid, ParsedDocument doc, long version) { - // use a primary term of 2 to allow tests to reduce it to a valid >0 term - this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 2, version, VersionType.INTERNAL, - Origin.PRIMARY, System.nanoTime(), -1, false); + Index(Term uid, long primaryTerm, ParsedDocument doc, long version) { + this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL, + Origin.PRIMARY, System.nanoTime(), -1, false); } // TEST ONLY public ParsedDocument parsedDoc() { @@ -1099,10 +1098,6 @@ public String routing() { return this.doc.routing(); } - public String parent() { - return this.doc.parent(); - } - public List docs() { return this.doc.docs(); } @@ -1147,8 +1142,8 @@ public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, lo this.id = Objects.requireNonNull(id); } - public Delete(String type, String id, Term uid) { - this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + public Delete(String type, String id, Term uid, long primaryTerm) { + this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); } public Delete(Delete template, VersionType versionType) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 352c3ba3e6280..b7c5a41691343 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -79,6 +79,7 @@ public final class EngineConfig { @Nullable private final CircuitBreakerService circuitBreakerService; private final LongSupplier globalCheckpointSupplier; + private final LongSupplier primaryTermSupplier; /** * Index setting to change the low level lucene codec used for writing new segments. 
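With the hard-coded terms gone, callers thread the owning shard's primary term through explicitly; a sketch (the uid term and shard are assumed to exist):

    // a test-only delete now carries a real primary term instead of the previous hard-coded 0
    Engine.Delete delete = new Engine.Delete("_doc", "1", uidTerm, indexShard.getPrimaryTerm());
    // the engine config takes a live supplier so the engine always observes the latest term
    LongSupplier primaryTermSupplier = indexShard::getPrimaryTerm;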
@@ -125,7 +126,7 @@ public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, List externalRefreshListener, List internalRefreshListener, Sort indexSort, TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService, - LongSupplier globalCheckpointSupplier) { + LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) { this.shardId = shardId; this.allocationId = allocationId; this.indexSettings = indexSettings; @@ -152,6 +153,7 @@ public EngineConfig(ShardId shardId, String allocationId, ThreadPool threadPool, this.translogRecoveryRunner = translogRecoveryRunner; this.circuitBreakerService = circuitBreakerService; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.primaryTermSupplier = primaryTermSupplier; } /** @@ -354,4 +356,11 @@ public Sort getIndexSort() { public CircuitBreakerService getCircuitBreakerService() { return this.circuitBreakerService; } + + /** + * Returns a supplier that supplies the latest primary term value of the associated shard. + */ + public LongSupplier getPrimaryTermSupplier() { + return primaryTermSupplier; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 2f6e3ab0343f4..dcd1ba65d8950 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -62,7 +62,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.OnGoingMerge; import org.elasticsearch.index.seqno.LocalCheckpointTracker; @@ -122,8 +121,6 @@ public class InternalEngine extends Engine { private final LocalCheckpointTracker localCheckpointTracker; - private final String uidField; - private final CombinedDeletionPolicy combinedDeletionPolicy; // How many callers are currently requesting index throttling. Currently there are only two situations where we do this: when merges @@ -163,7 +160,6 @@ public InternalEngine(EngineConfig engineConfig) { if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) { maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE); } - this.uidField = engineConfig.getIndexSettings().isSingleType() ? IdFieldMapper.NAME : UidFieldMapper.NAME; final TranslogDeletionPolicy translogDeletionPolicy = new TranslogDeletionPolicy( engineConfig.getIndexSettings().getTranslogRetentionSize().getBytes(), engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis() @@ -422,7 +418,7 @@ private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); final String translogUUID = loadTranslogUUIDFromLastCommit(); // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! 
- return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); + return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, engineConfig.getPrimaryTermSupplier()); } @Override @@ -517,7 +513,7 @@ private ExternalSearcherManager createSearcherManager(SearchFactory externalSear @Override public GetResult get(Get get, BiFunction searcherFactory) throws EngineException { - assert Objects.equals(get.uid().field(), uidField) : get.uid().field(); + assert Objects.equals(get.uid().field(), IdFieldMapper.NAME) : get.uid().field(); try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); SearcherScope scope; @@ -723,7 +719,7 @@ protected long doGenerateSeqNoForOperation(final Operation operation) { @Override public IndexResult index(Index index) throws IOException { - assert Objects.equals(index.uid().field(), uidField) : index.uid().field(); + assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); final boolean doThrottle = index.origin().isRecovery() == false; try (ReleasableLock releasableLock = readLock.acquire()) { ensureOpen(); @@ -1072,7 +1068,7 @@ private void updateDocs(final Term uid, final List docs, @Override public DeleteResult delete(Delete delete) throws IOException { versionMap.enforceSafeAccess(); - assert Objects.equals(delete.uid().field(), uidField) : delete.uid().field(); + assert Objects.equals(delete.uid().field(), IdFieldMapper.NAME) : delete.uid().field(); assert assertVersionType(delete); assert assertIncomingSequenceNumber(delete.origin(), delete.seqNo()); final DeleteResult deleteResult; diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java index 628bfd4826935..cbe1721f07f71 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -39,11 +39,9 @@ import org.elasticsearch.index.fielddata.AbstractSortedDocValues; import org.elasticsearch.index.fielddata.AbstractSortedSetDocValues; import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.translog.Translog; import java.io.IOException; @@ -65,9 +63,6 @@ final class TranslogLeafReader extends LeafReader { private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), 0,0); - private static final FieldInfo FAKE_UID_FIELD - = new FieldInfo(UidFieldMapper.NAME, 4, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0,0); private final Version indexVersionCreated; TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { @@ -96,49 +91,7 @@ public BinaryDocValues getBinaryDocValues(String field) { @Override public SortedDocValues getSortedDocValues(String field) { - // TODO this can be removed in 7.0 and upwards we don't support the parent field anymore - if (field.startsWith(ParentFieldMapper.NAME + "#") && operation.parent() != null) { - return new AbstractSortedDocValues() { - @Override - public int docID() { - return 0; - } - - 
private final BytesRef term = new BytesRef(operation.parent()); - private int ord; - @Override - public boolean advanceExact(int docID) { - if (docID != 0) { - throw new IndexOutOfBoundsException("do such doc ID: " + docID); - } - ord = 0; - return true; - } - - @Override - public int ordValue() { - return ord; - } - - @Override - public BytesRef lookupOrd(int ord) { - if (ord == 0) { - return term; - } - return null; - } - - @Override - public int getValueCount() { - return 1; - } - }; - } - if (operation.parent() == null) { - return null; - } - assert false : "unexpected field: " + field; - return null; + throw new UnsupportedOperationException(); } @Override @@ -220,9 +173,6 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException { } visitor.stringField(FAKE_ID_FIELD, id); } - if (visitor.needsField(FAKE_UID_FIELD) == StoredFieldVisitor.Status.YES) { - visitor.stringField(FAKE_UID_FIELD, Uid.createUid(operation.type(), operation.id()).getBytes(StandardCharsets.UTF_8)); - } } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/UidIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/UidIndexFieldData.java deleted file mode 100644 index 33de5d6a7e111..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/fielddata/UidIndexFieldData.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.fielddata; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.SortField; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; -import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.search.MultiValueMode; - -import java.io.IOException; - -/** Fielddata view of the _uid field on indices that do not index _uid but _id. - * It gets fielddata on the {@code _id field}, which is in-memory since the _id - * field does not have doc values, and prepends {@code ${type}#} to all values. - * Note that it does not add memory compared to what fielddata on the _id is - * already using: this is just a view. - * TODO: Remove fielddata access on _uid and _id, or add doc values to _id. 
- */ -public final class UidIndexFieldData implements IndexFieldData { - - private final Index index; - private final BytesRef prefix; - private final IndexFieldData idFieldData; - - public UidIndexFieldData(Index index, String type, IndexFieldData idFieldData) { - this.index = index; - BytesRefBuilder prefix = new BytesRefBuilder(); - prefix.append(new BytesRef(type)); - prefix.append((byte) '#'); - this.prefix = prefix.toBytesRef(); - this.idFieldData = idFieldData; - } - - @Override - public Index index() { - return index; - } - - @Override - public String getFieldName() { - return UidFieldMapper.NAME; - } - - @Override - public SortField sortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { - XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); - return new SortField(getFieldName(), source, reverse); - } - - @Override - public AtomicFieldData load(LeafReaderContext context) { - return new UidAtomicFieldData(prefix, idFieldData.load(context)); - } - - @Override - public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception { - return new UidAtomicFieldData(prefix, idFieldData.loadDirect(context)); - } - - @Override - public void clear() { - idFieldData.clear(); - } - - static final class UidAtomicFieldData implements AtomicFieldData { - - private final BytesRef prefix; - private final AtomicFieldData idFieldData; - - UidAtomicFieldData(BytesRef prefix, AtomicFieldData idFieldData) { - this.prefix = prefix; - this.idFieldData = idFieldData; - } - - @Override - public ScriptDocValues getScriptValues() { - return new ScriptDocValues.Strings(getBytesValues()); - } - - @Override - public long ramBytesUsed() { - return 0; // simple wrapper - } - - @Override - public void close() { - idFieldData.close(); - } - - @Override - public SortedBinaryDocValues getBytesValues() { - SortedBinaryDocValues idValues = idFieldData.getBytesValues(); - return new SortedBinaryDocValues() { - - private final BytesRefBuilder scratch = new BytesRefBuilder(); - - @Override - public boolean advanceExact(int doc) throws IOException { - return idValues.advanceExact(doc); - } - - @Override - public int docValueCount() { - return idValues.docValueCount(); - } - - @Override - public BytesRef nextValue() throws IOException { - BytesRef nextID = idValues.nextValue(); - scratch.copyBytes(prefix); - scratch.append(nextID); - return scratch.get(); - } - - }; - } - - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java index 2384e34732040..5732a872c8f58 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/DocValuesIndexFieldData.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.util.Set; @@ -68,7 +67,7 @@ public final Index index() { } public static class Builder implements IndexFieldData.Builder { - private static final Set BINARY_INDEX_FIELD_NAMES = unmodifiableSet(newHashSet(UidFieldMapper.NAME, IdFieldMapper.NAME)); + private static final Set BINARY_INDEX_FIELD_NAMES = 
unmodifiableSet(newHashSet(IdFieldMapper.NAME)); private NumericType numericType; private Function> scriptFunction = AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION; diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java index 13baf81f84c43..aecbba766f416 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java @@ -26,11 +26,9 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -51,10 +49,8 @@ */ public class FieldsVisitor extends StoredFieldVisitor { private static final Set BASE_REQUIRED_FIELDS = unmodifiableSet(newHashSet( - UidFieldMapper.NAME, IdFieldMapper.NAME, - RoutingFieldMapper.NAME, - ParentFieldMapper.NAME)); + RoutingFieldMapper.NAME)); private final boolean loadSource; private final Set requiredFields; @@ -81,12 +77,10 @@ public Status needsField(FieldInfo fieldInfo) throws IOException { } public void postProcess(MapperService mapperService) { - if (mapperService.getIndexSettings().isSingleType()) { - final Collection types = mapperService.types(); - assert types.size() <= 1 : types; - if (types.isEmpty() == false) { - type = types.iterator().next(); - } + final Collection types = mapperService.types(); + assert types.size() <= 1 : types; + if (types.isEmpty() == false) { + type = types.iterator().next(); } for (Map.Entry> entry : fields().entrySet()) { MappedFieldType fieldType = mapperService.fullName(entry.getKey()); @@ -115,19 +109,7 @@ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { @Override public void stringField(FieldInfo fieldInfo, byte[] bytes) throws IOException { final String value = new String(bytes, StandardCharsets.UTF_8); - if (UidFieldMapper.NAME.equals(fieldInfo.name)) { - // 5.x-only - // TODO: Remove when we are on 7.x - Uid uid = Uid.createUid(value); - type = uid.type(); - id = uid.id(); - } else if (IdFieldMapper.NAME.equals(fieldInfo.name)) { - // only applies to 5.x indices that have single_type = true - // TODO: Remove when we are on 7.x - id = value; - } else { - addValue(fieldInfo.name, value); - } + addValue(fieldInfo.name, value); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java index 5d5040c637f04..6bcf2d9d99968 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/SingleFieldsVisitor.java @@ -23,7 +23,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; @@ -41,11 +40,6 @@ public Status needsField(FieldInfo fieldInfo) throws IOException { if (fieldInfo.name.equals(field)) { return Status.YES; } - if 
(fieldInfo.name.equals(UidFieldMapper.NAME)) { - if (TypeFieldMapper.NAME.equals(field) || IdFieldMapper.NAME.equals(field)) { - return Status.YES; - } - } return Status.NO; } @@ -63,8 +57,5 @@ public void postProcess(MapperService mapperService) { if (type != null) { addValue(TypeFieldMapper.NAME, type); } - if (type != null && id != null) { - addValue(UidFieldMapper.NAME, Uid.createUid(type, id)); - } } } diff --git a/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java b/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java index 4b931e47372b7..02e44dac105c4 100644 --- a/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java +++ b/server/src/main/java/org/elasticsearch/index/flush/FlushStats.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.flush; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -31,20 +32,22 @@ public class FlushStats implements Streamable, ToXContentFragment { private long total; - + private long periodic; private long totalTimeInMillis; public FlushStats() { } - public FlushStats(long total, long totalTimeInMillis) { + public FlushStats(long total, long periodic, long totalTimeInMillis) { this.total = total; + this.periodic = periodic; this.totalTimeInMillis = totalTimeInMillis; } - public void add(long total, long totalTimeInMillis) { + public void add(long total, long periodic, long totalTimeInMillis) { this.total += total; + this.periodic += periodic; this.totalTimeInMillis += totalTimeInMillis; } @@ -57,6 +60,7 @@ public void addTotals(FlushStats flushStats) { return; } this.total += flushStats.total; + this.periodic += flushStats.periodic; this.totalTimeInMillis += flushStats.totalTimeInMillis; } @@ -67,6 +71,13 @@ public long getTotal() { return this.total; } + /** + * The number of flushes that were periodically triggered when translog exceeded the flush threshold. + */ + public long getPeriodic() { + return periodic; + } + /** * The total time merges have been executed (in milliseconds). 
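The new counter must also survive mixed-version clusters; the readFrom/writeTo changes just below gate it on the 6.3 wire version. A round-trip sketch (assuming the version constants shown):

    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_6_2_0);           // pretend the peer is an older node
    new FlushStats(10, 3, 125).writeTo(out);   // 'periodic' is omitted on the wire
    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_6_2_0);
    FlushStats stats = new FlushStats();
    stats.readFrom(in);                        // 'periodic' stays 0 on the receiving side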
*/ @@ -85,6 +96,7 @@ public TimeValue getTotalTime() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.FLUSH); builder.field(Fields.TOTAL, total); + builder.field(Fields.PERIODIC, periodic); builder.humanReadableField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, getTotalTime()); builder.endObject(); return builder; @@ -93,6 +105,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws static final class Fields { static final String FLUSH = "flush"; static final String TOTAL = "total"; + static final String PERIODIC = "periodic"; static final String TOTAL_TIME = "total_time"; static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; } @@ -101,11 +114,17 @@ static final class Fields { public void readFrom(StreamInput in) throws IOException { total = in.readVLong(); totalTimeInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + periodic = in.readVLong(); + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); out.writeVLong(totalTimeInMillis); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeVLong(periodic); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index a6c8dbf53b395..50f73944d8700 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -41,13 +41,11 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.fetch.subphase.ParentFieldSubFetchPhase; import java.io.IOException; import java.util.Collection; @@ -98,7 +96,7 @@ private GetResult get(String type, String id, String[] gFields, boolean realtime } public GetResult getForUpdate(String type, String id, long version, VersionType versionType) { - return get(type, id, new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, true, version, versionType, + return get(type, id, new String[]{RoutingFieldMapper.NAME}, true, version, versionType, FetchSourceContext.FETCH_SOURCE, true); } @@ -208,13 +206,6 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] } DocumentMapper docMapper = mapperService.documentMapper(type); - if (docMapper.parentFieldMapper().active()) { - String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.reader, docIdAndVersion.docId); - if (fields == null) { - fields = new HashMap<>(1); - } - fields.put(ParentFieldMapper.NAME, new DocumentField(ParentFieldMapper.NAME, Collections.singletonList(parentId))); - } if (gFields != null && gFields.length > 0) { for (String field : gFields) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 45cd9e17ad119..52b9a0d46e55d 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; @@ -37,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; @@ -113,7 +113,7 @@ public Mapper.Builder parse(String name, Map node, ParserContext if (propNode == null) { throw new MapperParsingException("Property [null_value] cannot be null."); } - builder.nullValue(TypeParsers.nodeBooleanValue(name, "null_value", propNode, parserContext)); + builder.nullValue(XContentMapValues.nodeBooleanValue(propNode, name + ".null_value")); iterator.remove(); } } @@ -245,15 +245,7 @@ protected void parseCreateField(ParseContext context, List field value = fieldType().nullValue(); } } else { - if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_alpha1)) { - value = context.parser().booleanValue(); - } else { - value = context.parser().booleanValueLenient(); - if (context.parser().isBooleanValueLenient() != context.parser().isBooleanValue()) { - String rawValue = context.parser().text(); - deprecationLogger.deprecated("Expected a boolean for property [{}] but got [{}]", fieldType().name(), rawValue); - } - } + value = context.parser().booleanValue(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 00e09112deed2..7f1f0b9568209 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -33,7 +33,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.ShapeRelation; @@ -43,6 +42,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; @@ -156,16 +156,10 @@ public Mapper.Builder parse(String name, Map node, ParserCo builder.nullValue(propNode.toString()); iterator.remove(); } else if (propName.equals("ignore_malformed")) { - builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, "ignore_malformed", propNode, parserContext)); + builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(propNode, name + ".ignore_malformed")); iterator.remove(); } else if (propName.equals("locale")) { - Locale locale; - if 
(parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_beta2)) { - locale = LocaleUtils.parse(propNode.toString()); - } else { - locale = LocaleUtils.parse5x(propNode.toString()); - } - builder.locale(locale); + builder.locale(LocaleUtils.parse(propNode.toString())); iterator.remove(); } else if (propName.equals("format")) { builder.dateTimeFormatter(parseDateTimeFormatter(propNode)); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 42f842e612803..b0d9b1e5e34ba 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -131,11 +131,6 @@ public DocumentMapper(MapperService mapperService, Mapping mapping) { this.mapping = mapping; this.documentParser = new DocumentParser(indexSettings, mapperService.documentMapperParser(), this); - if (metadataMapper(ParentFieldMapper.class).active()) { - // mark the routing field mapper as required - metadataMapper(RoutingFieldMapper.class).markAsRequired(); - } - // collect all the mappers for this type List newObjectMappers = new ArrayList<>(); List newFieldMappers = new ArrayList<>(); @@ -200,10 +195,6 @@ public RootObjectMapper root() { return mapping.root; } - public UidFieldMapper uidMapper() { - return metadataMapper(UidFieldMapper.class); - } - @SuppressWarnings({"unchecked"}) public T metadataMapper(Class type) { return mapping.metadataMapper(type); @@ -229,10 +220,6 @@ public RoutingFieldMapper routingFieldMapper() { return metadataMapper(RoutingFieldMapper.class); } - public ParentFieldMapper parentFieldMapper() { - return metadataMapper(ParentFieldMapper.class); - } - public IndexFieldMapper IndexFieldMapper() { return metadataMapper(IndexFieldMapper.class); } @@ -292,10 +279,6 @@ public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, Le return nestedObjectMapper; } - public boolean isParent(String type) { - return mapperService.getParentTypes().contains(type); - } - public DocumentMapper merge(Mapping mapping) { Mapping merged = this.mapping.merge(mapping); return new DocumentMapper(mapperService, merged); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index c2e0028544f88..61ff4a4ff3d0f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -76,7 +76,7 @@ ParsedDocument parseDocument(SourceToParse source) throws MapperParsingException throw new IllegalStateException("found leftover path elements: " + remainingPath); } - reverseOrder(context); + context.postParse(); return parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers())); } @@ -141,12 +141,6 @@ private static boolean isEmptyDoc(Mapping mapping, XContentParser parser) throws return false; } - private static void reverseOrder(ParseContext.InternalParseContext context) { - // reverse the order of docs for nested docs support, parent should be last - if (context.docs().size() > 1) { - Collections.reverse(context.docs()); - } - } private static ParsedDocument parsedDocument(SourceToParse source, ParseContext.InternalParseContext context, Mapping update) { return new ParsedDocument( @@ -159,7 +153,7 @@ private static ParsedDocument parsedDocument(SourceToParse source, ParseContext. 
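
Several hunks in this diff (BooleanFieldMapper and DateFieldMapper above, and more below) replace TypeParsers.nodeBooleanValue, which dispatched on the index-created version to a lenient parser, with the strict XContentMapValues.nodeBooleanValue. A rough sketch of the behavioral difference, with illustrative method bodies rather than the real implementations; the lenient rules are reconstructed from the nodeBooleanValueLenient code removed near the end of this diff:

class BooleanParseSketch {
    // Strict (what the mappers use now): only true/false are accepted, and the
    // failure message names the offending setting, e.g. "my_field.ignore_malformed".
    static boolean strictNodeBooleanValue(Object node, String settingName) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        String text = String.valueOf(node);
        if (text.equals("true")) return true;
        if (text.equals("false")) return false;
        throw new IllegalArgumentException("Could not convert [" + settingName + "] to boolean: [" + text + "]");
    }

    // Lenient (removed): numbers and free-form strings were silently coerced,
    // which is why the old code path also logged a deprecation warning.
    static boolean lenientNodeBooleanValue(Object node) {
        if (node instanceof Boolean) {
            return (Boolean) node;
        }
        if (node instanceof Number) {
            return ((Number) node).intValue() != 0;
        }
        String text = String.valueOf(node);
        return !(text.equals("false") || text.equals("off") || text.equals("no") || text.equals("0") || text.isEmpty());
    }
}
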
context.sourceToParse().source(), context.sourceToParse().getXContentType(), update - ).parent(source.parent()); + ); } @@ -423,7 +417,7 @@ private static void nested(ParseContext context, ObjectMapper.Nested nested) { private static void addFields(ParseContext.Document nestedDoc, ParseContext.Document rootDoc) { for (IndexableField field : nestedDoc.getFields()) { - if (!field.name().equals(UidFieldMapper.NAME) && !field.name().equals(TypeFieldMapper.NAME)) { + if (!field.name().equals(TypeFieldMapper.NAME)) { rootDoc.add(field); } } @@ -440,30 +434,19 @@ private static ParseContext nestedContext(ParseContext context, ObjectMapper map // documents inside the Lucene index (document blocks) will be incorrect, as nested documents of different root // documents are then aligned with other root documents. This will lead to the nested query, sorting, aggregations // and inner hits to fail or yield incorrect results. - if (context.mapperService().getIndexSettings().isSingleType()) { - IndexableField idField = parentDoc.getField(IdFieldMapper.NAME); - if (idField != null) { - // We just need to store the id as indexed field, so that IndexWriter#deleteDocuments(term) can then - // delete it when the root document is deleted too. - if (idField.stringValue() != null) { - // backward compat with 5.x - // TODO: Remove on 7.0 - nestedDoc.add(new Field(IdFieldMapper.NAME, idField.stringValue(), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - } else { - nestedDoc.add(new Field(IdFieldMapper.NAME, idField.binaryValue(), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); - } + IndexableField idField = parentDoc.getField(IdFieldMapper.NAME); + if (idField != null) { + // We just need to store the id as indexed field, so that IndexWriter#deleteDocuments(term) can then + // delete it when the root document is deleted too. + if (idField.stringValue() != null) { + // backward compat with 5.x + // TODO: Remove on 7.0 + nestedDoc.add(new Field(IdFieldMapper.NAME, idField.stringValue(), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); } else { - throw new IllegalStateException("The root document of a nested document should have an id field"); + nestedDoc.add(new Field(IdFieldMapper.NAME, idField.binaryValue(), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); } } else { - IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); - if (uidField != null) { - /// We just need to store the uid as indexed field, so that IndexWriter#deleteDocuments(term) can then - // delete it when the root document is deleted too.
- nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - } else { - throw new IllegalStateException("The root document of a nested document should have an uid field"); - } + throw new IllegalStateException("The root document of a nested document should have an _id field"); } // the type of the nested doc starts with __, so we can identify that its a nested one in filters diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index ada640f873975..606777392dec5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -112,7 +113,7 @@ public MetadataFieldMapper.Builder parse(String name, Map n String fieldName = entry.getKey(); Object fieldNode = entry.getValue(); if (fieldName.equals("enabled")) { - builder.enabled(TypeParsers.nodeBooleanValue(name, "enabled", fieldNode, parserContext)); + builder.enabled(XContentMapValues.nodeBooleanValue(fieldNode, name + ".enabled")); iterator.remove(); } } @@ -262,7 +263,7 @@ protected void parseCreateField(ParseContext context, List field if (fieldType().isEnabled() == false) { return; } - for (ParseContext.Document document : context.docs()) { + for (ParseContext.Document document : context) { final List paths = new ArrayList<>(document.getFields().size()); String previousPath = ""; // used as a sentinel - field names can't be empty for (IndexableField field : document.getFields()) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index bc9f8b660be01..10c0db01dc776 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.AbstractLatLonPointDVIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; @@ -140,11 +141,11 @@ public Mapper.Builder parse(String name, Map node, ParserContext Object propNode = entry.getValue(); if (propName.equals(Names.IGNORE_MALFORMED)) { - builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, Names.IGNORE_MALFORMED, propNode, parserContext)); + builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(propNode, name + "." + Names.IGNORE_MALFORMED)); iterator.remove(); } else if (propName.equals(Names.IGNORE_Z_VALUE.getPreferredName())) { - builder.ignoreZValue(TypeParsers.nodeBooleanValue(propName, Names.IGNORE_Z_VALUE.getPreferredName(), - propNode, parserContext)); + builder.ignoreZValue(XContentMapValues.nodeBooleanValue(propNode, + name + "." 
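
The nestedContext change above preserves the invariant that every document in a nested block carries the root document's _id term, so a single IndexWriter#deleteDocuments(term) call drops the root and all of its nested docs together. A small stand-alone Lucene sketch of that invariant, assuming a recent Lucene on the classpath (ByteBuffersDirectory and friends):

import java.util.Arrays;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.ByteBuffersDirectory;

class NestedBlockDeleteSketch {
    public static void main(String[] args) throws Exception {
        ByteBuffersDirectory dir = new ByteBuffersDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document nested = new Document();
            nested.add(new StringField("_id", "doc-1", Field.Store.NO)); // same term as the root
            Document root = new Document();
            root.add(new StringField("_id", "doc-1", Field.Store.YES));
            // index the block together; the root (parent) document must come last
            writer.addDocuments(Arrays.asList(nested, root));
            // every doc in the block carries the _id term, so one delete removes the whole block
            writer.deleteDocuments(new Term("_id", "doc-1"));
        }
        dir.close();
    }
}

Without the copied _id term on the nested doc, the delete would remove only the root and leave an orphaned nested document behind in the block.
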
+ Names.IGNORE_Z_VALUE.getPreferredName())); iterator.remove(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index b80831298cb87..753d91f7be231 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -30,7 +30,6 @@ import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoUtils; @@ -42,6 +41,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.locationtech.spatial4j.shape.Point; @@ -222,18 +222,17 @@ public Mapper.Builder parse(String name, Map node, ParserContext builder.fieldType().setStrategyName(fieldNode.toString()); iterator.remove(); } else if (IGNORE_MALFORMED.equals(fieldName)) { - builder.ignoreMalformed(TypeParsers.nodeBooleanValue(fieldName, "ignore_malformed", fieldNode, parserContext)); + builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(fieldNode, name + ".ignore_malformed")); iterator.remove(); } else if (Names.COERCE.equals(fieldName)) { - builder.coerce(TypeParsers.nodeBooleanValue(fieldName, Names.COERCE, fieldNode, parserContext)); + builder.coerce(XContentMapValues.nodeBooleanValue(fieldNode, name + "." + Names.COERCE)); iterator.remove(); } else if (GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName().equals(fieldName)) { - builder.ignoreZValue(TypeParsers.nodeBooleanValue(fieldName, GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), - fieldNode, parserContext)); + builder.ignoreZValue(XContentMapValues.nodeBooleanValue(fieldNode, name + "." + GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName())); iterator.remove(); } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { - boolean pointsOnly = TypeParsers.nodeBooleanValue(fieldName, Names.STRATEGY_POINTS_ONLY, fieldNode, parserContext); + boolean pointsOnly = XContentMapValues.nodeBooleanValue(fieldNode, name + "." 
+ Names.STRATEGY_POINTS_ONLY); builder.fieldType().setPointsOnly(pointsOnly); iterator.remove(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index e60b27fce7239..32cb99e3f9194 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -134,34 +134,22 @@ public Query existsQuery(QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - if (indexOptions() != IndexOptions.NONE) { - failIfNotIndexed(); - BytesRef[] bytesRefs = new BytesRef[values.size()]; - final boolean is5xIndex = context.indexVersionCreated().before(Version.V_6_0_0_beta1); - for (int i = 0; i < bytesRefs.length; i++) { - BytesRef id; - if (is5xIndex) { - // 5.x index with index.mapping.single_type = true - id = BytesRefs.toBytesRef(values.get(i)); - } else { - Object idObject = values.get(i); - if (idObject instanceof BytesRef) { - idObject = ((BytesRef) idObject).utf8ToString(); - } - id = Uid.encodeId(idObject.toString()); - } - bytesRefs[i] = id; + failIfNotIndexed(); + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + Object idObject = values.get(i); + if (idObject instanceof BytesRef) { + idObject = ((BytesRef) idObject).utf8ToString(); } - return new TermInSetQuery(name(), bytesRefs); + bytesRefs[i] = Uid.encodeId(idObject.toString()); } - // 5.x index, _uid is indexed - return new TermInSetQuery(UidFieldMapper.NAME, Uid.createUidsForTypesAndIds(context.queryTypes(), values)); + return new TermInSetQuery(name(), bytesRefs); } @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { if (indexOptions() == IndexOptions.NONE) { - throw new IllegalArgumentException("Fielddata access on the _uid field is disallowed"); + throw new IllegalArgumentException("Fielddata access on the _id field is disallowed"); } final IndexFieldData.Builder fieldDataBuilder = new PagedBytesIndexFieldData.Builder( TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY, @@ -172,10 +160,6 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { final IndexFieldData fieldData = fieldDataBuilder.build(indexSettings, fieldType, cache, breakerService, mapperService); - if (indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_beta1)) { - // ids were indexed as utf-8 - return fieldData; - } return new IndexFieldData() { @Override @@ -265,13 +249,8 @@ public boolean advanceExact(int doc) throws IOException { static MappedFieldType defaultFieldType(IndexSettings indexSettings) { MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone(); - if (indexSettings.isSingleType()) { - defaultFieldType.setIndexOptions(IndexOptions.DOCS); - defaultFieldType.setStored(true); - } else { - defaultFieldType.setIndexOptions(IndexOptions.NONE); - defaultFieldType.setStored(false); - } + defaultFieldType.setIndexOptions(IndexOptions.DOCS); + defaultFieldType.setStored(true); return defaultFieldType; } @@ -288,18 +267,11 @@ public void preParse(ParseContext context) throws IOException { super.parse(context); } - @Override - public void postParse(ParseContext context) throws IOException {} - @Override protected void 
parseCreateField(ParseContext context, List fields) throws IOException { if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { - if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_beta1)) { - BytesRef id = Uid.encodeId(context.sourceToParse().id()); - fields.add(new Field(NAME, id, fieldType)); - } else { - fields.add(new Field(NAME, context.sourceToParse().id(), fieldType)); - } + BytesRef id = Uid.encodeId(context.sourceToParse().id()); + fields.add(new Field(NAME, id, fieldType)); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index 8e92ecc8bf686..4061303416b7c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -172,9 +172,6 @@ private IndexFieldMapper(MappedFieldType fieldType, Settings indexSettings) { @Override public void preParse(ParseContext context) throws IOException {} - @Override - public void postParse(ParseContext context) throws IOException {} - @Override protected void parseCreateField(ParseContext context, List fields) throws IOException {} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index c10c2339b895e..fd5dc080011e6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; @@ -113,7 +114,7 @@ public Mapper.Builder parse(String name, Map node, ParserCo builder.nullValue(InetAddresses.forString(propNode.toString())); iterator.remove(); } else if (propName.equals("ignore_malformed")) { - builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, "ignore_malformed", propNode, parserContext)); + builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(propNode, name + ".ignore_malformed")); iterator.remove(); } else if (TypeParsers.parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index e13c23754ab38..2f027c0fbb998 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -111,7 +111,7 @@ public enum MergeReason { //TODO this needs to be cleaned up: _timestamp and _ttl are not supported anymore, _field_names, _seq_no, _version and _source are //also missing, not sure if on purpose. 
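
The IdFieldMapper#termsQuery hunk above now unconditionally encodes ids with Uid.encodeId before building a TermInSetQuery, since the 5.x _uid path is gone. A sketch of the shape of that query construction; encodeId here is a placeholder that just uses UTF-8 bytes, whereas the real method also packs purely-numeric and base64-like ids into a more compact binary form:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.util.BytesRef;

class IdTermsQuerySketch {
    // Placeholder for Uid.encodeId; illustrative only.
    static BytesRef encodeId(String id) {
        return new BytesRef(id);
    }

    static Query idTermsQuery(List<Object> values) {
        List<BytesRef> encoded = new ArrayList<>(values.size());
        for (Object value : values) {
            // normalize BytesRef inputs back to strings first, as the hunk above does,
            // so every id goes through the same encoding
            String id = value instanceof BytesRef ? ((BytesRef) value).utf8ToString() : value.toString();
            encoded.add(encodeId(id));
        }
        return new TermInSetQuery("_id", encoded);
    }
}
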
See IndicesModule#getMetadataMappers private static ObjectHashSet META_FIELDS = ObjectHashSet.from( - "_uid", "_id", "_type", "_parent", "_routing", "_index", + "_id", "_type", "_routing", "_index", "_size", "_timestamp", "_ttl" ); @@ -135,8 +135,6 @@ public enum MergeReason { private volatile Map unmappedFieldTypes = emptyMap(); - private volatile Set parentTypes = emptySet(); - final MapperRegistry mapperRegistry; public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, NamedXContentRegistry xContentRegistry, @@ -367,7 +365,6 @@ private synchronized Map internalMerge(@Nullable Documen boolean hasNested = this.hasNested; Map fullPathObjectMappers = this.fullPathObjectMappers; FieldTypeLookup fieldTypes = this.fieldTypes; - Set parentTypes = this.parentTypes; Map mappers = new HashMap<>(this.mappers); Map results = new LinkedHashMap<>(documentMappers.size() + 1); @@ -385,7 +382,7 @@ private synchronized Map internalMerge(@Nullable Documen results.put(DEFAULT_MAPPING, defaultMapper); } - if (indexSettings.isSingleType()) { + { Set actualTypes = new HashSet<>(mappers.keySet()); documentMappers.forEach(mapper -> actualTypes.add(mapper.type())); actualTypes.remove(DEFAULT_MAPPING); @@ -398,9 +395,6 @@ private synchronized Map internalMerge(@Nullable Documen for (DocumentMapper mapper : documentMappers) { // check naming validateTypeName(mapper.type()); - if (mapper.type().equals(mapper.parentFieldMapper().type())) { - throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); - } // compute the merged DocumentMapper DocumentMapper oldMapper = mappers.get(mapper.type()); @@ -436,9 +430,7 @@ private synchronized Map internalMerge(@Nullable Documen } } - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_beta1)) { - validateCopyTo(fieldMappers, fullPathObjectMappers, fieldTypes); - } + validateCopyTo(fieldMappers, fullPathObjectMappers, fieldTypes); if (reason == MergeReason.MAPPING_UPDATE) { // this check will only be performed on the master node when there is @@ -449,14 +441,6 @@ private synchronized Map internalMerge(@Nullable Documen checkTotalFieldsLimit(objectMappers.size() + fieldMappers.size()); } - if (oldMapper == null && newMapper.parentFieldMapper().active()) { - if (parentTypes == this.parentTypes) { - // first time through the loop - parentTypes = new HashSet<>(this.parentTypes); - } - parentTypes.add(mapper.parentFieldMapper().type()); - } - results.put(newMapper.type(), newMapper); mappers.put(newMapper.type(), newMapper); } @@ -497,9 +481,6 @@ private synchronized Map internalMerge(@Nullable Documen if (fullPathObjectMappers != this.fullPathObjectMappers) { fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers); } - if (parentTypes != this.parentTypes) { - parentTypes = Collections.unmodifiableSet(parentTypes); - } // commit the change if (defaultMappingSource != null) { @@ -509,7 +490,6 @@ private synchronized Map internalMerge(@Nullable Documen this.fieldTypes = fieldTypes; this.hasNested = hasNested; this.fullPathObjectMappers = fullPathObjectMappers; - this.parentTypes = parentTypes; assert assertMappersShareSameFieldType(); assert results.values().stream().allMatch(this::assertSerialization); @@ -635,11 +615,6 @@ private void checkDepthLimit(String objectPath, long maxDepth) { private void checkPartitionedIndexConstraints(DocumentMapper newMapper) { if (indexSettings.getIndexMetaData().isRoutingPartitionedIndex()) { - if (newMapper.parentFieldMapper().active()) { - throw new 
IllegalArgumentException("mapping type name [" + newMapper.type() + "] cannot have a " - + "_parent field for the partitioned index [" + indexSettings.getIndex().getName() + "]"); - } - if (!newMapper.routingFieldMapper().required()) { throw new IllegalArgumentException("mapping type [" + newMapper.type() + "] must have routing " + "required for partitioned index [" + indexSettings.getIndex().getName() + "]"); @@ -818,10 +793,6 @@ public Analyzer searchQuoteAnalyzer() { return this.searchQuoteAnalyzer; } - public Set getParentTypes() { - return parentTypes; - } - @Override public void close() throws IOException { indexAnalyzers.close(); @@ -868,13 +839,6 @@ public Term createUidTerm(String type, String id) { if (hasMapping(type) == false) { return null; } - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_beta1)) { - assert indexSettings.isSingleType(); - return new Term(IdFieldMapper.NAME, Uid.encodeId(id)); - } else if (indexSettings.isSingleType()) { - return new Term(IdFieldMapper.NAME, id); - } else { - return new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(type, id)); - } + return new Term(IdFieldMapper.NAME, Uid.encodeId(id)); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index 1240250a74743..c5c9099224138 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -64,7 +64,9 @@ protected MetadataFieldMapper(String simpleName, MappedFieldType fieldType, Mapp /** * Called after {@link FieldMapper#parse(ParseContext)} on the {@link RootObjectMapper}. */ - public abstract void postParse(ParseContext context) throws IOException; + public void postParse(ParseContext context) throws IOException { + // do nothing + } @Override public MetadataFieldMapper merge(Mapper mergeWith) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 92cb44cfd147f..69793ca89b57d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; @@ -156,10 +157,10 @@ public Mapper.Builder parse(String name, Map node, builder.nullValue(type.parse(propNode, false)); iterator.remove(); } else if (propName.equals("ignore_malformed")) { - builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name,"ignore_malformed", propNode, parserContext)); + builder.ignoreMalformed(XContentMapValues.nodeBooleanValue(propNode, name + ".ignore_malformed")); iterator.remove(); } else if (propName.equals("coerce")) { - builder.coerce(TypeParsers.nodeBooleanValue(name, "coerce", propNode, parserContext)); + builder.coerce(XContentMapValues.nodeBooleanValue(propNode, name + ".coerce")); iterator.remove(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index c96d8bb384bb6..c3e3e41798d91 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import java.io.IOException; import java.util.ArrayList; @@ -178,12 +179,12 @@ protected static boolean parseObjectOrDocumentTypeProperties(String fieldName, O if (value.equalsIgnoreCase("strict")) { builder.dynamic(Dynamic.STRICT); } else { - boolean dynamic = TypeParsers.nodeBooleanValue(fieldName, "dynamic", fieldNode, parserContext); + boolean dynamic = XContentMapValues.nodeBooleanValue(fieldNode, fieldName + ".dynamic"); builder.dynamic(dynamic ? Dynamic.TRUE : Dynamic.FALSE); } return true; } else if (fieldName.equals("enabled")) { - builder.enabled(TypeParsers.nodeBooleanValue(fieldName, "enabled", fieldNode, parserContext)); + builder.enabled(XContentMapValues.nodeBooleanValue(fieldNode, fieldName + ".enabled")); return true; } else if (fieldName.equals("properties")) { if (fieldNode instanceof Collection && ((Collection) fieldNode).isEmpty()) { @@ -219,12 +220,12 @@ protected static void parseNested(String name, Map node, ObjectM } fieldNode = node.get("include_in_parent"); if (fieldNode != null) { - nestedIncludeInParent = TypeParsers.nodeBooleanValue(name, "include_in_parent", fieldNode, parserContext); + nestedIncludeInParent = XContentMapValues.nodeBooleanValue(fieldNode, name + ".include_in_parent"); node.remove("include_in_parent"); } fieldNode = node.get("include_in_root"); if (fieldNode != null) { - nestedIncludeInRoot = TypeParsers.nodeBooleanValue(name, "include_in_root", fieldNode, parserContext); + nestedIncludeInRoot = XContentMapValues.nodeBooleanValue(fieldNode, name + ".include_in_root"); node.remove("include_in_root"); } if (nested) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java deleted file mode 100644 index 9de53b8dec6dd..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParentFieldMapper.java +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
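
The ObjectMapper hunk above keeps a small tri-state for the dynamic setting: the string "strict" is special-cased before the now-strict boolean conversion. A sketch of that parse order with a stand-in enum; the error message mirrors the strict parser's style but is illustrative:

class DynamicSettingSketch {
    enum Dynamic { TRUE, FALSE, STRICT }

    static Dynamic parseDynamic(Object node) {
        // "strict" is checked first; it is the only non-boolean value allowed
        if (node instanceof String && ((String) node).equalsIgnoreCase("strict")) {
            return Dynamic.STRICT;
        }
        // everything else must now be a real boolean (the lenient path is gone)
        if (node instanceof Boolean) {
            return ((Boolean) node) ? Dynamic.TRUE : Dynamic.FALSE;
        }
        String text = String.valueOf(node);
        if (text.equals("true")) return Dynamic.TRUE;
        if (text.equals("false")) return Dynamic.FALSE;
        throw new IllegalArgumentException("Could not convert [dynamic] to boolean: [" + text + "]");
    }
}
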
- */ -package org.elasticsearch.index.mapper; - -import org.apache.lucene.document.SortedDocValuesField; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.DocValuesFieldExistsQuery; -import org.apache.lucene.search.DocValuesTermsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; -import org.elasticsearch.index.query.QueryShardContext; - -import java.io.IOException; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue; - -public class ParentFieldMapper extends MetadataFieldMapper { - - public static final String NAME = "_parent"; - public static final String CONTENT_TYPE = "_parent"; - - public static class Defaults { - public static final String NAME = ParentFieldMapper.NAME; - - public static final ParentFieldType FIELD_TYPE = new ParentFieldType(); - - static { - FIELD_TYPE.setTokenized(false); - FIELD_TYPE.setIndexOptions(IndexOptions.NONE); - FIELD_TYPE.setHasDocValues(true); - FIELD_TYPE.setDocValuesType(DocValuesType.SORTED); - FIELD_TYPE.setEagerGlobalOrdinals(false); - FIELD_TYPE.freeze(); - } - } - - public static class Builder extends MetadataFieldMapper.Builder { - - private String parentType; - - private final String documentType; - - public Builder(String documentType) { - super(Defaults.NAME, new ParentFieldType(Defaults.FIELD_TYPE, documentType), Defaults.FIELD_TYPE); - // Defaults to true - eagerGlobalOrdinals(true); - this.documentType = documentType; - builder = this; - } - - public Builder type(String type) { - this.parentType = type; - return builder; - } - - public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) { - fieldType().setEagerGlobalOrdinals(eagerGlobalOrdinals); - return builder; - } - - @Override - public ParentFieldMapper build(BuilderContext context) { - if (parentType == null) { - throw new MapperParsingException("[_parent] field mapping must contain the [type] option"); - } - name = joinField(parentType); - setupFieldType(context); - return new ParentFieldMapper(createParentJoinFieldMapper(documentType, context), fieldType, parentType, context.indexSettings()); - } - } - - public static class TypeParser implements MetadataFieldMapper.TypeParser { - private static final ParseField FIELDDATA = new ParseField("fielddata").withAllDeprecated("eager_global_ordinals"); - @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - Builder builder = new Builder(parserContext.type()); - for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { - 
Map.Entry entry = iterator.next(); - String fieldName = entry.getKey(); - Object fieldNode = entry.getValue(); - if (fieldName.equals("type")) { - builder.type(fieldNode.toString()); - iterator.remove(); - } else if (FIELDDATA.match(fieldName, LoggingDeprecationHandler.INSTANCE)) { - // for bw compat only - Map fieldDataSettings = nodeMapValue(fieldNode, "fielddata"); - if (fieldDataSettings.containsKey("loading")) { - builder.eagerGlobalOrdinals("eager_global_ordinals".equals(fieldDataSettings.get("loading"))); - } - iterator.remove(); - } else if (fieldName.equals("eager_global_ordinals")) { - builder.eagerGlobalOrdinals(XContentMapValues.nodeBooleanValue(fieldNode, "eager_global_ordinals")); - iterator.remove(); - } - } - return builder; - } - - @Override - public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { - final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); - final String typeName = context.type(); - KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0))); - MappedFieldType childJoinFieldType = new ParentFieldType(Defaults.FIELD_TYPE, typeName); - childJoinFieldType.setName(ParentFieldMapper.NAME); - return new ParentFieldMapper(parentJoinField, childJoinFieldType, null, indexSettings); - } - } - - static KeywordFieldMapper createParentJoinFieldMapper(String docType, BuilderContext context) { - KeywordFieldMapper.Builder parentJoinField = new KeywordFieldMapper.Builder(joinField(docType)); - parentJoinField.indexOptions(IndexOptions.NONE); - parentJoinField.docValues(true); - parentJoinField.fieldType().setDocValuesType(DocValuesType.SORTED); - return parentJoinField.build(context); - } - - static final class ParentFieldType extends MappedFieldType { - - final String documentType; - - ParentFieldType() { - documentType = null; - setEagerGlobalOrdinals(true); - } - - ParentFieldType(ParentFieldType ref, String documentType) { - super(ref); - this.documentType = documentType; - } - - private ParentFieldType(ParentFieldType ref) { - super(ref); - this.documentType = ref.documentType; - } - - @Override - public MappedFieldType clone() { - return new ParentFieldType(this); - } - - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public Query existsQuery(QueryShardContext context) { - return new DocValuesFieldExistsQuery(name()); - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return termsQuery(Collections.singletonList(value), context); - } - - @Override - public Query termsQuery(List values, @Nullable QueryShardContext context) { - BytesRef[] ids = new BytesRef[values.size()]; - for (int i = 0; i < ids.length; i++) { - ids[i] = BytesRefs.toBytesRef(values.get(i)); - } - BooleanQuery.Builder query = new BooleanQuery.Builder(); - query.add(new DocValuesTermsQuery(name(), ids), BooleanClause.Occur.MUST); - query.add(new TermQuery(new Term(TypeFieldMapper.NAME, documentType)), BooleanClause.Occur.FILTER); - return query.build(); - } - - @Override - public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - return new DocValuesIndexFieldData.Builder(); - } - } - - private final String parentType; - // has no impact of field data settings, is just here for creating a join field, - // the parent field mapper in the child type pointing to this type determines the field data settings for this join field - private final KeywordFieldMapper 
parentJoinField; - - private ParentFieldMapper(KeywordFieldMapper parentJoinField, MappedFieldType childJoinFieldType, String parentType, Settings indexSettings) { - super(NAME, childJoinFieldType, Defaults.FIELD_TYPE, indexSettings); - this.parentType = parentType; - this.parentJoinField = parentJoinField; - } - - public MappedFieldType getParentJoinFieldType() { - return parentJoinField.fieldType(); - } - - public String type() { - return parentType; - } - - @Override - public void preParse(ParseContext context) throws IOException { - } - - @Override - public void postParse(ParseContext context) throws IOException { - parse(context); - } - - @Override - protected void parseCreateField(ParseContext context, List fields) throws IOException { - boolean parent = context.docMapper().isParent(context.sourceToParse().type()); - if (parent) { - fields.add(new SortedDocValuesField(parentJoinField.fieldType().name(), new BytesRef(context.sourceToParse().id()))); - } - - if (!active()) { - return; - } - - if (context.parser().currentName() != null && context.parser().currentName().equals(Defaults.NAME)) { - // we are in the parsing of _parent phase - String parentId = context.parser().text(); - context.sourceToParse().parent(parentId); - fields.add(new SortedDocValuesField(fieldType.name(), new BytesRef(parentId))); - } else { - // otherwise, we are running it post processing of the xcontent - String parsedParentId = context.doc().get(Defaults.NAME); - if (context.sourceToParse().parent() != null) { - String parentId = context.sourceToParse().parent(); - if (parsedParentId == null) { - if (parentId == null) { - throw new MapperParsingException("No parent id provided, not within the document, and not externally"); - } - // we did not add it in the parsing phase, add it now - fields.add(new SortedDocValuesField(fieldType.name(), new BytesRef(parentId))); - } else if (parentId != null && !parsedParentId.equals(Uid.createUid(parentType, parentId))) { - throw new MapperParsingException("Parent id mismatch, document value is [" + Uid.createUid(parsedParentId).id() + "], while external value is [" + parentId + "]"); - } - } - } - // we have parent mapping, yet no value was set, ignore it... 
- } - - public static String joinField(String parentType) { - return ParentFieldMapper.NAME + "#" + parentType; - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - - @Override - public Iterator iterator() { - return Collections.singleton(parentJoinField).iterator(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (!active()) { - return builder; - } - boolean includeDefaults = params.paramAsBoolean("include_defaults", false); - - builder.startObject(CONTENT_TYPE); - builder.field("type", parentType); - if (includeDefaults || fieldType().eagerGlobalOrdinals() == false) { - builder.field("eager_global_ordinals", fieldType().eagerGlobalOrdinals()); - } - builder.endObject(); - return builder; - } - - @Override - protected void doMerge(Mapper mergeWith) { - ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; - if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) { - throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); - } - // If fieldMergeWith is not active it means the user provided a mapping - // update that does not explicitly configure the _parent field, so we - // ignore it. - if (fieldMergeWith.active()) { - super.doMerge(mergeWith); - } - - } - - /** - * @return Whether the _parent field is actually configured. - */ - public boolean active() { - return parentType != null; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index 4a7af6cba47ea..8c2eda31ca198 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -29,10 +29,11 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; -public abstract class ParseContext { +public abstract class ParseContext implements Iterable{ /** Fork of {@link org.apache.lucene.document.Document} with additional functionality. 
*/ public static class Document implements Iterable { @@ -171,6 +172,11 @@ private FilterParseContext(ParseContext in) { this.in = in; } + @Override + public Iterable nonRootDocuments() { + return in.nonRootDocuments(); + } + @Override public DocumentMapperParser docMapperParser() { return in.docMapperParser(); @@ -211,11 +217,6 @@ public Document rootDoc() { return in.rootDoc(); } - @Override - public List docs() { - return in.docs(); - } - @Override public Document doc() { return in.doc(); @@ -280,6 +281,11 @@ public void addDynamicMapper(Mapper update) { public List getDynamicMappers() { return in.getDynamicMappers(); } + + @Override + public Iterator iterator() { + return in.iterator(); + } } public static class InternalParseContext extends ParseContext { @@ -309,9 +315,10 @@ public static class InternalParseContext extends ParseContext { private long numNestedDocs; - private final List dynamicMappers; + private boolean docsReversed = false; + public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, SourceToParse source, XContentParser parser) { this.indexSettings = indexSettings; @@ -360,8 +367,7 @@ public Document rootDoc() { return documents.get(0); } - @Override - public List docs() { + List docs() { return this.documents; } @@ -426,8 +432,35 @@ public void addDynamicMapper(Mapper mapper) { public List getDynamicMappers() { return dynamicMappers; } + + @Override + public Iterable nonRootDocuments() { + if (docsReversed) { + throw new IllegalStateException("documents are already reversed"); + } + return documents.subList(1, documents.size()); + } + + void postParse() { + // reverse the order of docs for nested docs support, parent should be last + if (documents.size() > 1) { + docsReversed = true; + Collections.reverse(documents); + } + } + + @Override + public Iterator iterator() { + return documents.iterator(); + } } + /** + * Returns an Iterable over all non-root documents. If there are no non-root documents + * the iterable will return an empty iterator. + */ + public abstract Iterable nonRootDocuments(); + public abstract DocumentMapperParser docMapperParser(); /** @@ -506,8 +539,6 @@ public boolean isWithinMultiFields() { public abstract Document rootDoc(); - public abstract List docs(); - public abstract Document doc(); protected abstract void addDoc(Document doc); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 0c740a0af7c8b..414cb3a98ecab 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -45,8 +45,6 @@ public class ParsedDocument { private Mapping dynamicMappingsUpdate; - private String parent; - public ParsedDocument(Field version, SeqNoFieldMapper.SequenceIDFields seqID, String id, @@ -110,15 +108,6 @@ public void setSource(BytesReference source, XContentType xContentType) { this.xContentType = xContentType; } - public ParsedDocument parent(String parent) { - this.parent = parent; - return this; - } - - public String parent() { - return this.parent; - } - /** * Return dynamic updates to mappings or {@code null} if there were no * updates to the mappings. 
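
The ParseContext changes above replace the raw docs() list with direct iteration plus a nonRootDocuments() view, and move the nested-document reversal into postParse(). A condensed, self-contained sketch of that bookkeeping, using String as a stand-in for ParseContext.Document:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

class ParseContextSketch implements Iterable<String> { // String stands in for Document
    private final List<String> documents = new ArrayList<>();
    private boolean docsReversed = false;

    ParseContextSketch(String rootDoc) { documents.add(rootDoc); }

    void addDoc(String doc) { documents.add(doc); }

    // A live view of everything except the root document. It must be consumed
    // before postParse(), hence the guard once the list has been reversed.
    Iterable<String> nonRootDocuments() {
        if (docsReversed) {
            throw new IllegalStateException("documents are already reversed");
        }
        return documents.subList(1, documents.size());
    }

    void postParse() {
        // reverse so the root document comes last: Lucene expects the parent of
        // a nested block to be the final document in the block
        if (documents.size() > 1) {
            docsReversed = true;
            Collections.reverse(documents);
        }
    }

    @Override
    public Iterator<String> iterator() { return documents.iterator(); }
}

SeqNoFieldMapper below is the main beneficiary: it can iterate nonRootDocuments() without caring whether the underlying list has been reversed yet.
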
@@ -137,7 +126,7 @@ public void addDynamicMappingsUpdate(Mapping update) { @Override public String toString() { - return "Document uid[" + Uid.createUidAsBytes(type, id) + "] doc [" + documents + ']'; + return "Document id[" + id + "] doc [" + documents + ']'; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 1536db6510fc7..0a92d0d188477 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.QueryShardContext; import org.joda.time.DateTimeZone; @@ -183,16 +184,10 @@ public Mapper.Builder parse(String name, Map node, throw new MapperParsingException("Property [null_value] is not supported for [" + this.type.name + "] field types."); } else if (propName.equals("coerce")) { - builder.coerce(TypeParsers.nodeBooleanValue(name, "coerce", propNode, parserContext)); + builder.coerce(XContentMapValues.nodeBooleanValue(propNode, name + ".coerce")); iterator.remove(); } else if (propName.equals("locale")) { - Locale locale; - if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_beta2)) { - locale = LocaleUtils.parse(propNode.toString()); - } else { - locale = LocaleUtils.parse5x(propNode.toString()); - } - builder.locale(locale); + builder.locale(LocaleUtils.parse(propNode.toString())); iterator.remove(); } else if (propName.equals("format")) { builder.dateTimeFormatter(parseDateTimeFormatter(propNode)); @@ -214,7 +209,7 @@ public static final class RangeFieldType extends MappedFieldType { super(); this.rangeType = Objects.requireNonNull(type); setTokenized(false); - setHasDocValues(indexVersionCreated.onOrAfter(Version.V_6_0_0_beta1)); + setHasDocValues(true); setOmitNorms(true); if (rangeType == RangeType.DATE) { setDateTimeFormatter(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index 25cfc71261b0a..76753496f4672 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -88,7 +89,7 @@ public MetadataFieldMapper.Builder parse(String name, Map n String fieldName = entry.getKey(); Object fieldNode = entry.getValue(); if (fieldName.equals("required")) { - builder.required(TypeParsers.nodeBooleanValue(name, "required", fieldNode, parserContext)); + builder.required(XContentMapValues.nodeBooleanValue(fieldNode, name + ".required")); iterator.remove(); } } @@ -156,10 +157,6 @@ public void preParse(ParseContext context) throws IOException { super.parse(context); } - @Override - public void postParse(ParseContext 
context) throws IOException { - } - @Override public Mapper parse(ParseContext context) throws IOException { // no need to parse here, we either get the routing in the sourceToParse diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 197d555736343..ac3ffe4627238 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -253,11 +253,9 @@ public void postParse(ParseContext context) throws IOException { // we share the parent docs fields to ensure good compression SequenceIDFields seqID = context.seqID(); assert seqID != null; - int numDocs = context.docs().size(); final Version versionCreated = context.mapperService().getIndexSettings().getIndexVersionCreated(); final boolean includePrimaryTerm = versionCreated.before(Version.V_6_1_0); - for (int i = 1; i < numDocs; i++) { - final Document doc = context.docs().get(i); + for (Document doc : context.nonRootDocuments()) { doc.add(seqID.seqNo); doc.add(seqID.seqNoDocValue); if (includePrimaryTerm) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java index b4a8330e23803..f2090613c0965 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceFieldMapper.java @@ -113,7 +113,7 @@ public MetadataFieldMapper.Builder parse(String name, Map n String fieldName = entry.getKey(); Object fieldNode = entry.getValue(); if (fieldName.equals("enabled")) { - builder.enabled(TypeParsers.nodeBooleanValue(name, "enabled", fieldNode, parserContext)); + builder.enabled(XContentMapValues.nodeBooleanValue(fieldNode, name + ".enabled")); iterator.remove(); } else if (fieldName.equals("includes")) { List values = (List) fieldNode; @@ -216,10 +216,6 @@ public void preParse(ParseContext context) throws IOException { super.parse(context); } - @Override - public void postParse(ParseContext context) throws IOException { - } - @Override public Mapper parse(ParseContext context) throws IOException { // nothing to do here, we will call it in pre parse @@ -228,32 +224,23 @@ public Mapper parse(ParseContext context) throws IOException { @Override protected void parseCreateField(ParseContext context, List fields) throws IOException { - if (!enabled) { - return; - } - if (!fieldType().stored()) { - return; - } BytesReference source = context.sourceToParse().source(); - // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data - if (source == null) { - return; - } - - if (filter != null) { - // we don't update the context source if we filter, we want to keep it as is...
- Tuple<XContentType, Map<String, Object>> mapTuple = - XContentHelper.convertToMap(source, true, context.sourceToParse().getXContentType()); - Map filteredSource = filter.apply(mapTuple.v2()); - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType contentType = mapTuple.v1(); - XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); - builder.close(); - - source = bStream.bytes(); + if (enabled && fieldType().stored() && source != null) { + // Percolate and tv APIs may not set the source and that is ok, because these APIs will not index any data + if (filter != null) { + // we don't update the context source if we filter, we want to keep it as is... + Tuple<XContentType, Map<String, Object>> mapTuple = + XContentHelper.convertToMap(source, true, context.sourceToParse().getXContentType()); + Map filteredSource = filter.apply(mapTuple.v2()); + BytesStreamOutput bStream = new BytesStreamOutput(); + XContentType contentType = mapTuple.v1(); + XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); + builder.close(); + source = bStream.bytes(); + } + BytesRef ref = source.toBytesRef(); + fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); } - BytesRef ref = source.toBytesRef(); - fields.add(new StoredField(fieldType().name(), ref.bytes, ref.offset, ref.length)); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index 52e3001da84cd..0130207c0a78e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -42,8 +42,6 @@ public static SourceToParse source(String index, String type, String id, BytesRe private String routing; - private String parentId; - private XContentType xContentType; private SourceToParse(String index, String type, String id, BytesReference source, XContentType xContentType) { @@ -72,15 +70,6 @@ public String id() { return this.id; } - public String parent() { - return this.parentId; - } - - public SourceToParse parent(String parentId) { - this.parentId = parentId; - return this; - } - public String routing() { return this.routing; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index b47242d02b0f3..36bd4b137cf9c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -144,33 +144,23 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - if (context.getIndexSettings().isSingleType()) { - Collection indexTypes = context.getMapperService().types(); - if (indexTypes.isEmpty()) { - return new MatchNoDocsQuery("No types"); - } - assert indexTypes.size() == 1; - BytesRef indexType = indexedValueForSearch(indexTypes.iterator().next()); - if (values.stream() - .map(this::indexedValueForSearch) - .anyMatch(indexType::equals)) { - if (context.getMapperService().hasNested()) { - // type filters are expected not to match nested docs - return Queries.newNonNestedFilter(context.indexVersionCreated()); - } else { - return new MatchAllDocsQuery(); - } + Collection indexTypes = context.getMapperService().types(); + if (indexTypes.isEmpty()) { + return new MatchNoDocsQuery("No types"); + } +
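
The interesting part of the SourceFieldMapper hunk above is that an includes/excludes filter rewrites only the stored copy of _source; the bytes the parser saw stay untouched. A reduced sketch of that filtering step in plain Java maps, with the filter simplified to top-level key inclusion instead of the real XContentMapValues path and wildcard logic:

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

class SourceFilterSketch {
    // Simplified stand-in for the includes/excludes filter built by the mapper.
    static Function<Map<String, Object>, Map<String, Object>> includeFilter(Set<String> includes) {
        return source -> {
            Map<String, Object> filtered = new LinkedHashMap<>();
            for (Map.Entry<String, Object> entry : source.entrySet()) {
                if (includes.contains(entry.getKey())) {
                    filtered.put(entry.getKey(), entry.getValue());
                }
            }
            return filtered;
        };
    }

    public static void main(String[] args) {
        Map<String, Object> source = new LinkedHashMap<>();
        source.put("title", "a document");
        source.put("internal", "not stored");
        // only the filtered map gets re-serialized into the stored _source field;
        // the original bytes used for parsing are left as-is
        System.out.println(includeFilter(Collections.singleton("title")).apply(source)); // {title=a document}
    }
}
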
assert indexTypes.size() == 1; + BytesRef indexType = indexedValueForSearch(indexTypes.iterator().next()); + if (values.stream() + .map(this::indexedValueForSearch) + .anyMatch(indexType::equals)) { + if (context.getMapperService().hasNested()) { + // type filters are expected not to match nested docs + return Queries.newNonNestedFilter(context.indexVersionCreated()); } else { - return new MatchNoDocsQuery("Type list does not contain the index type"); + return new MatchAllDocsQuery(); } } else { - if (indexOptions() == IndexOptions.NONE) { - throw new AssertionError(); - } - final BytesRef[] types = values.stream() - .map(this::indexedValueForSearch) - .toArray(size -> new BytesRef[size]); - return new TypesQuery(types); + return new MatchNoDocsQuery("Type list does not contain the index type"); } } @@ -269,13 +259,8 @@ private TypeFieldMapper(MappedFieldType fieldType, IndexSettings indexSettings) private static MappedFieldType defaultFieldType(IndexSettings indexSettings) { MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone(); - if (indexSettings.isSingleType()) { - defaultFieldType.setIndexOptions(IndexOptions.NONE); - defaultFieldType.setHasDocValues(false); - } else { - defaultFieldType.setIndexOptions(IndexOptions.DOCS); - defaultFieldType.setHasDocValues(true); - } + defaultFieldType.setIndexOptions(IndexOptions.NONE); + defaultFieldType.setHasDocValues(false); return defaultFieldType; } @@ -284,10 +269,6 @@ public void preParse(ParseContext context) throws IOException { super.parse(context); } - @Override - public void postParse(ParseContext context) throws IOException { - } - @Override public Mapper parse(ParseContext context) throws IOException { // we parse in pre parse diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index 37fd1203622b1..52dfadfe27308 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -21,12 +21,8 @@ import org.apache.lucene.index.IndexOptions; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; -import org.elasticsearch.common.Booleans; import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.Joda; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -48,35 +44,6 @@ public class TypeParsers { public static final String INDEX_OPTIONS_POSITIONS = "positions"; public static final String INDEX_OPTIONS_OFFSETS = "offsets"; - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeParsers.class)); - - //TODO 22298: Remove this method and have all call-sites use XContentMapValues.nodeBooleanValue(node) directly. - public static boolean nodeBooleanValue(String fieldName, String propertyName, Object node, - Mapper.TypeParser.ParserContext parserContext) { - if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) { - return XContentMapValues.nodeBooleanValue(node, fieldName + "." 
+ propertyName); - } else { - return nodeBooleanValueLenient(fieldName, propertyName, node); - } - } - - //TODO 22298: Remove this method and have all call-sites use XContentMapValues.nodeBooleanValue(node) directly. - public static boolean nodeBooleanValueLenient(String fieldName, String propertyName, Object node) { - if (Booleans.isBoolean(node.toString()) == false) { - DEPRECATION_LOGGER.deprecated("Expected a boolean for property [{}] for field [{}] but got [{}]", - propertyName, fieldName, node); - } - if (node instanceof Boolean) { - return (Boolean) node; - } - if (node instanceof Number) { - return ((Number) node).intValue() != 0; - } - @SuppressWarnings("deprecated") - boolean value = Booleans.parseBooleanLenient(node.toString(), false); - return value; - } - private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, String name, Map fieldNode, Mapper.TypeParser.ParserContext parserContext) { @@ -92,17 +59,16 @@ private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, St parseTermVector(name, propNode.toString(), builder); iterator.remove(); } else if (propName.equals("store_term_vectors")) { - builder.storeTermVectors(nodeBooleanValue(name, "store_term_vectors", propNode, parserContext)); + builder.storeTermVectors(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vectors")); iterator.remove(); } else if (propName.equals("store_term_vector_offsets")) { - builder.storeTermVectorOffsets(nodeBooleanValue(name, "store_term_vector_offsets", propNode, parserContext)); + builder.storeTermVectorOffsets(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vector_offsets")); iterator.remove(); } else if (propName.equals("store_term_vector_positions")) { - builder.storeTermVectorPositions( - nodeBooleanValue(name, "store_term_vector_positions", propNode, parserContext)); + builder.storeTermVectorPositions(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vector_positions")); iterator.remove(); } else if (propName.equals("store_term_vector_payloads")) { - builder.storeTermVectorPayloads(nodeBooleanValue(name,"store_term_vector_payloads", propNode, parserContext)); + builder.storeTermVectorPayloads(XContentMapValues.nodeBooleanValue(propNode, name + ".store_term_vector_payloads")); iterator.remove(); } else if (propName.equals("analyzer")) { NamedAnalyzer analyzer = parserContext.getIndexAnalyzers().get(propNode.toString()); @@ -158,7 +124,7 @@ private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, St public static void parseNorms(FieldMapper.Builder builder, String fieldName, Object propNode, Mapper.TypeParser.ParserContext parserContext) { - builder.omitNorms(nodeBooleanValue(fieldName, "norms", propNode, parserContext) == false); + builder.omitNorms(XContentMapValues.nodeBooleanValue(propNode, fieldName + ".norms") == false); } /** @@ -197,13 +163,13 @@ public static void parseField(FieldMapper.Builder builder, String name, Map types, Object id) { - return createUidsForTypesAndIds(types, Collections.singletonList(id)); - } - - public static BytesRef[] createUidsForTypesAndIds(Collection types, Collection ids) { - BytesRef[] uids = new BytesRef[types.size() * ids.size()]; - BytesRefBuilder typeBytes = new BytesRefBuilder(); - BytesRefBuilder idBytes = new BytesRefBuilder(); - int index = 0; - for (String type : types) { - typeBytes.copyChars(type); - for (Object id : ids) { - uids[index++] = Uid.createUidAsBytes(typeBytes.get(), BytesRefs.toBytesRef(id, idBytes)); - } - } - return uids; 
- } - - public static String createUid(String type, String id) { - return type + DELIMITER + id; + return type + "#" + id; } private static final int UTF8 = 0xff; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java deleted file mode 100644 index 04e791b8cee1e..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermInSetQuery; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldDataCache; -import org.elasticsearch.index.fielddata.UidIndexFieldData; -import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.indices.breaker.CircuitBreakerService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -public class UidFieldMapper extends MetadataFieldMapper { - - public static final String NAME = "_uid"; - - public static final String CONTENT_TYPE = "_uid"; - - public static class Defaults { - public static final String NAME = UidFieldMapper.NAME; - - public static final MappedFieldType FIELD_TYPE = new UidFieldType(); - public static final MappedFieldType NESTED_FIELD_TYPE; - - static { - FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); - FIELD_TYPE.setTokenized(false); - FIELD_TYPE.setStored(true); - FIELD_TYPE.setOmitNorms(true); - FIELD_TYPE.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); - FIELD_TYPE.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); - FIELD_TYPE.setName(NAME); - FIELD_TYPE.freeze(); - - NESTED_FIELD_TYPE = FIELD_TYPE.clone(); - NESTED_FIELD_TYPE.setStored(false); - NESTED_FIELD_TYPE.freeze(); - } - } - - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(UidFieldMapper.class)); - - 
public static class TypeParser implements MetadataFieldMapper.TypeParser { - @Override - public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - throw new MapperParsingException(NAME + " is not configurable"); - } - - @Override - public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { - final IndexSettings indexSettings = context.mapperService().getIndexSettings(); - return new UidFieldMapper(indexSettings, fieldType); - } - } - - static final class UidFieldType extends TermBasedFieldType { - - UidFieldType() { - } - - protected UidFieldType(UidFieldType ref) { - super(ref); - } - - @Override - public MappedFieldType clone() { - return new UidFieldType(this); - } - - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - if (indexOptions() == IndexOptions.NONE) { - DEPRECATION_LOGGER.deprecated("Fielddata access on the _uid field is deprecated, use _id instead"); - return new IndexFieldData.Builder() { - @Override - public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, - CircuitBreakerService breakerService, MapperService mapperService) { - MappedFieldType idFieldType = mapperService.fullName(IdFieldMapper.NAME); - IndexFieldData idFieldData = idFieldType.fielddataBuilder(fullyQualifiedIndexName) - .build(indexSettings, idFieldType, cache, breakerService, mapperService); - final String type = mapperService.types().iterator().next(); - return new UidIndexFieldData(indexSettings.getIndex(), type, idFieldData); - } - }; - } else { - // Old index, _uid was indexed - return new PagedBytesIndexFieldData.Builder( - TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY, - TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY, - TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE); - } - } - - @Override - public Query existsQuery(QueryShardContext context) { - return new MatchAllDocsQuery(); - } - - @Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - return termsQuery(Arrays.asList(value), context); - } - - @Override - public Query termsQuery(List values, @Nullable QueryShardContext context) { - if (indexOptions() != IndexOptions.NONE) { - return super.termsQuery(values, context); - } - Collection indexTypes = context.getMapperService().types(); - if (indexTypes.isEmpty()) { - return new MatchNoDocsQuery("No types"); - } - assert indexTypes.size() == 1; - final String expectedPrefix = indexTypes.iterator().next() + "#"; - List ids = new ArrayList<>(); - for (Object uid : values) { - if (uid instanceof BytesRef) { - uid = ((BytesRef) uid).utf8ToString(); - } - String uidString = uid.toString(); - if (uidString.startsWith(expectedPrefix)) { - String id = uidString.substring(expectedPrefix.length(), uidString.length()); - BytesRef encodedId; - if (context.indexVersionCreated().onOrAfter(Version.V_6_0_0_beta1)) { - encodedId = Uid.encodeId(id); - } else { - encodedId = new BytesRef(id); - } - ids.add(encodedId); - } - } - return new TermInSetQuery(IdFieldMapper.NAME, ids); - } - } - - static MappedFieldType defaultFieldType(IndexSettings indexSettings) { - MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone(); - if (indexSettings.isSingleType()) { - defaultFieldType.setIndexOptions(IndexOptions.NONE); - defaultFieldType.setStored(false); - } else { - 
defaultFieldType.setIndexOptions(IndexOptions.DOCS); - defaultFieldType.setStored(true); - } - return defaultFieldType; - } - - private UidFieldMapper(IndexSettings indexSettings, MappedFieldType existing) { - this(existing == null ? defaultFieldType(indexSettings) : existing, indexSettings); - } - - private UidFieldMapper(MappedFieldType fieldType, IndexSettings indexSettings) { - super(NAME, fieldType, defaultFieldType(indexSettings), indexSettings.getSettings()); - } - - @Override - public void preParse(ParseContext context) throws IOException { - super.parse(context); - } - - @Override - public void postParse(ParseContext context) throws IOException {} - - @Override - public Mapper parse(ParseContext context) throws IOException { - // nothing to do here, we do everything in preParse - return null; - } - - @Override - protected void parseCreateField(ParseContext context, List fields) throws IOException { - if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { - Field uid = new Field(NAME, Uid.createUid(context.sourceToParse().type(), context.sourceToParse().id()), fieldType); - fields.add(uid); - } - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder; - } - - @Override - protected void doMerge(Mapper mergeWith) { - // do nothing here, no merging, but also no exception - } -} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index bedb98e2126ac..ef3c63f488947 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -128,8 +128,7 @@ public void postParse(ParseContext context) throws IOException { // that don't have the field. This is consistent with the default value for efficiency. Field version = context.version(); assert version != null; - for (int i = 1; i < context.docs().size(); i++) { - final Document doc = context.docs().get(i); + for (Document doc : context.nonRootDocuments()) { doc.add(version); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java index 9f9b508267224..68c872e9cbb90 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java @@ -31,11 +31,12 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -83,6 +84,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { /** * Add types to query */ + // TODO: Remove public IdsQueryBuilder types(String... 
types) { if (types == null) { throw new IllegalArgumentException("[" + NAME + "] types cannot be null"); @@ -154,13 +156,12 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws IOException { - Query query; - MappedFieldType uidField = context.fieldMapper(UidFieldMapper.NAME); - if (uidField == null) { + MappedFieldType idField = context.fieldMapper(IdFieldMapper.NAME); + if (idField == null) { return new MatchNoDocsQuery("No mappings"); } if (this.ids.isEmpty()) { - query = Queries.newMatchNoDocsQuery("Missing ids in \"" + this.getName() + "\" query."); + return Queries.newMatchNoDocsQuery("Missing ids in \"" + this.getName() + "\" query."); } else { Collection typesForQuery; if (types.length == 0) { @@ -172,9 +173,15 @@ protected Query doToQuery(QueryShardContext context) throws IOException { Collections.addAll(typesForQuery, types); } - query = uidField.termsQuery(Arrays.asList(Uid.createUidsForTypesAndIds(typesForQuery, ids)), context); + final Collection mappingTypes = context.getMapperService().types(); + assert mappingTypes.size() == 1; + if (typesForQuery.contains(mappingTypes.iterator().next())) { + return idField.termsQuery(new ArrayList<>(ids), context); + } else { + return new MatchNoDocsQuery("Type mismatch"); + } + } - return query; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index d296db28ad625..b5e3272ab0a6e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -52,10 +51,10 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; -import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; import java.io.InputStream; @@ -70,7 +69,6 @@ import java.util.Set; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.mapper.Uid.createUidAsBytes; /** * A more like this query that finds documents that are "like" the provided set of document(s). 
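An aside on the IdsQueryBuilder change above: with a single mapping type, `ids` now boils down to a terms lookup on `_id`, gated by a type check, instead of a `_uid` lookup. A minimal client-side sketch of the resulting behavior (the type and id values are illustrative, not from this patch):

```java
import org.elasticsearch.index.query.IdsQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// On a 6.x index whose single mapping type is "doc" (illustrative):
IdsQueryBuilder ids = QueryBuilders.idsQuery().addIds("1", "4", "100");
// doToQuery now resolves this to a terms query on the _id field.

// Restricting to a type that is not the index's type short-circuits to
// MatchNoDocsQuery("Type mismatch") instead of building a _uid lookup:
IdsQueryBuilder other = QueryBuilders.idsQuery("other_type").addIds("1");
```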
@@ -1132,21 +1130,21 @@ private static Fields[] getFieldsFor(MultiTermVectorsResponse responses) throws } private static void handleExclude(BooleanQuery.Builder boolQuery, Item[] likeItems, QueryShardContext context) { - MappedFieldType uidField = context.fieldMapper(UidFieldMapper.NAME); - if (uidField == null) { + MappedFieldType idField = context.fieldMapper(IdFieldMapper.NAME); + if (idField == null) { // no mappings, nothing to exclude return; } // artificial docs get assigned a random id and should be disregarded - List uids = new ArrayList<>(); + List ids = new ArrayList<>(); for (Item item : likeItems) { if (item.doc() != null) { continue; } - uids.add(createUidAsBytes(item.type(), item.id())); + ids.add(item.id()); } - if (!uids.isEmpty()) { - Query query = uidField.termsQuery(uids, context); + if (!ids.isEmpty()) { + Query query = idField.termsQuery(ids, context); boolQuery.add(query, BooleanClause.Occur.MUST_NOT); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index cf8e6e57e0ca0..54c1384cf9de4 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -571,7 +571,9 @@ public static WrapperQueryBuilder wrapperQuery(byte[] source) { /** * A filter based on doc/mapping type. + * @deprecated Types are going away, prefer filtering on a field. */ + @Deprecated public static TypeQueryBuilder typeQuery(String type) { return new TypeQueryBuilder(type); } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 4ce8aae52c133..3920b730d7a51 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -42,7 +42,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -358,7 +357,7 @@ public QueryStringQueryBuilder tieBreaker(float tieBreaker) { return this; } - public float tieBreaker() { + public Float tieBreaker() { return this.tieBreaker; } @@ -389,6 +388,22 @@ public QueryStringQueryBuilder analyzer(String analyzer) { this.analyzer = analyzer; return this; } + + /** + * The optional analyzer used to analyze the query string. Note, if a field has search analyzer + * defined for it, then it will be used automatically. Defaults to the smart search analyzer. + */ + public String analyzer() { + return analyzer; + } + + /** + * The optional analyzer used to analyze the query string for phrase searches. Note, if a field has search (quote) analyzer + * defined for it, then it will be used automatically. Defaults to the smart search analyzer. + */ + public String quoteAnalyzer() { + return quoteAnalyzer; + } /** * The optional analyzer used to analyze the query string for phrase searches. Note, if a field has search (quote) analyzer @@ -884,9 +899,10 @@ protected boolean doEquals(QueryStringQueryBuilder other) { Objects.equals(tieBreaker, other.tieBreaker) && Objects.equals(rewrite, other.rewrite) && Objects.equals(minimumShouldMatch, other.minimumShouldMatch) && - Objects.equals(lenient, other.lenient) && - timeZone == null ? 
other.timeZone == null : other.timeZone != null &&
-                Objects.equals(timeZone.getID(), other.timeZone.getID()) &&
+                Objects.equals(lenient, other.lenient) &&
+                Objects.equals(
+                        timeZone == null ? null : timeZone.getID(),
+                        other.timeZone == null ? null : other.timeZone.getID()) &&
                 Objects.equals(escape, other.escape) &&
                 Objects.equals(maxDeterminizedStates, other.maxDeterminizedStates) &&
                 Objects.equals(autoGenerateSynonymsPhraseQuery, other.autoGenerateSynonymsPhraseQuery) &&
diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
index 7ff181acb9033..d4333fa0bc5f0 100644
--- a/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java
@@ -24,9 +24,11 @@
 import org.apache.lucene.search.spans.SpanQuery;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentLocation;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
@@ -203,18 +205,54 @@ public static SpanNearQueryBuilder fromXContent(XContentParser parser) throws IOException {

     @Override
     protected Query doToQuery(QueryShardContext context) throws IOException {
-        if (clauses.size() == 1) {
-            Query query = clauses.get(0).toQuery(context);
+        SpanQueryBuilder queryBuilder = clauses.get(0);
+        boolean isGap = queryBuilder instanceof SpanGapQueryBuilder;
+        Query query = null;
+        if (!isGap) {
+            query = queryBuilder.toQuery(context);
             assert query instanceof SpanQuery;
+        }
+        if (clauses.size() == 1) {
+            assert !isGap;
             return query;
         }
-        SpanQuery[] spanQueries = new SpanQuery[clauses.size()];
-        for (int i = 0; i < clauses.size(); i++) {
-            Query query = clauses.get(i).toQuery(context);
-            assert query instanceof SpanQuery;
-            spanQueries[i] = (SpanQuery) query;
+        String spanNearFieldName = null;
+        if (isGap) {
+            spanNearFieldName = ((SpanGapQueryBuilder) queryBuilder).fieldName();
+        } else {
+            spanNearFieldName = ((SpanQuery) query).getField();
         }
-        return new SpanNearQuery(spanQueries, slop, inOrder);
+
+        SpanNearQuery.Builder builder = new SpanNearQuery.Builder(spanNearFieldName, inOrder);
+        builder.setSlop(slop);
+        /*
+         * Lucene SpanNearQuery throws exceptions for certain use cases like adding a gap to an
+         * unordered SpanNearQuery. Should ES have the same checks or wrap those thrown exceptions?
+         */
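A usage sketch for the gap clause handled here (not part of the patch; the field name `body` and the gap width are illustrative): a `span_gap` clause contributes a `SpanNearQuery.Builder.addGap(int)` call rather than a real sub-query, and every clause must target the same field.

```java
import org.elasticsearch.index.query.SpanNearQueryBuilder;
import org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder;
import org.elasticsearch.index.query.SpanTermQueryBuilder;

// Roughly: match "quick", skip two positions, then "fox". All clauses are on
// the same field; mixing fields makes doToQuery throw IllegalArgumentException.
SpanNearQueryBuilder nearQuery =
    new SpanNearQueryBuilder(new SpanTermQueryBuilder("body", "quick"), 0)
        .addClause(new SpanGapQueryBuilder("body", 2))
        .addClause(new SpanTermQueryBuilder("body", "fox"));
```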
+        if (isGap) {
+            int gap = ((SpanGapQueryBuilder) queryBuilder).width();
+            builder.addGap(gap);
+        } else {
+            builder.addClause((SpanQuery) query);
+        }
+
+        for (int i = 1; i < clauses.size(); i++) {
+            queryBuilder = clauses.get(i);
+            isGap = queryBuilder instanceof SpanGapQueryBuilder;
+            if (isGap) {
+                String fieldName = ((SpanGapQueryBuilder) queryBuilder).fieldName();
+                if (!spanNearFieldName.equals(fieldName)) {
+                    throw new IllegalArgumentException("[span_near] clauses must have same field");
+                }
+                int gap = ((SpanGapQueryBuilder) queryBuilder).width();
+                builder.addGap(gap);
+            } else {
+                query = clauses.get(i).toQuery(context);
+                assert query instanceof SpanQuery;
+                builder.addClause((SpanQuery) query);
+            }
+        }
+        return builder.build();
     }

     @Override
@@ -233,4 +271,168 @@ protected boolean doEquals(SpanNearQueryBuilder other) {

     public String getWriteableName() {
         return NAME;
     }
+
+    /**
+     * SpanGapQueryBuilder enables gaps in a SpanNearQuery. Since SpanGapQuery is
+     * private to SpanNearQuery, SpanGapQueryBuilder cannot be used to generate a
+     * Query (SpanGapQuery) like another QueryBuilder. Instead, it just identifies
+     * a span_gap clause so that SpanNearQuery.addGap(int) can be invoked for it.
+     * This QueryBuilder is only applicable as a clause in SpanNearQueryBuilder,
+     * but this restriction is not yet enforced.
+     */
+    public static class SpanGapQueryBuilder implements SpanQueryBuilder {
+        public static final String NAME = "span_gap";
+
+        /** Name of field to match against. */
+        private final String fieldName;
+
+        /** Width of the gap introduced. */
+        private final int width;
+
+        /**
+         * Constructs a new SpanGapQueryBuilder.
+         *
+         * @param fieldName The name of the field
+         * @param width The width of the gap introduced
+         */
+        public SpanGapQueryBuilder(String fieldName, int width) {
+            if (Strings.isEmpty(fieldName)) {
+                throw new IllegalArgumentException("[span_gap] field name is null or empty");
+            }
+            // Lucene does not restrict the value of width.
+            // TODO: determine whether it makes sense to restrict it here.
+            this.fieldName = fieldName;
+            this.width = width;
+        }
+
+        /**
+         * Read from a stream.
+ */ + public SpanGapQueryBuilder(StreamInput in) throws IOException { + fieldName = in.readString(); + width = in.readInt(); + } + + /** + * @return fieldName The name of the field + */ + public String fieldName() { + return fieldName; + } + + /** + * @return width The width of the gap introduced + */ + public int width() { + return width; + } + + @Override + public Query toQuery(QueryShardContext context) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Query toFilter(QueryShardContext context) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public String queryName() { + throw new UnsupportedOperationException(); + } + + @Override + public QueryBuilder queryName(String queryName) { + throw new UnsupportedOperationException(); + } + + @Override + public float boost() { + throw new UnsupportedOperationException(); + } + + @Override + public QueryBuilder boost(float boost) { + throw new UnsupportedOperationException(); + } + + @Override + public String getName() { + return NAME; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public final void writeTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeInt(width); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(getName()); + builder.field(fieldName, width); + builder.endObject(); + builder.endObject(); + return builder; + } + + public static SpanGapQueryBuilder fromXContent(XContentParser parser) throws IOException { + String fieldName = null; + int width = 0; + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName); + fieldName = currentFieldName; + } else if (token.isValue()) { + width = parser.intValue(); + } + } + SpanGapQueryBuilder result = new SpanGapQueryBuilder(fieldName, width); + return result; + } + + @Override + public final boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + SpanGapQueryBuilder other = (SpanGapQueryBuilder) obj; + return Objects.equals(fieldName, other.fieldName) && + Objects.equals(width, other.width); + } + + @Override + public final int hashCode() { + return Objects.hash(getClass(), fieldName, width); + } + + + @Override + public final String toString() { + return Strings.toString(this, true, true); + } + + //copied from AbstractQueryBuilder + protected static void throwParsingExceptionOnMultipleFields(String queryName, XContentLocation contentLocation, + String processedFieldName, String currentFieldName) { + if (processedFieldName != null) { + throw new ParsingException(contentLocation, "[" + queryName + "] query doesn't support multiple fields, found [" + + processedFieldName + "] and [" + currentFieldName + "]"); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java index 365806ab10c30..59c6f655dfeb9 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java @@ -27,6 +27,8 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -39,6 +41,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "type"; private static final ParseField VALUE_FIELD = new ParseField("value"); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeQueryBuilder.class)); private final String type; @@ -125,6 +128,7 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws IOException { + DEPRECATION_LOGGER.deprecated("The [type] query is deprecated, filter on a field instead."); //LUCENE 4 UPGRADE document mapper should use bytesref as well? DocumentMapper documentMapper = context.getMapperService().documentMapper(type); if (documentMapper == null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index d7ce32d9b7628..da2a165258f33 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -170,11 +169,7 @@ protected ScoreFunction doToFunction(QueryShardContext context) { } else { DEPRECATION_LOGGER.deprecated( "As of version 7.0 Elasticsearch will require that a [field] parameter is provided when a [seed] is set"); - if (context.getIndexSettings().isSingleType()) { - fieldType = context.getMapperService().fullName(IdFieldMapper.NAME); - } else { - fieldType = context.getMapperService().fullName(UidFieldMapper.NAME); - } + fieldType = context.getMapperService().fullName(IdFieldMapper.NAME); } if (fieldType == null) { if (context.getMapperService().types().isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index fc4163ddd19b2..8536337bfdbc2 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -420,9 +420,9 @@ public void readFrom(StreamInput in) throws IOException { abortOnVersionConflict = in.readBoolean(); size = in.readVInt(); refresh = in.readBoolean(); - timeout = new TimeValue(in); + timeout = in.readTimeValue(); activeShardCount = ActiveShardCount.readFrom(in); - retryBackoffInitialTime = new TimeValue(in); + retryBackoffInitialTime = in.readTimeValue(); maxRetries = in.readVInt(); requestsPerSecond = in.readFloat(); slices = in.readVInt(); @@ -435,9 +435,9 
@@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(abortOnVersionConflict); out.writeVInt(size); out.writeBoolean(refresh); - timeout.writeTo(out); + out.writeTimeValue(timeout); activeShardCount.writeTo(out); - retryBackoffInitialTime.writeTo(out); + out.writeTimeValue(retryBackoffInitialTime); out.writeVInt(maxRetries); out.writeFloat(requestsPerSecond); if (out.getVersion().before(Version.V_6_1_0) && slices == AUTO_SLICES) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java index e9e67a7ed1fe6..ac206c2c44f06 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java @@ -152,7 +152,7 @@ public boolean isTimedOut() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - took.writeTo(out); + out.writeTimeValue(took); status.writeTo(out); out.writeList(bulkFailures); out.writeList(searchFailures); @@ -162,7 +162,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - took = new TimeValue(in); + took = in.readTimeValue(); status = new BulkByScrollTask.Status(in); bulkFailures = in.readList(Failure::new); searchFailures = in.readList(ScrollableHitSource.SearchFailure::new); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index b6a4cc2fd74e2..9ff26b13212c7 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -317,10 +317,10 @@ public Status(StreamInput in) throws IOException { noops = in.readVLong(); bulkRetries = in.readVLong(); searchRetries = in.readVLong(); - throttled = new TimeValue(in); + throttled = in.readTimeValue(); requestsPerSecond = in.readFloat(); reasonCancelled = in.readOptionalString(); - throttledUntil = new TimeValue(in); + throttledUntil = in.readTimeValue(); if (in.getVersion().onOrAfter(Version.V_5_1_1)) { sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); } else { @@ -342,10 +342,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(noops); out.writeVLong(bulkRetries); out.writeVLong(searchRetries); - throttled.writeTo(out); + out.writeTimeValue(throttled); out.writeFloat(requestsPerSecond); out.writeOptionalString(reasonCancelled); - throttledUntil.writeTo(out); + out.writeTimeValue(throttledUntil); if (out.getVersion().onOrAfter(Version.V_5_1_1)) { out.writeVInt(sliceStatuses.size()); for (StatusOrException sliceStatus : sliceStatuses) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 67e0f5400b389..eb8c0e14f4343 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; -import 
org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.search.SearchHit; import org.elasticsearch.threadpool.ThreadPool; @@ -241,11 +240,6 @@ public long getVersion() { return delegate.getVersion(); } - @Override - public String getParent() { - return fieldValue(ParentFieldMapper.NAME); - } - @Override public String getRouting() { return fieldValue(RoutingFieldMapper.NAME); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index 105afcc95bc38..8e7a990902631 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -91,8 +91,8 @@ public RemoteInfo(StreamInput in) throws IOException { } this.headers = unmodifiableMap(headers); if (in.getVersion().onOrAfter(Version.V_5_2_0)) { - socketTimeout = new TimeValue(in); - connectTimeout = new TimeValue(in); + socketTimeout = in.readTimeValue(); + connectTimeout = in.readTimeValue(); } else { socketTimeout = DEFAULT_SOCKET_TIMEOUT; connectTimeout = DEFAULT_CONNECT_TIMEOUT; @@ -113,8 +113,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(header.getValue()); } if (out.getVersion().onOrAfter(Version.V_5_2_0)) { - socketTimeout.writeTo(out); - connectTimeout.writeTo(out); + out.writeTimeValue(socketTimeout); + out.writeTimeValue(connectTimeout); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java index 969bc96e35b95..917b57a9c9745 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; @@ -200,10 +199,6 @@ public interface Hit { * The content type of the hit source. Returns null if the source didn't come back from the search. */ @Nullable XContentType getXContentType(); - /** - * The document id of the parent of the hit if there is a parent or null if there isn't. - */ - @Nullable String getParent(); /** * The routing on the hit if there is any or null if there isn't. 
*/ @@ -221,7 +216,6 @@ public static class BasicHit implements Hit { private BytesReference source; private XContentType xContentType; - private String parent; private String routing; public BasicHit(String index, String type, String id, long version) { @@ -267,16 +261,6 @@ public BasicHit setSource(BytesReference source, XContentType xContentType) { return this; } - @Override - public String getParent() { - return parent; - } - - public BasicHit setParent(String parent) { - this.parent = parent; - return this; - } - @Override public String getRouting() { return routing; diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 0612853cd502f..398f2240a5c43 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -66,6 +66,7 @@ import static org.elasticsearch.common.lucene.search.Queries.newLenientFieldQuery; import static org.elasticsearch.common.lucene.search.Queries.newUnmappedFieldQuery; import static org.elasticsearch.index.search.QueryParserHelper.resolveMappingField; +import static org.elasticsearch.index.search.QueryParserHelper.resolveMappingFields; /** * A {@link XQueryParser} that uses the {@link MapperService} in order to build smarter @@ -264,6 +265,8 @@ private Map extractMultiFields(String field, boolean quoted) { // Filters unsupported fields if a pattern is requested // Filters metadata fields if all fields are requested return resolveMappingField(context, field, 1.0f, !allFields, !multiFields, quoted ? quoteFieldSuffix : null); + } else if (quoted && quoteFieldSuffix != null) { + return resolveMappingFields(context, fieldsAndWeights, quoteFieldSuffix); } else { return fieldsAndWeights; } diff --git a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java index c4048887cf97e..2f221aa0244eb 100644 --- a/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/SimpleQueryStringQueryParser.java @@ -408,9 +408,9 @@ public boolean equals(Object obj) { Objects.equals(analyzeWildcard, other.analyzeWildcard) && Objects.equals(quoteFieldSuffix, other.quoteFieldSuffix) && Objects.equals(autoGenerateSynonymsPhraseQuery, other.autoGenerateSynonymsPhraseQuery) && - Objects.equals(fuzzyPrefixLength, fuzzyPrefixLength) && - Objects.equals(fuzzyMaxExpansions, fuzzyMaxExpansions) && - Objects.equals(fuzzyTranspositions, fuzzyTranspositions); + Objects.equals(fuzzyPrefixLength, other.fuzzyPrefixLength) && + Objects.equals(fuzzyMaxExpansions, other.fuzzyMaxExpansions) && + Objects.equals(fuzzyTranspositions, other.fuzzyTranspositions); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java b/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java index a98384cee1c62..9253728fd1956 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ElasticsearchMergePolicy.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.MergeTrigger; +import 
org.apache.lucene.index.MergePolicyWrapper; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.elasticsearch.Version; @@ -44,12 +44,10 @@ * For now, this {@link MergePolicy} takes care of moving versions that used to * be stored as payloads to numeric doc values. */ -public final class ElasticsearchMergePolicy extends MergePolicy { +public final class ElasticsearchMergePolicy extends MergePolicyWrapper { private static Logger logger = Loggers.getLogger(ElasticsearchMergePolicy.class); - private final MergePolicy delegate; - // True if the next merge request should do segment upgrades: private volatile boolean upgradeInProgress; @@ -60,13 +58,7 @@ public final class ElasticsearchMergePolicy extends MergePolicy { /** @param delegate the merge policy to wrap */ public ElasticsearchMergePolicy(MergePolicy delegate) { - this.delegate = delegate; - } - - @Override - public MergeSpecification findMerges(MergeTrigger mergeTrigger, - SegmentInfos segmentInfos, IndexWriter writer) throws IOException { - return delegate.findMerges(mergeTrigger, segmentInfos, writer); + super(delegate); } private boolean shouldUpgrade(SegmentCommitInfo info) { @@ -130,18 +122,7 @@ public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, // has a chance to decide what to do (e.g. collapse the segments to satisfy maxSegmentCount) } - return delegate.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer); - } - - @Override - public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos, IndexWriter writer) - throws IOException { - return delegate.findForcedDeletesMerges(segmentInfos, writer); - } - - @Override - public boolean useCompoundFile(SegmentInfos segments, SegmentCommitInfo newSegment, IndexWriter writer) throws IOException { - return delegate.useCompoundFile(segments, newSegment, writer); + return super.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge, writer); } /** @@ -154,10 +135,4 @@ public void setUpgradeInProgress(boolean upgrade, boolean onlyAncientSegments) { this.upgradeInProgress = upgrade; this.upgradeOnlyAncientSegments = onlyAncientSegments; } - - @Override - public String toString() { - return getClass().getSimpleName() + "(" + delegate + ")"; - } - } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index e2e8459943c26..520115dc30a46 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexCommit; -import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; @@ -58,6 +57,7 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -99,7 +99,6 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.Uid; -import 
org.elasticsearch.index.mapper.UidFieldMapper;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.recovery.RecoveryStats;
 import org.elasticsearch.index.refresh.RefreshStats;
@@ -210,6 +209,7 @@ Runnable getGlobalCheckpointSyncer() {
     private final RecoveryStats recoveryStats = new RecoveryStats();
     private final MeanMetric refreshMetric = new MeanMetric();
     private final MeanMetric flushMetric = new MeanMetric();
+    private final CounterMetric periodicFlushMetric = new CounterMetric();

     private final ShardEventListener shardEventListener = new ShardEventListener();
@@ -683,14 +683,7 @@ public static Engine.Index prepareIndex(DocumentMapperForType docMapper, Version
         if (docMapper.getMapping() != null) {
             doc.addDynamicMappingsUpdate(docMapper.getMapping());
         }
-        Term uid;
-        if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) {
-            uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
-        } else if (docMapper.getDocumentMapper().idFieldMapper().fieldType().indexOptions() != IndexOptions.NONE) {
-            uid = new Term(IdFieldMapper.NAME, doc.id());
-        } else {
-            uid = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(doc.type(), doc.id()));
-        }
+        Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
         return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime, autoGeneratedIdTimestamp, isRetry);
     }
@@ -751,22 +744,21 @@ private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm,
         assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
         assert versionType.validateVersionForWrites(version);
         ensureWriteAllowed(origin);
-        if (indexSettings().isSingleType()) {
-            // When there is a single type, the unique identifier is only composed of the _id,
-            // so there is no way to differenciate foo#1 from bar#1. This is especially an issue
-            // if a user first deletes foo#1 and then indexes bar#1: since we do not encode the
-            // _type in the uid it might look like we are reindexing the same document, which
-            // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with.
-            // In order to work around this issue, we make deletions create types. This way, we
-            // fail if index and delete operations do not use the same type.
-            try {
-                Mapping update = docMapper(type).getMapping();
-                if (update != null) {
-                    onMappingUpdate.accept(update);
-                }
-            } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
-                return new Engine.DeleteResult(e, version, seqNo, false);
+        // When there is a single type, the unique identifier is only composed of the _id,
+        // so there is no way to differentiate foo#1 from bar#1. This is especially an issue
+        // if a user first deletes foo#1 and then indexes bar#1: since we do not encode the
+        // _type in the uid it might look like we are reindexing the same document, which
+        // would fail if bar#1 is indexed with a lower version than foo#1 was deleted with.
+        // In order to work around this issue, we make deletions create types. This way, we
+        // fail if index and delete operations do not use the same type.
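A small sketch of the `_id`-only keying that both the index and delete paths above now rely on (the id value is illustrative, not from this patch): since `_type` is no longer part of the uid term, two documents with the same `_id` in different types would collide, which is exactly why deletes must create the type so mismatches can be detected.

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.Uid;

// The uid term is now the binary-encoded _id alone; "type#id" style uids are gone:
BytesRef idBytes = Uid.encodeId("1");
Term uid = new Term(IdFieldMapper.NAME, idBytes); // i.e. new Term("_id", ...)
```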
+ // TODO: clean this up when types are gone + try { + Mapping update = docMapper(type).getMapping(); + if (update != null) { + onMappingUpdate.accept(update); } + } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) { + return new Engine.DeleteResult(e, version, seqNo, false); } final Term uid = extractUidForDelete(type, id); final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version, @@ -781,19 +773,10 @@ private static Engine.Delete prepareDelete(String type, String id, Term uid, lon } private Term extractUidForDelete(String type, String id) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_0_0_beta1)) { - assert indexSettings.isSingleType(); - // This is only correct because we create types dynamically on delete operations - // otherwise this could match the same _id from a different type - BytesRef idBytes = Uid.encodeId(id); - return new Term(IdFieldMapper.NAME, idBytes); - } else if (indexSettings.isSingleType()) { - // This is only correct because we create types dynamically on delete operations - // otherwise this could match the same _id from a different type - return new Term(IdFieldMapper.NAME, id); - } else { - return new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(type, id)); - } + // This is only correct because we create types dynamically on delete operations + // otherwise this could match the same _id from a different type + BytesRef idBytes = Uid.encodeId(id); + return new Term(IdFieldMapper.NAME, idBytes); } private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException { @@ -846,7 +829,7 @@ public RefreshStats refreshStats() { } public FlushStats flushStats() { - return new FlushStats(flushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum())); + return new FlushStats(flushMetric.count(), periodicFlushMetric.count(), TimeUnit.NANOSECONDS.toMillis(flushMetric.sum())); } public DocsStats docStats() { @@ -1226,7 +1209,7 @@ public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine index.versionType().versionTypeForReplicationAndRecovery(), index.getAutoGeneratedIdTimestamp(), true, origin, source(shardId.getIndexName(), index.type(), index.id(), index.source(), XContentHelper.xContentType(index.source())) - .routing(index.routing()).parent(index.parent()), onMappingUpdate); + .routing(index.routing()), onMappingUpdate); break; case DELETE: final Translog.Delete delete = (Translog.Delete) operation; @@ -2153,7 +2136,7 @@ private EngineConfig newEngineConfig() { IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), - indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker); + indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, this::getPrimaryTerm); } /** @@ -2363,6 +2346,7 @@ public void onFailure(final Exception e) { @Override protected void doRun() throws IOException { flush(new FlushRequest()); + periodicFlushMetric.inc(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java index 085fd6e339282..e243c916aa232 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -94,7 +94,7 @@ public static ShardId fromString(String shardIdString) { 
@Override public boolean equals(Object o) { if (this == o) return true; - if (o == null) return false; + if (o == null || getClass() != o.getClass()) return false; ShardId shardId1 = (ShardId) o; return shardId == shardId1.shardId && index.equals(shardId1.index); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 3654aeba2bf8d..54718c545a44e 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -393,7 +393,8 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); - final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); } else if (indexShouldExists) { // since we recover from local, just fill the files and size @@ -407,8 +408,8 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe } } else { store.createEmpty(); - final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), - SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); } indexShard.openEngineAndRecoverFromTranslog(); @@ -456,7 +457,8 @@ private void restore(final IndexShard indexShard, final Repository repository, f store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); - final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; indexShard.openEngineAndRecoverFromTranslog(); diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index 14ee8ecb9b3c0..ff226ae00bef5 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -35,15 +35,15 @@ public abstract class BaseTranslogReader implements Comparable getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) { + throw new TranslogCorruptedException("Operation's term is newer than translog header term; " + + "operation term[" + op.primaryTerm() + "], translog header term [" + getPrimaryTerm() + "]"); + } + return op; } /** * reads bytes at position into the given buffer, filling it. 
*/
-    protected abstract void readBytes(ByteBuffer buffer, long position) throws IOException;
+    protected abstract void readBytes(ByteBuffer buffer, long position) throws IOException;

     @Override
     public String toString() {
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index b6b6f656be44f..cc5041bf24434 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -41,7 +40,6 @@
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.IndexShardComponent;
@@ -109,7 +107,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable {
     public static final String CHECKPOINT_FILE_NAME = "translog" + CHECKPOINT_SUFFIX;
     static final Pattern PARSE_STRICT_ID_PATTERN = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)(\\.tlog)$");

-    public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogWriter.getHeaderLength(UUIDs.randomBase64UUID());
+    public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogHeader.headerSizeInBytes(UUIDs.randomBase64UUID());

     // the list of translog readers is guaranteed to be in order of translog generation
     private final List<TranslogReader> readers = new ArrayList<>();
@@ -122,6 +120,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable {
     private final AtomicBoolean closed = new AtomicBoolean();
     private final TranslogConfig config;
     private final LongSupplier globalCheckpointSupplier;
+    private final LongSupplier primaryTermSupplier;
     private final String translogUUID;
     private final TranslogDeletionPolicy deletionPolicy;

@@ -133,17 +132,22 @@
      * translog file referenced by this generation. The translog creation will fail if this generation can't be opened.
      *
      * @param config the configuration of this translog
-     * @param translogUUID the translog uuid to open, null for a new translog
+     * @param translogUUID the translog uuid to open, null for a new translog
      * @param deletionPolicy an instance of {@link TranslogDeletionPolicy} that controls when a translog file can be safely
      *                       deleted
      * @param globalCheckpointSupplier a supplier for the global checkpoint
+     * @param primaryTermSupplier a supplier for the latest value of primary term of the owning index shard. The latest term value is
+     *                            examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside
+     *                            that a new generation is rolled when the term is increased. This guarantee allows us to validate
+     *                            and reject operations whose term is higher than the primary term stored in the translog header.
      */
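A hedged sketch of wiring the new supplier at a call site (the `tracker` and `shard` objects are placeholders, not from this patch): the translog captures both suppliers and stamps the current term into each new generation's header.

```java
import java.util.function.LongSupplier;

// Placeholders standing in for the owning shard's state:
LongSupplier globalCheckpoint = () -> tracker.getGlobalCheckpoint(); // hypothetical tracker
LongSupplier primaryTerm = () -> shard.getPrimaryTerm();             // hypothetical shard
// Matches the widened constructor that follows:
Translog translog = new Translog(config, translogUUID, deletionPolicy, globalCheckpoint, primaryTerm);
```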
*/ public Translog( final TranslogConfig config, final String translogUUID, TranslogDeletionPolicy deletionPolicy, - final LongSupplier globalCheckpointSupplier) throws IOException { + final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier) throws IOException { super(config.getShardId(), config.getIndexSettings()); this.config = config; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.primaryTermSupplier = primaryTermSupplier; this.deletionPolicy = deletionPolicy; this.translogUUID = translogUUID; bigArrays = config.getBigArrays(); @@ -165,7 +169,7 @@ public Translog( // // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists // if not we don't even try to clean it up and wait until we fail creating it - assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; + assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; if (Files.exists(currentCheckpointFile) // current checkpoint is already copied && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); @@ -226,6 +230,9 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive"); } final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)))); + assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() : + "Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" + + "translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]"; foundTranslogs.add(reader); logger.debug("recovered local translog from checkpoint {}", checkpoint); } @@ -269,10 +276,6 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws } TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException { - return openReader(path, checkpoint, translogUUID); - } - - private static TranslogReader openReader(Path path, Checkpoint checkpoint, String translogUUID) throws IOException { FileChannel channel = FileChannel.open(path, StandardOpenOption.READ); try { assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation; @@ -459,7 +462,7 @@ TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, lon getChannelFactory(), config.getBufferSize(), initialMinTranslogGen, initialGlobalCheckpoint, - globalCheckpointSupplier, this::getMinFileGeneration); + globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong()); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -487,22 +490,18 @@ public Location add(final Operation operation) throws IOException { final ReleasablePagedBytesReference bytes = out.bytes(); try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); + if (operation.primaryTerm() > 
current.getPrimaryTerm()) { + throw new IllegalArgumentException("Operation term is newer than the current term;" + + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]"); + } return current.add(bytes, operation.seqNo()); } } catch (final AlreadyClosedException | IOException ex) { - try { - closeOnTragicEvent(ex); - } catch (final Exception inner) { - ex.addSuppressed(inner); - } + closeOnTragicEvent(ex); throw ex; - } catch (final Exception e) { - try { - closeOnTragicEvent(e); - } catch (final Exception inner) { - e.addSuppressed(inner); - } - throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); + } catch (final Exception ex) { + closeOnTragicEvent(ex); + throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", ex); } finally { Releasables.close(out); } @@ -594,6 +593,9 @@ public Operation readOperation(Location location) throws IOException { } } } + } catch (final Exception ex) { + closeOnTragicEvent(ex); + throw ex; } return null; } @@ -670,12 +672,8 @@ public void sync() throws IOException { if (closed.get() == false) { current.sync(); } - } catch (Exception ex) { - try { - closeOnTragicEvent(ex); - } catch (Exception inner) { - ex.addSuppressed(inner); - } + } catch (final Exception ex) { + closeOnTragicEvent(ex); throw ex; } } @@ -710,12 +708,8 @@ public boolean ensureSynced(Location location) throws IOException { ensureOpen(); return current.syncUpTo(location.translogLocation + location.size); } - } catch (Exception ex) { - try { - closeOnTragicEvent(ex); - } catch (Exception inner) { - ex.addSuppressed(inner); - } + } catch (final Exception ex) { + closeOnTragicEvent(ex); throw ex; } return false; @@ -739,15 +733,28 @@ public boolean ensureSynced(Stream locations) throws IOException { } } - private void closeOnTragicEvent(Exception ex) { + /** + * Closes the translog if the current translog writer experienced a tragic exception. + * + * Note that in case this thread closes the translog it must not already be holding a read lock on the translog as it will acquire a + * write lock in the course of closing the translog + * + * @param ex if an exception occurs closing the translog, it will be suppressed into the provided exception + */ + private void closeOnTragicEvent(final Exception ex) { + // we can not hold a read lock here because closing will attempt to obtain a write lock and that would result in self-deadlock + assert readLock.isHeldByCurrentThread() == false : Thread.currentThread().getName(); if (current.getTragicException() != null) { try { close(); - } catch (AlreadyClosedException inner) { - // don't do anything in this case. The AlreadyClosedException comes from TranslogWriter and we should not add it as suppressed because - // will contain the Exception ex as cause. See also https://github.com/elastic/elasticsearch/issues/15941 - } catch (Exception inner) { - assert (ex != inner.getCause()); + } catch (final AlreadyClosedException inner) { + /* + * Don't do anything in this case. The AlreadyClosedException comes from TranslogWriter and we should not add it as + * suppressed because it will contain the provided exception as its cause. See also + * https://github.com/elastic/elasticsearch/issues/15941. 
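Review note: the rewritten closeOnTragicEvent now owns the suppression bookkeeping, and its new assert documents why it must never run under the read lock. A small sketch of the deadlock it guards against, using plain java.util.concurrent types (the production code wraps these in ReleasableLock):

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class LockUpgradeSketch {
    public static void main(String[] args) {
        final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        lock.readLock().lock();
        try {
            // A ReentrantReadWriteLock cannot be upgraded: a thread holding the read
            // lock that then asks for the write lock parks forever. That is exactly
            // the self-deadlock closeOnTragicEvent would hit if invoked while the
            // caller still holds the read lock, since close() must take the write lock.
            // lock.writeLock().lock(); // uncommenting this line hangs the program
        } finally {
            lock.readLock().unlock();
        }
    }
}
```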
+ */ + } catch (final Exception inner) { + assert ex != inner.getCause(); ex.addSuppressed(inner); } } @@ -950,22 +957,19 @@ public static class Source { public final BytesReference source; public final String routing; - public final String parent; - public Source(BytesReference source, String routing, String parent) { + public Source(BytesReference source, String routing) { this.source = source; this.routing = routing; - this.parent = parent; } } public static class Index implements Operation { - public static final int FORMAT_2_X = 6; // since 2.0-beta1 and 1.1 - public static final int FORMAT_AUTO_GENERATED_IDS = FORMAT_2_X + 1; // since 5.0.0-beta1 - public static final int FORMAT_SEQ_NO = FORMAT_AUTO_GENERATED_IDS + 1; // since 6.0.0 - public static final int SERIALIZATION_FORMAT = FORMAT_SEQ_NO; + public static final int FORMAT_6_0 = 8; // since 6.0.0 + public static final int FORMAT_NO_PARENT = FORMAT_6_0 + 1; // since 7.0 + public static final int SERIALIZATION_FORMAT = FORMAT_NO_PARENT; private final String id; private final long autoGeneratedIdTimestamp; @@ -976,35 +980,23 @@ public static class Index implements Operation { private final VersionType versionType; private final BytesReference source; private final String routing; - private final String parent; private Index(final StreamInput in) throws IOException { final int format = in.readVInt(); // SERIALIZATION_FORMAT - assert format >= FORMAT_2_X : "format was: " + format; + assert format >= FORMAT_6_0 : "format was: " + format; id = in.readString(); type = in.readString(); source = in.readBytesReference(); routing = in.readOptionalString(); - parent = in.readOptionalString(); - this.version = in.readLong(); - if (format < FORMAT_SEQ_NO) { - in.readLong(); // timestamp - in.readLong(); // ttl + if (format < FORMAT_NO_PARENT) { + in.readOptionalString(); // _parent } + this.version = in.readLong(); this.versionType = VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version) : "invalid version for writes: " + this.version; - if (format >= FORMAT_AUTO_GENERATED_IDS) { - this.autoGeneratedIdTimestamp = in.readLong(); - } else { - this.autoGeneratedIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; - } - if (format >= FORMAT_SEQ_NO) { - seqNo = in.readLong(); - primaryTerm = in.readLong(); - } else { - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - primaryTerm = 0; - } + this.autoGeneratedIdTimestamp = in.readLong(); + seqNo = in.readLong(); + primaryTerm = in.readLong(); } public Index(Engine.Index index, Engine.IndexResult indexResult) { @@ -1012,7 +1004,6 @@ public Index(Engine.Index index, Engine.IndexResult indexResult) { this.type = index.type(); this.source = index.source(); this.routing = index.routing(); - this.parent = index.parent(); this.seqNo = indexResult.getSeqNo(); this.primaryTerm = index.primaryTerm(); this.version = indexResult.getVersion(); @@ -1020,21 +1011,20 @@ public Index(Engine.Index index, Engine.IndexResult indexResult) { this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp(); } - public Index(String type, String id, long seqNo, byte[] source) { - this(type, id, seqNo, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, null, -1); + public Index(String type, String id, long seqNo, long primaryTerm, byte[] source) { + this(type, id, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, -1); } - public Index(String type, String id, long seqNo, long version, VersionType versionType, byte[] source, String routing, - String 
parent, long autoGeneratedIdTimestamp) { + public Index(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType, + byte[] source, String routing, long autoGeneratedIdTimestamp) { this.type = type; this.id = id; this.source = new BytesArray(source); this.seqNo = seqNo; - this.primaryTerm = 0; + this.primaryTerm = primaryTerm; this.version = version; this.versionType = versionType; this.routing = routing; - this.parent = parent; this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp; } @@ -1060,10 +1050,6 @@ public String routing() { return this.routing; } - public String parent() { - return this.parent; - } - public BytesReference source() { return this.source; } @@ -1088,7 +1074,7 @@ public VersionType versionType() { @Override public Source getSource() { - return new Source(source, routing, parent); + return new Source(source, routing); } private void write(final StreamOutput out) throws IOException { @@ -1097,7 +1083,6 @@ private void write(final StreamOutput out) throws IOException { out.writeString(type); out.writeBytesReference(source); out.writeOptionalString(routing); - out.writeOptionalString(parent); out.writeLong(version); out.writeByte(versionType.getValue()); @@ -1130,7 +1115,7 @@ public boolean equals(Object o) { if (routing != null ? !routing.equals(index.routing) : index.routing != null) { return false; } - return !(parent != null ? !parent.equals(index.parent) : index.parent != null); + return true; } @@ -1144,7 +1129,6 @@ public int hashCode() { result = 31 * result + versionType.hashCode(); result = 31 * result + source.hashCode(); result = 31 * result + (routing != null ? routing.hashCode() : 0); - result = 31 * result + (parent != null ? parent.hashCode() : 0); result = 31 * result + Long.hashCode(autoGeneratedIdTimestamp); return result; } @@ -1167,10 +1151,8 @@ public long getAutoGeneratedIdTimestamp() { public static class Delete implements Operation { - public static final int FORMAT_5_0 = 2; // 5.0 - 5.5 - private static final int FORMAT_SINGLE_TYPE = FORMAT_5_0 + 1; // 5.5 - 6.0 - private static final int FORMAT_SEQ_NO = FORMAT_SINGLE_TYPE + 1; // 6.0 - * - public static final int SERIALIZATION_FORMAT = FORMAT_SEQ_NO; + private static final int FORMAT_6_0 = 4; // 6.0 - * + public static final int SERIALIZATION_FORMAT = FORMAT_6_0; private final String type, id; private final Term uid; @@ -1181,33 +1163,15 @@ public static class Delete implements Operation { private Delete(final StreamInput in) throws IOException { final int format = in.readVInt();// SERIALIZATION_FORMAT - assert format >= FORMAT_5_0 : "format was: " + format; - if (format >= FORMAT_SINGLE_TYPE) { - type = in.readString(); - id = in.readString(); - if (format >= FORMAT_SEQ_NO) { - uid = new Term(in.readString(), in.readBytesRef()); - } else { - uid = new Term(in.readString(), in.readString()); - } - } else { - uid = new Term(in.readString(), in.readString()); - // the uid was constructed from the type and id so we can - // extract them back - Uid uidObject = Uid.createUid(uid.text()); - type = uidObject.type(); - id = uidObject.id(); - } + assert format >= FORMAT_6_0 : "format was: " + format; + type = in.readString(); + id = in.readString(); + uid = new Term(in.readString(), in.readBytesRef()); this.version = in.readLong(); this.versionType = VersionType.fromValue(in.readByte()); assert versionType.validateVersionForWrites(this.version); - if (format >= FORMAT_SEQ_NO) { - seqNo = in.readLong(); - primaryTerm = in.readLong(); - } else { - seqNo = 
SequenceNumbers.UNASSIGNED_SEQ_NO; - primaryTerm = 0; - } + seqNo = in.readLong(); + primaryTerm = in.readLong(); } public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) { @@ -1215,8 +1179,8 @@ public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) { } /** utility for testing */ - public Delete(String type, String id, long seqNo, Term uid) { - this(type, id, uid, seqNo, 0, Versions.MATCH_ANY, VersionType.INTERNAL); + public Delete(String type, String id, long seqNo, long primaryTerm, Term uid) { + this(type, id, uid, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL); } public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType) { @@ -1420,10 +1384,10 @@ public enum Durability { } - private static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException { + static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException { // This absolutely must come first, or else reading the checksum becomes part of the checksum long expectedChecksum = in.getChecksum(); - long readChecksum = in.readInt() & 0xFFFF_FFFFL; + long readChecksum = Integer.toUnsignedLong(in.readInt()); if (readChecksum != expectedChecksum) { throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" + Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); @@ -1600,12 +1564,8 @@ public void trimUnreferencedReaders() throws IOException { assert readers.isEmpty() == false || current.generation == minReferencedGen : "all readers were cleaned but the minReferenceGen [" + minReferencedGen + "] is not the current writer's gen [" + current.generation + "]"; - } catch (Exception ex) { - try { - closeOnTragicEvent(ex); - } catch (final Exception inner) { - ex.addSuppressed(inner); - } + } catch (final Exception ex) { + closeOnTragicEvent(ex); throw ex; } } @@ -1711,10 +1671,10 @@ public static long readGlobalCheckpoint(final Path location, final String expect private static Checkpoint readCheckpoint(Path location, String expectedTranslogUUID) throws IOException { final Checkpoint checkpoint = readCheckpoint(location); - // We need to open at least translog reader to validate the translogUUID. + // We need to open at least one translog header to validate the translogUUID. final Path translogFile = location.resolve(getFilename(checkpoint.generation)); - try (TranslogReader reader = openReader(translogFile, checkpoint, expectedTranslogUUID)) { - + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader.read(expectedTranslogUUID, translogFile, channel); } catch (TranslogCorruptedException ex) { throw ex; // just bubble up. 
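Review note on the readCheckpoint change just above: validating ownership no longer requires constructing a full TranslogReader, only reading the fixed-size header. The sketch below is a simplified, self-contained analogue that checks only a length-prefixed UUID at the file head; the real TranslogHeader also carries a Lucene codec preamble, a primary term, and a checksum:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

final class OwnershipProbe {
    /** Fails fast when the file at {@code translogFile} was written for another translog. */
    static void ensureOwnedBy(Path translogFile, String expectedUUID) throws IOException {
        try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
            final ByteBuffer lenBuf = ByteBuffer.allocate(Integer.BYTES);
            while (lenBuf.hasRemaining() && channel.read(lenBuf) != -1) { }
            lenBuf.flip();
            final int uuidLen = lenBuf.getInt();
            if (uuidLen < 0 || uuidLen > channel.size()) {
                throw new IOException("uuid length can't be larger than the translog");
            }
            final ByteBuffer uuidBuf = ByteBuffer.allocate(uuidLen);
            while (uuidBuf.hasRemaining() && channel.read(uuidBuf) != -1) { }
            final String onDisk = new String(uuidBuf.array(), StandardCharsets.UTF_8);
            if (expectedUUID.equals(onDisk) == false) {
                throw new IOException("expected translog UUID [" + expectedUUID
                        + "] but file belongs to a different translog [" + onDisk + "]");
            }
        }
    }
}
```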
} catch (Exception ex) { @@ -1753,13 +1713,14 @@ List getReaders() { return readers; } - public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, final ShardId shardId) - throws IOException { + public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, + final ShardId shardId, final long primaryTerm) throws IOException { final ChannelFactory channelFactory = FileChannel::open; - return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory); + return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory, primaryTerm); } - static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, ChannelFactory channelFactory) throws IOException { + static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, + ChannelFactory channelFactory, long primaryTerm) throws IOException { IOUtils.rm(location); Files.createDirectories(location); final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); @@ -1769,7 +1730,7 @@ static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, S final String translogUUID = UUIDs.randomBase64UUID(); TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); } + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm ); writer.close(); return translogUUID; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java new file mode 100644 index 0000000000000..0fde24d8bb4d5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.store.InputStreamDataInput; +import org.apache.lucene.store.OutputStreamDataOutput; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Path; + +/** + * Each translog file is started with a translog header then followed by translog operations. + */ +final class TranslogHeader { + public static final String TRANSLOG_CODEC = "translog"; + + public static final int VERSION_CHECKSUMS = 1; // pre-2.0 - unsupported + public static final int VERSION_CHECKPOINTS = 2; // added checkpoints + public static final int VERSION_PRIMARY_TERM = 3; // added primary term + public static final int CURRENT_VERSION = VERSION_PRIMARY_TERM; + + public static final long UNKNOWN_PRIMARY_TERM = 0L; + + private final String translogUUID; + private final long primaryTerm; + private final int headerSizeInBytes; + + /** + * Creates a new translog header with the given uuid and primary term. + * + * @param translogUUID this UUID is used to prevent accidental recovery from a transaction log that belongs to a + * different engine + * @param primaryTerm the primary term of the owning index shard when creating (eg. rolling) this translog file. + * All operations' terms in this translog file are enforced to be at most this term. + */ + TranslogHeader(String translogUUID, long primaryTerm) { + this(translogUUID, primaryTerm, headerSizeInBytes(translogUUID)); + assert primaryTerm >= 0 : "Primary term must be non-negative; term [" + primaryTerm + "]"; + } + + private TranslogHeader(String translogUUID, long primaryTerm, int headerSizeInBytes) { + this.translogUUID = translogUUID; + this.primaryTerm = primaryTerm; + this.headerSizeInBytes = headerSizeInBytes; + } + + public String getTranslogUUID() { + return translogUUID; + } + + /** + * Returns the primary term stored in this translog header. + * All operations in a translog file are expected to have their primary terms at most this term. + */ + public long getPrimaryTerm() { + return primaryTerm; + } + + /** + * Returns the header size in bytes. This value can be used as the offset of the first translog operation. + * See {@link BaseTranslogReader#getFirstOperationOffset()} + */ + public int sizeInBytes() { + return headerSizeInBytes; + } + + static int headerSizeInBytes(String translogUUID) { + return headerSizeInBytes(CURRENT_VERSION, new BytesRef(translogUUID).length); + } + + private static int headerSizeInBytes(int version, int uuidLength) { + int size = CodecUtil.headerLength(TRANSLOG_CODEC); + size += Integer.BYTES + uuidLength; // uuid + if (version >= VERSION_PRIMARY_TERM) { + size += Long.BYTES; // primary term + size += Integer.BYTES; // checksum + } + return size; + } + + /** + * Read a translog header from the given path and file channel + */ + static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException { + // This input is intentionally not closed because closing it will close the FileChannel. 
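Review note: the headerSizeInBytes helpers above are pure arithmetic over the layout this class defines, which is what lets sizeInBytes() double as the first-operation offset. A worked example of what the constant comes to for a current-version header; the 22-character base64 UUID length and Lucene's 9 + codec.length() header cost are assumptions stated here, not values from the patch:

```java
public final class HeaderSizeSketch {
    public static void main(String[] args) {
        final String codec = "translog";
        final int codecHeader = 9 + codec.length(); // CodecUtil: 4 magic + 4 version + 1 length byte + codec bytes = 17
        final int uuidPart = Integer.BYTES + 22;    // int length prefix + UUID bytes (assuming a 22-char base64 UUID)
        final int termPart = Long.BYTES;            // primary term, VERSION_PRIMARY_TERM onwards
        final int checksumPart = Integer.BYTES;     // trailing header checksum, VERSION_PRIMARY_TERM onwards
        // 17 + 26 + 8 + 4 = 55 bytes for a current-version header under these assumptions
        System.out.println("header size: " + (codecHeader + uuidPart + termPart + checksumPart) + " bytes");
    }
}
```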
+ final BufferedChecksumStreamInput in = + new BufferedChecksumStreamInput(new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size())); + final int version; + try { + version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_CHECKSUMS, VERSION_PRIMARY_TERM); + } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { + tryReportOldVersionError(path, channel); + throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e); + } + if (version == VERSION_CHECKSUMS) { + throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); + } + // Read the translogUUID + final int uuidLen = in.readInt(); + if (uuidLen > channel.size()) { + throw new TranslogCorruptedException("uuid length can't be larger than the translog"); + } + final BytesRef uuid = new BytesRef(uuidLen); + uuid.length = uuidLen; + in.read(uuid.bytes, uuid.offset, uuid.length); + final BytesRef expectedUUID = new BytesRef(translogUUID); + if (uuid.bytesEquals(expectedUUID) == false) { + throw new TranslogCorruptedException("expected shard UUID " + expectedUUID + " but got: " + uuid + + " this translog file belongs to a different translog. path:" + path); + } + // Read the primary term + final long primaryTerm; + if (version == VERSION_PRIMARY_TERM) { + primaryTerm = in.readLong(); + assert primaryTerm >= 0 : "Primary term must be non-negative [" + primaryTerm + "]; translog path [" + path + "]"; + } else { + assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]"; + primaryTerm = UNKNOWN_PRIMARY_TERM; + } + // Verify the checksum + if (version >= VERSION_PRIMARY_TERM) { + Translog.verifyChecksum(in); + } + final int headerSizeInBytes = headerSizeInBytes(version, uuid.length); + assert channel.position() == headerSizeInBytes : + "Header is not fully read; header size [" + headerSizeInBytes + "], position [" + channel.position() + "]"; + return new TranslogHeader(translogUUID, primaryTerm, headerSizeInBytes); + } + + private static void tryReportOldVersionError(final Path path, final FileChannel channel) throws IOException { + // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the header, in binary this looks like: + // binary: 0011 1111 1101 0111 0110 1100 0001 0111 + // hex : 3 f d 7 6 c 1 7 + // + // With version 0 of the translog, the first byte is the Operation.Type, which will always be between 0-4, + // so we know if we grab the first byte, it can be: + // 0x3f => Lucene's magic number, so we can assume it's version 1 or later + // 0x00 => version 0 of the translog + final byte b1 = Channels.readFromFileChannel(channel, 0, 1)[0]; + if (b1 == 0x3f) { // LUCENE_CODEC_HEADER_BYTE + throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path); + } else if (b1 == 0x00) { // UNVERSIONED_TRANSLOG_HEADER_BYTE + throw new IllegalStateException("pre-1.4 translog found [" + path + "]"); + } + } + + /** + * Writes this header with the latest format into the file channel + */ + void write(final FileChannel channel) throws IOException { + // This output is intentionally not closed because closing it will close the FileChannel. 
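Review note: the header checksum verified above and written by write() below pairs with the verifyChecksum change earlier in this patch, where Integer.toUnsignedLong replaced the manual & 0xFFFF_FFFFL mask. A self-contained sketch of the scheme, assuming the checksumming streams are CRC32-backed as elsewhere in the translog:

```java
import java.nio.charset.StandardCharsets;
import java.util.zip.CRC32;

final class HeaderChecksumSketch {
    public static void main(String[] args) {
        final byte[] headerBytes = "codec-header|uuid|primary-term".getBytes(StandardCharsets.UTF_8);
        final CRC32 crc = new CRC32();
        crc.update(headerBytes, 0, headerBytes.length);
        final int stored = (int) crc.getValue();          // write(): the CRC's low 32 bits follow the term
        final long expected = crc.getValue();             // read side: CRC recomputed over the same bytes
        final long read = Integer.toUnsignedLong(stored); // widen back without sign-extension
        if (read != expected) {
            throw new IllegalStateException("translog header is corrupted");
        }
        System.out.println("checksum ok: 0x" + Long.toHexString(read));
    }
}
```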
+ @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"}) + final BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput( + new OutputStreamStreamOutput(java.nio.channels.Channels.newOutputStream(channel))); + CodecUtil.writeHeader(new OutputStreamDataOutput(out), TRANSLOG_CODEC, CURRENT_VERSION); + // Write uuid + final BytesRef uuid = new BytesRef(translogUUID); + out.writeInt(uuid.length); + out.writeBytes(uuid.bytes, uuid.offset, uuid.length); + // Write primary term + out.writeLong(primaryTerm); + // Checksum header + out.writeInt((int) out.getChecksum()); + out.flush(); + channel.force(true); + assert channel.position() == headerSizeInBytes : + "Header is not fully written; header size [" + headerSizeInBytes + "], channel position [" + channel.position() + "]"; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index b88037c32fd59..29e30bd25dd37 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -19,15 +19,8 @@ package org.elasticsearch.index.translog; -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.InputStreamDataInput; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.Channels; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import java.io.Closeable; import java.io.EOFException; @@ -41,10 +34,6 @@ * an immutable translog filereader */ public class TranslogReader extends BaseTranslogReader implements Closeable { - - private static final byte LUCENE_CODEC_HEADER_BYTE = 0x3f; - private static final byte UNVERSIONED_TRANSLOG_HEADER_BYTE = 0x00; - protected final long length; private final int totalOperations; private final Checkpoint checkpoint; @@ -53,13 +42,13 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { /** * Create a translog writer against the specified translog file channel. 
* - * @param checkpoint the translog checkpoint - * @param channel the translog file channel to open a translog reader against - * @param path the path to the translog - * @param firstOperationOffset the offset to the first operation + * @param checkpoint the translog checkpoint + * @param channel the translog file channel to open a translog reader against + * @param path the path to the translog + * @param header the header of the translog file */ - TranslogReader(final Checkpoint checkpoint, final FileChannel channel, final Path path, final long firstOperationOffset) { - super(checkpoint.generation, channel, path, firstOperationOffset); + TranslogReader(final Checkpoint checkpoint, final FileChannel channel, final Path path, final TranslogHeader header) { + super(checkpoint.generation, channel, path, header); this.length = checkpoint.offset; this.totalOperations = checkpoint.numOps; this.checkpoint = checkpoint; @@ -77,75 +66,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { */ public static TranslogReader open( final FileChannel channel, final Path path, final Checkpoint checkpoint, final String translogUUID) throws IOException { - - try { - InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), - channel.size()); // don't close - // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the - // header, in binary this looks like: - // - // binary: 0011 1111 1101 0111 0110 1100 0001 0111 - // hex : 3 f d 7 6 c 1 7 - // - // With version 0 of the translog, the first byte is the - // Operation.Type, which will always be between 0-4, so we know if - // we grab the first byte, it can be: - // 0x3f => Lucene's magic number, so we can assume it's version 1 or later - // 0x00 => version 0 of the translog - // - // otherwise the first byte of the translog is corrupted and we - // should bail - byte b1 = headerStream.readByte(); - if (b1 == LUCENE_CODEC_HEADER_BYTE) { - // Read 3 more bytes, meaning a whole integer has been read - byte b2 = headerStream.readByte(); - byte b3 = headerStream.readByte(); - byte b4 = headerStream.readByte(); - // Convert the 4 bytes that were read into an integer - int header = ((b1 & 0xFF) << 24) + ((b2 & 0xFF) << 16) + ((b3 & 0xFF) << 8) + ((b4 & 0xFF) << 0); - // We confirm CodecUtil's CODEC_MAGIC number (0x3FD76C17) - // ourselves here, because it allows us to read the first - // byte separately - if (header != CodecUtil.CODEC_MAGIC) { - throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. 
path:" + path); - } - // Confirm the rest of the header using CodecUtil, extracting - // the translog version - int version = CodecUtil.checkHeaderNoMagic(new InputStreamDataInput(headerStream), TranslogWriter.TRANSLOG_CODEC, 1, Integer.MAX_VALUE); - switch (version) { - case TranslogWriter.VERSION_CHECKSUMS: - throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); - case TranslogWriter.VERSION_CHECKPOINTS: - assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX) : "new file ends with old suffix: " + path; - assert checkpoint.numOps >= 0 : "expected at least 0 operation but got: " + checkpoint.numOps; - assert checkpoint.offset <= channel.size() : "checkpoint is inconsistent with channel length: " + channel.size() + " " + checkpoint; - int len = headerStream.readInt(); - if (len > channel.size()) { - throw new TranslogCorruptedException("uuid length can't be larger than the translog"); - } - BytesRef ref = new BytesRef(len); - ref.length = len; - headerStream.read(ref.bytes, ref.offset, ref.length); - BytesRef uuidBytes = new BytesRef(translogUUID); - if (uuidBytes.bytesEquals(ref) == false) { - throw new TranslogCorruptedException("expected shard UUID " + uuidBytes + " but got: " + ref + - " this translog file belongs to a different translog. path:" + path); - } - final long firstOperationOffset; - firstOperationOffset = ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES; - return new TranslogReader(checkpoint, channel, path, firstOperationOffset); - - default: - throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path); - } - } else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) { - throw new IllegalStateException("pre-1.4 translog found [" + path + "]"); - } else { - throw new TranslogCorruptedException("Invalid first byte in translog file, got: " + Long.toHexString(b1) + ", expected 0x00 or 0x3f. path:" + path); - } - } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { - throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e); - } + final TranslogHeader header = TranslogHeader.read(translogUUID, path, channel); + return new TranslogReader(checkpoint, channel, path, header); } public long sizeInBytes() { @@ -168,8 +90,8 @@ protected void readBytes(ByteBuffer buffer, long position) throws IOException { if (position >= length) { throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]"); } - if (position < firstOperationOffset) { - throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + firstOperationOffset + "]"); + if (position < getFirstOperationOffset()) { + throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "]"); } Channels.readFromFileChannelWithEofException(channel, position, buffer); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 5f6d14e192eb8..a966720353297 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -39,14 +39,14 @@ final class TranslogSnapshot extends BaseTranslogReader { * Create a snapshot of translog file channel. 
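Review note: the block deleted above and its replacement (tryReportOldVersionError in TranslogHeader) rely on the same first-byte classification; a compact restatement using the constants that just moved:

```java
final class FirstByteSniffSketch {
    private static final byte LUCENE_CODEC_HEADER_BYTE = 0x3f;         // first byte of CodecUtil.CODEC_MAGIC (0x3FD76C17)
    private static final byte UNVERSIONED_TRANSLOG_HEADER_BYTE = 0x00; // pre-1.4 files begin with an Operation.Type byte (0-4)

    static String classify(byte b1) {
        if (b1 == LUCENE_CODEC_HEADER_BYTE) {
            return "version 1 or later: a full codec header should follow";
        } else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) {
            return "pre-1.4 unversioned translog: unsupported";
        } else {
            return "corrupted: first byte is neither 0x00 nor 0x3f";
        }
    }
}
```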
*/ TranslogSnapshot(final BaseTranslogReader reader, final long length) { - super(reader.generation, reader.channel, reader.path, reader.firstOperationOffset); + super(reader.generation, reader.channel, reader.path, reader.header); this.length = length; this.totalOperations = reader.totalOperations(); this.checkpoint = reader.getCheckpoint(); this.reusableBuffer = ByteBuffer.allocate(1024); - readOperations = 0; - position = firstOperationOffset; - reuse = null; + this.readOperations = 0; + this.position = reader.getFirstOperationOffset(); + this.reuse = null; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index a1e7e18801445..cae6578886534 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -19,10 +19,8 @@ package org.elasticsearch.index.translog; -import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.OutputStreamDataOutput; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Assertions; import org.elasticsearch.common.bytes.BytesArray; @@ -47,12 +45,6 @@ import java.util.function.LongSupplier; public class TranslogWriter extends BaseTranslogReader implements Closeable { - - public static final String TRANSLOG_CODEC = "translog"; - public static final int VERSION_CHECKSUMS = 1; - public static final int VERSION_CHECKPOINTS = 2; // since 2.0 we have checkpoints? - public static final int VERSION = VERSION_CHECKPOINTS; - private final ShardId shardId; private final ChannelFactory channelFactory; // the last checkpoint that was written when the translog was last synced @@ -85,10 +77,10 @@ private TranslogWriter( final FileChannel channel, final Path path, final ByteSizeValue bufferSize, - final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier) throws IOException { - super(initialCheckpoint.generation, channel, path, channel.position()); + final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header) throws IOException { + super(initialCheckpoint.generation, channel, path, header); assert initialCheckpoint.offset == channel.position() : - "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel poistion [" + "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel position [" + channel.position() + "]"; this.shardId = shardId; this.channelFactory = channelFactory; @@ -104,34 +96,16 @@ private TranslogWriter( this.seenSequenceNumbers = Assertions.ENABLED ? 
new HashMap<>() : null; } - static int getHeaderLength(String translogUUID) { - return getHeaderLength(new BytesRef(translogUUID).length); - } - - static int getHeaderLength(int uuidLength) { - return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + Integer.BYTES; - } - - static void writeHeader(OutputStreamDataOutput out, BytesRef ref) throws IOException { - CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION); - out.writeInt(ref.length); - out.writeBytes(ref.bytes, ref.offset, ref.length); - } - public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint, - final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier) + final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier, + final long primaryTerm) throws IOException { - final BytesRef ref = new BytesRef(translogUUID); - final int firstOperationOffset = getHeaderLength(ref.length); final FileChannel channel = channelFactory.open(file); try { - // This OutputStreamDataOutput is intentionally not closed because - // closing it will close the FileChannel - final OutputStreamDataOutput out = new OutputStreamDataOutput(java.nio.channels.Channels.newOutputStream(channel)); - writeHeader(out, ref); - channel.force(true); - final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(firstOperationOffset, fileGeneration, + final TranslogHeader header = new TranslogHeader(translogUUID, primaryTerm); + header.write(channel); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(header.sizeInBytes(), fileGeneration, initialGlobalCheckpoint, initialMinTranslogGen); writeCheckpoint(channelFactory, file.getParent(), checkpoint); final LongSupplier writerGlobalCheckpointSupplier; @@ -146,7 +120,7 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f writerGlobalCheckpointSupplier = globalCheckpointSupplier; } return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize, - writerGlobalCheckpointSupplier, minTranslogGenerationSupplier); + writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition @@ -164,16 +138,20 @@ public Exception getTragicException() { return tragedy; } - private synchronized void closeWithTragicEvent(Exception exception) throws IOException { - assert exception != null; + private synchronized void closeWithTragicEvent(final Exception ex) { + assert ex != null; if (tragedy == null) { - tragedy = exception; - } else if (tragedy != exception) { + tragedy = ex; + } else if (tragedy != ex) { // it should be safe to call closeWithTragicEvents on multiple layers without // worrying about self suppression. 
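Review note: closeWithTragicEvent now also closes the writer itself, and the tragedy != ex guard kept here is load-bearing; Throwable rejects self-suppression at runtime, as this sketch shows:

```java
public final class SelfSuppressionSketch {
    public static void main(String[] args) {
        final Exception tragedy = new Exception("tragic event");
        final Exception again = tragedy; // the same failure surfacing through another layer
        if (tragedy != again) {          // guard mirrored from closeWithTragicEvent
            tragedy.addSuppressed(again);
        }
        try {
            tragedy.addSuppressed(tragedy); // what the guard prevents
        } catch (IllegalArgumentException e) {
            System.out.println("self-suppression rejected: " + e.getMessage());
        }
    }
}
```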
- tragedy.addSuppressed(exception); + tragedy.addSuppressed(ex); + } + try { + close(); + } catch (final IOException | RuntimeException e) { + ex.addSuppressed(e); } - close(); } /** @@ -194,11 +172,7 @@ public synchronized Translog.Location add(final BytesReference data, final long try { data.writeTo(outputStream); } catch (final Exception ex) { - try { - closeWithTragicEvent(ex); - } catch (final Exception inner) { - ex.addSuppressed(inner); - } + closeWithTragicEvent(ex); throw ex; } totalOffset += data.length(); @@ -290,16 +264,12 @@ public TranslogReader closeIntoReader() throws IOException { synchronized (this) { try { sync(); // sync before we close.. - } catch (IOException e) { - try { - closeWithTragicEvent(e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw e; + } catch (final Exception ex) { + closeWithTragicEvent(ex); + throw ex; } if (closed.compareAndSet(false, true)) { - return new TranslogReader(getLastSyncedCheckpoint(), channel, path, getFirstOperationOffset()); + return new TranslogReader(getLastSyncedCheckpoint(), channel, path, header); } else { throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy); } @@ -346,12 +316,8 @@ public boolean syncUpTo(long offset) throws IOException { try { outputStream.flush(); checkpointToSync = getCheckpoint(); - } catch (Exception ex) { - try { - closeWithTragicEvent(ex); - } catch (Exception inner) { - ex.addSuppressed(inner); - } + } catch (final Exception ex) { + closeWithTragicEvent(ex); throw ex; } } @@ -360,12 +326,8 @@ public boolean syncUpTo(long offset) throws IOException { try { channel.force(false); writeCheckpoint(channelFactory, path.getParent(), checkpointToSync); - } catch (Exception ex) { - try { - closeWithTragicEvent(ex); - } catch (Exception inner) { - ex.addSuppressed(inner); - } + } catch (final Exception ex) { + closeWithTragicEvent(ex); throw ex; } assert lastSyncedCheckpoint.offset <= checkpointToSync.offset : @@ -380,38 +342,27 @@ public boolean syncUpTo(long offset) throws IOException { @Override protected void readBytes(ByteBuffer targetBuffer, long position) throws IOException { - if (position + targetBuffer.remaining() > getWrittenOffset()) { - synchronized (this) { - // we only flush here if it's really really needed - try to minimize the impact of the read operation - // in some cases ie. a tragic event we might still be able to read the relevant value - // which is not really important in production but some test can make most strict assumptions - // if we don't fail in this call unless absolutely necessary. - if (position + targetBuffer.remaining() > getWrittenOffset()) { - outputStream.flush(); + try { + if (position + targetBuffer.remaining() > getWrittenOffset()) { + synchronized (this) { + // we only flush here if it's really really needed - try to minimize the impact of the read operation + // in some cases ie. a tragic event we might still be able to read the relevant value + // which is not really important in production but some test can make most strict assumptions + // if we don't fail in this call unless absolutely necessary. + if (position + targetBuffer.remaining() > getWrittenOffset()) { + outputStream.flush(); + } } } + } catch (final Exception ex) { + closeWithTragicEvent(ex); + throw ex; } // we don't have to have a lock here because we only write ahead to the file, so all writes has been complete // for the requested location. 
Channels.readFromFileChannelWithEofException(channel, position, targetBuffer); } - private static Checkpoint writeCheckpoint( - ChannelFactory channelFactory, - long syncPosition, - int numOperations, - long minSeqNo, - long maxSeqNo, - long globalCheckpoint, - long minTranslogGeneration, - Path translogFile, - long generation) throws IOException { - final Checkpoint checkpoint = - new Checkpoint(syncPosition, numOperations, generation, minSeqNo, maxSeqNo, globalCheckpoint, minTranslogGeneration); - writeCheckpoint(channelFactory, translogFile, checkpoint); - return checkpoint; - } - private static void writeCheckpoint( final ChannelFactory channelFactory, final Path translogFile, @@ -458,12 +409,8 @@ public synchronized void flush() throws IOException { try { ensureOpen(); super.flush(); - } catch (Exception ex) { - try { - closeWithTragicEvent(ex); - } catch (Exception inner) { - ex.addSuppressed(inner); - } + } catch (final Exception ex) { + closeWithTragicEvent(ex); throw ex; } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index 164d8fee956dd..b8bd93e05a6f8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -33,7 +33,6 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.OutputStreamDataOutput; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.EnvironmentAwareCommand; @@ -213,13 +212,11 @@ static void writeEmptyCheckpoint(Path filename, int translogLength, long translo * Write a translog containing the given translog UUID to the given location. Returns the number of bytes written. 
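Review note on the reworked writeEmptyTranslog below: the tool stamps UNKNOWN_PRIMARY_TERM (0) into the header it writes, which deliberately disables the read-side term check added in BaseTranslogReader. Both sides of that contract in one place (the predicate restates the patch's check; the constant value is from TranslogHeader):

```java
final class UnknownTermSketch {
    static final long UNKNOWN_PRIMARY_TERM = 0L;

    /** The read-side predicate: true when an operation must be rejected. */
    static boolean rejects(long operationTerm, long headerTerm) {
        return operationTerm > headerTerm && headerTerm != UNKNOWN_PRIMARY_TERM;
    }

    public static void main(String[] args) {
        System.out.println(rejects(5, 3));                    // true: a real header rejects future-term operations
        System.out.println(rejects(5, UNKNOWN_PRIMARY_TERM)); // false: tool-written headers accept any term
    }
}
```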
*/ public static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException { - final BytesRef translogRef = new BytesRef(translogUUID); - try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); - OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) { - TranslogWriter.writeHeader(out, translogRef); - fc.force(true); + try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) { + TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM); + header.write(fc); + return header.sizeInBytes(); } - return TranslogWriter.getHeaderLength(translogRef.length); } /** Show a warning about deleting files, asking for a confirmation if {@code batchMode} is false */ diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 4e1456548ab88..92faa0a71fda6 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -42,14 +42,12 @@ import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ObjectMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; @@ -132,15 +130,13 @@ private static Map initBuiltInMetadataMa Map builtInMetadataMappers; // Use a LinkedHashMap for metadataMappers because iteration order matters builtInMetadataMappers = new LinkedHashMap<>(); - // UID first so it will be the first stored field to load (so will benefit from "fields: []" early termination - builtInMetadataMappers.put(UidFieldMapper.NAME, new UidFieldMapper.TypeParser()); + // ID first so it will be the first stored field to load (so will benefit from "fields: []" early termination builtInMetadataMappers.put(IdFieldMapper.NAME, new IdFieldMapper.TypeParser()); builtInMetadataMappers.put(RoutingFieldMapper.NAME, new RoutingFieldMapper.TypeParser()); builtInMetadataMappers.put(IndexFieldMapper.NAME, new IndexFieldMapper.TypeParser()); builtInMetadataMappers.put(SourceFieldMapper.NAME, new SourceFieldMapper.TypeParser()); builtInMetadataMappers.put(TypeFieldMapper.NAME, new TypeFieldMapper.TypeParser()); builtInMetadataMappers.put(VersionFieldMapper.NAME, new VersionFieldMapper.TypeParser()); - builtInMetadataMappers.put(ParentFieldMapper.NAME, new ParentFieldMapper.TypeParser()); builtInMetadataMappers.put(SeqNoFieldMapper.NAME, new SeqNoFieldMapper.TypeParser()); //_field_names must be added last so that it has a chance to see all the other mappers builtInMetadataMappers.put(FieldNamesFieldMapper.NAME, new FieldNamesFieldMapper.TypeParser()); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java 
b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index bd6d75ea3d6dc..a0095613cdb50 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -229,6 +229,7 @@ public Collection getChildResources() { @Override public boolean equals(Object o) { if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; Key key = (Key) o; if (readerVersion != key.readerVersion) return false; if (!entity.getCacheIdentity().equals(key.entity.getCacheIdentity())) return false; diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index ff7c3009dcf4e..e7573ae9f71a0 100644 --- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -225,6 +225,7 @@ public static class Key { @Override public boolean equals(Object o) { if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; Key key = (Key) o; if (!indexCache.equals(key.indexCache)) return false; if (!readerKey.equals(key.readerKey)) return false; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 2189e6b2fb2a8..710b4bc46e235 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -38,13 +38,13 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.seqno.LocalCheckpointTracker; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index eb0db395a155f..244bb462df6ae 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -441,8 +441,8 @@ public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaDa store.ensureIndexHasHistoryUUID(); } // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2 - final String translogUUID = - Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); } catch (CorruptIndexException | IndexFormatTooNewException 
| IndexFormatTooOldException ex) { // this is a fatal exception at this stage. diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 89e945780c8f5..e31a97dc2c6ce 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -23,7 +23,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; @@ -56,7 +55,7 @@ public final class IngestDocument { private final Map sourceAndMetadata; private final Map ingestMetadata; - public IngestDocument(String index, String type, String id, String routing, String parent, + public IngestDocument(String index, String type, String id, String routing, Long version, VersionType versionType, Map source) { this.sourceAndMetadata = new HashMap<>(); this.sourceAndMetadata.putAll(source); @@ -66,9 +65,6 @@ public IngestDocument(String index, String type, String id, String routing, Stri if (routing != null) { this.sourceAndMetadata.put(MetaData.ROUTING.getFieldName(), routing); } - if (parent != null) { - this.sourceAndMetadata.put(MetaData.PARENT.getFieldName(), parent); - } if (version != null) { sourceAndMetadata.put(MetaData.VERSION.getFieldName(), version); } @@ -656,7 +652,6 @@ public enum MetaData { TYPE(TypeFieldMapper.NAME), ID(IdFieldMapper.NAME), ROUTING(RoutingFieldMapper.NAME), - PARENT(ParentFieldMapper.NAME), VERSION(VersionFieldMapper.NAME), VERSION_TYPE("_version_type"); diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java index 31bedd4ee1777..39e60b5812eaf 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineExecutionService.java @@ -53,23 +53,6 @@ public PipelineExecutionService(PipelineStore store, ThreadPool threadPool) { this.threadPool = threadPool; } - public void executeIndexRequest(IndexRequest request, Consumer failureHandler, Consumer completionHandler) { - Pipeline pipeline = getPipeline(request.getPipeline()); - threadPool.executor(ThreadPool.Names.INDEX).execute(new AbstractRunnable() { - - @Override - public void onFailure(Exception e) { - failureHandler.accept(e); - } - - @Override - protected void doRun() throws Exception { - innerExecute(request, pipeline); - completionHandler.accept(true); - } - }); - } - public void executeBulkRequest(Iterable actionRequests, BiConsumer itemFailureHandler, Consumer completionHandler) { @@ -164,11 +147,10 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws E String type = indexRequest.type(); String id = indexRequest.id(); String routing = indexRequest.routing(); - String parent = indexRequest.parent(); Long version = indexRequest.version(); VersionType versionType = indexRequest.versionType(); Map sourceAsMap = indexRequest.sourceAsMap(); - IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, parent, version, versionType, sourceAsMap); + IngestDocument ingestDocument = new IngestDocument(index, type, id, routing, version, 
versionType, sourceAsMap); pipeline.execute(ingestDocument); Map metadataMap = ingestDocument.extractMetadata(); @@ -178,7 +160,6 @@ private void innerExecute(IndexRequest indexRequest, Pipeline pipeline) throws E indexRequest.type((String) metadataMap.get(IngestDocument.MetaData.TYPE)); indexRequest.id((String) metadataMap.get(IngestDocument.MetaData.ID)); indexRequest.routing((String) metadataMap.get(IngestDocument.MetaData.ROUTING)); - indexRequest.parent((String) metadataMap.get(IngestDocument.MetaData.PARENT)); indexRequest.version(((Number) metadataMap.get(IngestDocument.MetaData.VERSION)).longValue()); if (metadataMap.get(IngestDocument.MetaData.VERSION_TYPE) != null) { indexRequest.versionType(VersionType.fromString((String) metadataMap.get(IngestDocument.MetaData.VERSION_TYPE))); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 102ae71978598..b02e1614bbdea 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -93,6 +93,7 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; @@ -537,7 +538,7 @@ protected Node(final Environment environment, Collection resourcesToClose.addAll(pluginLifecycleComponents); this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); client.initialize(injector.getInstance(new Key>() {}), - () -> clusterService.localNode().getId()); + () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); if (NetworkModule.HTTP_ENABLED.get(settings)) { logger.debug("initializing HTTP handlers ..."); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 0f8e29d7f3835..f1adf9273ffde 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -55,7 +55,6 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.compress.NotXContentException; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -75,6 +74,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index a959cd0efb87e..8c8139d5abd6a 100644 --- 
a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; @@ -39,6 +38,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.gateway.CorruptStateException; import java.io.ByteArrayOutputStream; diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index d6aba28ce27eb..111663497d766 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.path.PathTrie; @@ -35,6 +34,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.usage.UsageService; @@ -51,7 +51,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; import java.util.function.UnaryOperator; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 201a3b66b086d..08637e0dfce1d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -23,12 +23,18 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; public class RestCreateIndexAction extends BaseRestHandler { public RestCreateIndexAction(Settings settings, RestController 
controller) { @@ -43,9 +49,16 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index")); if (request.hasContent()) { - createIndexRequest.source(request.content(), request.getXContentType()); + Map sourceAsMap = XContentHelper.convertToMap(request.content(), false, request.getXContentType()).v2(); + if (includeTypeName == false && sourceAsMap.containsKey("mappings")) { + Map newSourceAsMap = new HashMap<>(sourceAsMap); + newSourceAsMap.put("mappings", Collections.singletonMap(MapperService.SINGLE_MAPPING_NAME, sourceAsMap.get("mappings"))); + sourceAsMap = newSourceAsMap; + } + createIndexRequest.source(sourceAsMap, LoggingDeprecationHandler.INSTANCE); } createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout())); createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 99b8215025ec7..62356824365ae 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -77,6 +77,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final String[] types = request.paramAsStringArrayOrEmptyIfAll("type"); final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); @@ -141,13 +142,29 @@ public RestResponse buildResponse(final GetMappingsResponse response, final XCon for (final ObjectObjectCursor> indexEntry : mappingsByIndex) { builder.startObject(indexEntry.key); { - builder.startObject("mappings"); - { + if (includeTypeName == false) { + MappingMetaData mappings = null; for (final ObjectObjectCursor typeEntry : indexEntry.value) { - builder.field(typeEntry.key, typeEntry.value.sourceAsMap()); + if (typeEntry.key.equals("_default_") == false) { + assert mappings == null; + mappings = typeEntry.value; + } } + if (mappings == null) { + // no mappings yet + builder.startObject("mappings").endObject(); + } else { + builder.field("mappings", mappings.sourceAsMap()); + } + } else { + builder.startObject("mappings"); + { + for (final ObjectObjectCursor typeEntry : indexEntry.value) { + builder.field(typeEntry.key, typeEntry.value.sourceAsMap()); + } + } + builder.endObject(); } - builder.endObject(); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index 6d3804eddc90e..dc77cf52a8cfc 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.client.node.NodeClient; import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -67,8 +68,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); PutMappingRequest putMappingRequest = putMappingRequest(Strings.splitStringByCommaToArray(request.param("index"))); - putMappingRequest.type(request.param("type")); + final String type = request.param("type"); + if (type != null && includeTypeName == false) { + throw new IllegalArgumentException("Cannot set include_type_name=false and provide a type at the same time"); + } + putMappingRequest.type(includeTypeName ? type : MapperService.SINGLE_MAPPING_NAME); putMappingRequest.source(request.requiredContent(), request.getXContentType()); putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout())); putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 93090ba25eee6..68f696b180267 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -57,21 +57,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); - - Map settings = new HashMap<>(); - try (XContentParser parser = request.contentParser()) { - Map bodySettings = parser.map(); - Object innerBodySettings = bodySettings.get("settings"); - // clean up in case the body is wrapped with "settings" : { ... 
} - if (innerBodySettings instanceof Map) { - @SuppressWarnings("unchecked") - Map innerBodySettingsMap = (Map) innerBodySettings; - settings.putAll(innerBodySettingsMap); - } else { - settings.putAll(bodySettings); - } - } - updateSettingsRequest.settings(settings); + updateSettingsRequest.fromXContent(request.contentParser()); return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 671917c380c0d..8db9710af3139 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -24,10 +24,8 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Requests; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -49,8 +47,6 @@ * */ public class RestBulkAction extends BaseRestHandler { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(RestBulkAction.class)); private final boolean allowExplicitIndex; @@ -76,14 +72,9 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); - String defaultType = request.param("type"); + String defaultType = request.param("type", MapperService.SINGLE_MAPPING_NAME); String defaultRouting = request.param("routing"); FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); - String fieldsParam = request.param("fields"); - if (fieldsParam != null) { - DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead"); - } - String[] defaultFields = fieldsParam != null ? 
Strings.commaDelimitedListToStringArray(fieldsParam) : null;
 String defaultPipeline = request.param("pipeline");
 String waitForActiveShards = request.param("wait_for_active_shards");
 if (waitForActiveShards != null) {
@@ -91,7 +82,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
 }
 bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
 bulkRequest.setRefreshPolicy(request.param("refresh"));
- bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting, defaultFields,
+ bulkRequest.add(request.requiredContent(), defaultIndex, defaultType, defaultRouting,
 defaultFetchSourceContext, defaultPipeline, null, allowExplicitIndex, request.getXContentType());
 return channel -> client.bulk(bulkRequest, new RestStatusToXContentListener<>(channel));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
index 0db997c1dae5c..f6b0878c03802 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java
@@ -49,7 +49,6 @@ public String getName() {
 public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
 DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), request.param("type"), request.param("id"));
 deleteRequest.routing(request.param("routing"));
- deleteRequest.parent(request.param("parent"));
 deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT));
 deleteRequest.setRefreshPolicy(request.param("refresh"));
 deleteRequest.version(RestActions.parseVersion(request));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
index 8f86d9ae600a9..e1d3f7557783c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java
@@ -58,7 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
 final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
 getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
 getRequest.routing(request.param("routing"));
- getRequest.parent(request.param("parent"));
 getRequest.preference(request.param("preference"));
 getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
 if (request.param("fields") != null) {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
index 9e61885cab252..39ff9ff86665a 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetSourceAction.java
@@ -64,7 +64,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
 final GetRequest getRequest = new GetRequest(request.param("index"), request.param("type"), request.param("id"));
 getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh()));
 getRequest.routing(request.param("routing"));
- getRequest.parent(request.param("parent"));
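// Editorial aside, not part of the original patch: the document handlers above
// drop the `parent` request parameter because, with `join` fields, a child
// document's parent id travels as routing instead. A minimal client-side sketch
// under that assumption; the index, type and id values are hypothetical.
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;

class ParentToRoutingSketch {
    static void addressChildDocs() {
        // previously the REST layer copied a `parent` param onto the request;
        // now the parent id is supplied as routing
        GetRequest get = new GetRequest("posts", "_doc", "answer-1").routing("question-1");
        // deletes follow the same rule: route by the parent document's id
        DeleteRequest delete = new DeleteRequest("posts", "_doc", "answer-1").routing("question-1");
    }
}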
getRequest.preference(request.param("preference"));
 getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
index b29df7a281a86..5cc514f744098 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestIndexAction.java
@@ -80,7 +80,6 @@ void validateOpType(String opType) {
 public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
 IndexRequest indexRequest = new IndexRequest(request.param("index"), request.param("type"), request.param("id"));
 indexRequest.routing(request.param("routing"));
- indexRequest.parent(request.param("parent"));
 indexRequest.setPipeline(request.param("pipeline"));
 indexRequest.source(request.requiredContent(), request.getXContentType());
 indexRequest.timeout(request.paramAsTime("timeout", IndexRequest.DEFAULT_TIMEOUT));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java
index a12b7ce16a724..2a93b5d390c19 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java
@@ -85,7 +85,6 @@ public static void readURIParameters(TermVectorsRequest termVectorsRequest, Rest
 termVectorsRequest.realtime(request.paramAsBoolean("realtime", termVectorsRequest.realtime()));
 termVectorsRequest.version(RestActions.parseVersion(request, termVectorsRequest.version()));
 termVectorsRequest.versionType(VersionType.fromString(request.param("version_type"), termVectorsRequest.versionType()));
- termVectorsRequest.parent(request.param("parent"));
 termVectorsRequest.preference(request.param("preference"));
 termVectorsRequest.termStatistics(request.paramAsBoolean("termStatistics", termVectorsRequest.termStatistics()));
 termVectorsRequest.termStatistics(request.paramAsBoolean("term_statistics", termVectorsRequest.termStatistics()));
diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
index 3204ce68abbc1..de7c1fad5b26a 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java
@@ -23,9 +23,6 @@
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -40,8 +37,6 @@
 import static org.elasticsearch.rest.RestRequest.Method.POST;
 public class RestUpdateAction extends BaseRestHandler {
- private static final DeprecationLogger DEPRECATION_LOGGER =
- new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));
 public RestUpdateAction(Settings settings, RestController controller) {
 super(settings);
@@ -57,7 +52,6 @@ public String getName() {
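// Editorial aside, not part of the original patch: tying back to the
// include_type_name parameter introduced earlier in this patch
// (RestCreateIndexAction, RestGetMappingAction, RestPutMappingAction), a
// typeless put-mappings call is filed under MapperService.SINGLE_MAPPING_NAME
// ("_doc"), which is what the REST layer substitutes when
// include_type_name=false. A hedged sketch of the transport-level equivalent;
// the index name and mapping body are hypothetical.
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MapperService;

class TypelessPutMappingSketch {
    static PutMappingRequest build() {
        return new PutMappingRequest("my-index")
                .type(MapperService.SINGLE_MAPPING_NAME) // "_doc", the single mapping type
                .source("{\"properties\":{\"title\":{\"type\":\"text\"}}}", XContentType.JSON);
    }
}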
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { UpdateRequest updateRequest = new UpdateRequest(request.param("index"), request.param("type"), request.param("id")); updateRequest.routing(request.param("routing")); - updateRequest.parent(request.param("parent")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); updateRequest.setRefreshPolicy(request.param("refresh")); String waitForActiveShards = request.param("wait_for_active_shards"); @@ -66,15 +60,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } updateRequest.docAsUpsert(request.paramAsBoolean("doc_as_upsert", updateRequest.docAsUpsert())); FetchSourceContext fetchSourceContext = FetchSourceContext.parseFromRestRequest(request); - String sField = request.param("fields"); - if (sField != null && fetchSourceContext != null) { - throw new IllegalArgumentException("[fields] and [_source] cannot be used in the same request"); - } - if (sField != null) { - DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead"); - String[] sFields = Strings.splitStringByCommaToArray(sField); - updateRequest.fields(sFields); - } else if (fetchSourceContext != null) { + if (fetchSourceContext != null) { updateRequest.fetchSource(fetchSourceContext); } @@ -88,14 +74,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndexRequest upsertRequest = updateRequest.upsertRequest(); if (upsertRequest != null) { upsertRequest.routing(request.param("routing")); - upsertRequest.parent(request.param("parent")); upsertRequest.version(RestActions.parseVersion(request)); upsertRequest.versionType(VersionType.fromString(request.param("version_type"), upsertRequest.versionType())); } IndexRequest doc = updateRequest.doc(); if (doc != null) { doc.routing(request.param("routing")); - doc.parent(request.param("parent")); // order is important, set it after routing, so it will set the routing doc.version(RestActions.parseVersion(request)); doc.versionType(VersionType.fromString(request.param("version_type"), doc.versionType())); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 7b2c375a1ee4d..513643f99ab03 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -94,7 +94,7 @@ public static MultiSearchRequest parseRequest(RestRequest restRequest, boolean a parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, parser) -> { - searchRequest.source(SearchSourceBuilder.fromXContent(parser)); + searchRequest.source(SearchSourceBuilder.fromXContent(parser, false)); multiRequest.add(searchRequest); }); List requests = multiRequest.requests(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 011b24309f8f4..6f0c033a0cf22 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import 
org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; @@ -55,6 +57,7 @@ public class RestSearchAction extends BaseRestHandler { public static final String TYPED_KEYS_PARAM = "typed_keys"; private static final Set RESPONSE_PARAMS = Collections.singleton(TYPED_KEYS_PARAM); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestSearchAction.class)); public RestSearchAction(Settings settings, RestController controller) { super(settings); @@ -109,7 +112,7 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r } searchRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); if (requestContentParser != null) { - searchRequest.source().parseXContent(requestContentParser); + searchRequest.source().parseXContent(requestContentParser, true); } final int batchedReduceSize = request.paramAsInt("batched_reduce_size", searchRequest.getBatchedReduceSize()); @@ -128,7 +131,7 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r // only set if we have the parameter passed to override the cluster-level default searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); } - + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types // from the REST layer. these modes are an internal optimization and should // not be specified explicitly by the user. @@ -147,7 +150,11 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } - searchRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + String types = request.param("type"); + if (types != null) { + DEPRECATION_LOGGER.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); + } + searchRequest.types(Strings.splitStringByCommaToArray(types)); searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); diff --git a/server/src/main/java/org/elasticsearch/search/Scroll.java b/server/src/main/java/org/elasticsearch/search/Scroll.java index d4cd7cb803039..b5f22a6443a06 100644 --- a/server/src/main/java/org/elasticsearch/search/Scroll.java +++ b/server/src/main/java/org/elasticsearch/search/Scroll.java @@ -38,7 +38,7 @@ public final class Scroll implements Writeable { private final TimeValue keepAlive; public Scroll(StreamInput in) throws IOException { - this.keepAlive = new TimeValue(in); + this.keepAlive = in.readTimeValue(); } /** @@ -57,7 +57,7 @@ public TimeValue keepAlive() { @Override public void writeTo(StreamOutput out) throws IOException { - keepAlive.writeTo(out); + out.writeTimeValue(keepAlive); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 323cca7961737..b401ff5da1dba 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -228,7 +228,6 @@ import org.elasticsearch.search.fetch.subphase.ExplainFetchSubPhase; import 
org.elasticsearch.search.fetch.subphase.FetchSourceSubPhase; import org.elasticsearch.search.fetch.subphase.MatchedQueriesFetchSubPhase; -import org.elasticsearch.search.fetch.subphase.ParentFieldSubFetchPhase; import org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase; import org.elasticsearch.search.fetch.subphase.VersionFetchSubPhase; import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter; @@ -261,6 +260,7 @@ import static java.util.Collections.unmodifiableMap; import static java.util.Objects.requireNonNull; +import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder; /** * Sets up things that can be done at search time like queries, aggregations, and suggesters. @@ -688,7 +688,6 @@ private void registerFetchSubPhases(List plugins) { registerFetchSubPhase(new VersionFetchSubPhase()); registerFetchSubPhase(new MatchedQueriesFetchSubPhase()); registerFetchSubPhase(new HighlightPhase(settings, highlighters)); - registerFetchSubPhase(new ParentFieldSubFetchPhase()); FetchPhaseConstructionContext context = new FetchPhaseConstructionContext(highlighters); registerFromPlugin(plugins, p -> p.getFetchSubPhases(context), this::registerFetchSubPhase); @@ -743,6 +742,7 @@ private void registerQueryParsers(List plugins) { FieldMaskingSpanQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(SpanFirstQueryBuilder.NAME, SpanFirstQueryBuilder::new, SpanFirstQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(SpanNearQueryBuilder.NAME, SpanNearQueryBuilder::new, SpanNearQueryBuilder::fromXContent)); + registerQuery(new QuerySpec<>(SpanGapQueryBuilder.NAME, SpanGapQueryBuilder::new, SpanGapQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(SpanOrQueryBuilder.NAME, SpanOrQueryBuilder::new, SpanOrQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new, MoreLikeThisQueryBuilder::fromXContent)); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 82508f56a64a2..a742a3a06ae13 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -210,7 +210,7 @@ private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAli if (defaultKeepAlive.millis() > maxKeepAlive.millis()) { throw new IllegalArgumentException("Default keep alive setting for scroll [" + DEFAULT_KEEPALIVE_SETTING.getKey() + "]" + " should be smaller than max keep alive [" + MAX_KEEPALIVE_SETTING.getKey() + "], " + - "was (" + defaultKeepAlive.format() + " > " + maxKeepAlive.format() + ")"); + "was (" + defaultKeepAlive + " > " + maxKeepAlive + ")"); } } @@ -673,8 +673,8 @@ public void freeAllScrollContexts() { private void contextScrollKeepAlive(SearchContext context, long keepAlive) throws IOException { if (keepAlive > maxKeepAlive) { throw new IllegalArgumentException( - "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive).format() + ") is too large. " + - "It must be less than (" + TimeValue.timeValueMillis(maxKeepAlive).format() + "). " + + "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive) + ") is too large. " + + "It must be less than (" + TimeValue.timeValueMillis(maxKeepAlive) + "). 
" + "This limit can be set by changing the [" + MAX_KEEPALIVE_SETTING.getKey() + "] cluster level setting."); } context.keepAlive(keepAlive); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java index cd46b90889d49..bf73b6e199eaf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -26,7 +26,11 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.StringFieldType; +import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.LeafBucketCollector; import java.io.IOException; @@ -40,8 +44,8 @@ class BinaryValuesSource extends SingleDimensionValuesSource { private BytesRef currentValue; BinaryValuesSource(MappedFieldType fieldType, CheckedFunction docValuesFunc, - int size, int reverseMul) { - super(fieldType, size, reverseMul); + DocValueFormat format, Object missing, int size, int reverseMul) { + super(format, fieldType, missing, size, reverseMul); this.docValuesFunc = docValuesFunc; this.values = new BytesRef[size]; } @@ -72,10 +76,8 @@ int compareValues(BytesRef v1, BytesRef v2) { @Override void setAfter(Comparable value) { - if (value.getClass() == BytesRef.class) { - afterValue = (BytesRef) value; - } else if (value.getClass() == String.class) { - afterValue = new BytesRef((String) value); + if (value.getClass() == String.class) { + afterValue = format.parseBytesRef(value.toString()); } else { throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName()); } @@ -120,7 +122,8 @@ public void collect(int doc, long bucket) throws IOException { @Override SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { if (checkIfSortedDocsIsApplicable(reader, fieldType) == false || - (query != null && query.getClass() != MatchAllDocsQuery.class)) { + fieldType instanceof StringFieldType == false || + (query != null && query.getClass() != MatchAllDocsQuery.class)) { return null; } return new TermsSortedDocsProducer(fieldType.name()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index 04864e7419def..472697abe788a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -263,30 +263,84 @@ private static SingleDimensionValuesSource[] createValuesSources(BigArrays bi final int reverseMul = configs[i].reverseMul(); if (configs[i].valuesSource() instanceof ValuesSource.Bytes.WithOrdinals && reader instanceof DirectoryReader) { ValuesSource.Bytes.WithOrdinals vs = (ValuesSource.Bytes.WithOrdinals) configs[i].valuesSource(); - sources[i] = new GlobalOrdinalValuesSource(bigArrays, configs[i].fieldType(), vs::globalOrdinalsValues, size, 
reverseMul); + sources[i] = new GlobalOrdinalValuesSource( + bigArrays, + configs[i].fieldType(), + vs::globalOrdinalsValues, + configs[i].format(), + configs[i].missing(), + size, + reverseMul + ); + if (i == 0 && sources[i].createSortedDocsProducerOrNull(reader, query) != null) { // this the leading source and we can optimize it with the sorted docs producer but // we don't want to use global ordinals because the number of visited documents // should be low and global ordinals need one lookup per visited term. Releasables.close(sources[i]); - sources[i] = new BinaryValuesSource(configs[i].fieldType(), vs::bytesValues, size, reverseMul); + sources[i] = new BinaryValuesSource( + configs[i].fieldType(), + vs::bytesValues, + configs[i].format(), + configs[i].missing(), + size, + reverseMul + ); } } else if (configs[i].valuesSource() instanceof ValuesSource.Bytes) { ValuesSource.Bytes vs = (ValuesSource.Bytes) configs[i].valuesSource(); - sources[i] = new BinaryValuesSource(configs[i].fieldType(), vs::bytesValues, size, reverseMul); + sources[i] = new BinaryValuesSource( + configs[i].fieldType(), + vs::bytesValues, + configs[i].format(), + configs[i].missing(), + size, + reverseMul + ); + } else if (configs[i].valuesSource() instanceof ValuesSource.Numeric) { final ValuesSource.Numeric vs = (ValuesSource.Numeric) configs[i].valuesSource(); if (vs.isFloatingPoint()) { - sources[i] = new DoubleValuesSource(bigArrays, configs[i].fieldType(), vs::doubleValues, size, reverseMul); + sources[i] = new DoubleValuesSource( + bigArrays, + configs[i].fieldType(), + vs::doubleValues, + configs[i].format(), + configs[i].missing(), + size, + reverseMul + ); + } else { if (vs instanceof RoundingValuesSource) { - sources[i] = new LongValuesSource(bigArrays, configs[i].fieldType(), vs::longValues, - ((RoundingValuesSource) vs)::round, configs[i].format(), size, reverseMul); + sources[i] = new LongValuesSource( + bigArrays, + configs[i].fieldType(), + vs::longValues, + ((RoundingValuesSource) vs)::round, + configs[i].format(), + configs[i].missing(), + size, + reverseMul + ); + } else { - sources[i] = new LongValuesSource(bigArrays, configs[i].fieldType(), vs::longValues, - (value) -> value, configs[i].format(), size, reverseMul); + sources[i] = new LongValuesSource( + bigArrays, + configs[i].fieldType(), + vs::longValues, + (value) -> value, + configs[i].format(), + configs[i].missing(), + size, + reverseMul + ); + } } + } else { + throw new IllegalArgumentException("Unknown value source: " + configs[i].valuesSource().getClass().getName() + + " for field: " + sources[i].fieldType.name()); } } return sources; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index d19729293a912..994f8c43a83ac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -291,6 +291,7 @@ public String format() { public final CompositeValuesSourceConfig build(SearchContext context) throws IOException { ValuesSourceConfig config = ValuesSourceConfig.resolve(context.getQueryShardContext(), valueType, field, script, missing, null, format); + if (config.unmapped() && field != null && config.missing() == null) { // this source cannot produce any values so we refuse to build // 
since composite buckets are not created on null values diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index 8756eed6feb78..aad713b305d02 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -32,13 +32,25 @@ class CompositeValuesSourceConfig { private final ValuesSource vs; private final DocValueFormat format; private final int reverseMul; + private final Object missing; - CompositeValuesSourceConfig(String name, @Nullable MappedFieldType fieldType, ValuesSource vs, DocValueFormat format, SortOrder order) { + /** + * Creates a new {@link CompositeValuesSourceConfig}. + * @param name The name of the source. + * @param fieldType The field type or null if the source is a script. + * @param vs The underlying {@link ValuesSource}. + * @param format The {@link DocValueFormat} of this source. + * @param order The sort order associated with this source. + * @param missing The missing value or null if documents with missing value should be ignored. + */ + CompositeValuesSourceConfig(String name, @Nullable MappedFieldType fieldType, ValuesSource vs, DocValueFormat format, + SortOrder order, @Nullable Object missing) { this.name = name; this.fieldType = fieldType; this.vs = vs; this.format = format; this.reverseMul = order == SortOrder.ASC ? 1 : -1; + this.missing = missing; } /** @@ -70,6 +82,13 @@ DocValueFormat format() { return format; } + /** + * The missing value for this configuration or null if documents with missing value should be ignored. + */ + Object missing() { + return missing; + } + /** * The sort order for the values source (e.g. -1 for descending and 1 for ascending). */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index fb2999bbd0b33..0b373f15d5ccb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -33,7 +33,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -227,7 +226,7 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSo // is specified in the builder. final DocValueFormat docValueFormat = format() == null ? DocValueFormat.RAW : config.format(); final MappedFieldType fieldType = config.fieldContext() != null ? 
config.fieldContext().fieldType() : null; - return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order()); + return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order(), missing()); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index baf63a8d65fee..0f74544fe2bc5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.LeafBucketCollector; import java.io.IOException; @@ -42,8 +43,8 @@ class DoubleValuesSource extends SingleDimensionValuesSource { DoubleValuesSource(BigArrays bigArrays, MappedFieldType fieldType, CheckedFunction docValuesFunc, - int size, int reverseMul) { - super(fieldType, size, reverseMul); + DocValueFormat format, Object missing, int size, int reverseMul) { + super(format, fieldType, missing, size, reverseMul); this.docValuesFunc = docValuesFunc; this.values = bigArrays.newDoubleArray(size, false); } @@ -77,7 +78,9 @@ void setAfter(Comparable value) { if (value instanceof Number) { afterValue = ((Number) value).doubleValue(); } else { - afterValue = Double.parseDouble(value.toString()); + afterValue = format.parseDouble(value.toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java index e3ae3dca1bd63..a83f92e21fdc8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -30,6 +30,8 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.StringFieldType; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.LeafBucketCollector; import java.io.IOException; @@ -52,8 +54,8 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource { GlobalOrdinalValuesSource(BigArrays bigArrays, MappedFieldType type, CheckedFunction docValuesFunc, - int size, int reverseMul) { - super(type, size, reverseMul); + DocValueFormat format, Object missing, int size, int reverseMul) { + super(format, type, missing, size, reverseMul); this.docValuesFunc = docValuesFunc; this.values = bigArrays.newLongArray(size, false); } @@ -87,10 +89,8 @@ int compareCurrentWithAfter() { @Override void setAfter(Comparable value) { - if (value instanceof BytesRef) { - afterValue = (BytesRef) value; - } else if (value instanceof String) { - afterValue = new BytesRef(value.toString()); + if 
(value.getClass() == String.class) { + afterValue = format.parseBytesRef(value.toString()); } else { throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName()); } @@ -164,7 +164,8 @@ public void collect(int doc, long bucket) throws IOException { @Override SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) { if (checkIfSortedDocsIsApplicable(reader, fieldType) == false || - (query != null && query.getClass() != MatchAllDocsQuery.class)) { + fieldType instanceof StringFieldType == false || + (query != null && query.getClass() != MatchAllDocsQuery.class)) { return null; } return new TermsSortedDocsProducer(fieldType.name()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index 1dc0aa596d790..fb3585c87391a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -115,7 +115,7 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSo ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig; final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval); final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null; - return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order()); + return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order(), missing()); } else { throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 96d0b02780948..20e1fa4794786 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -47,19 +47,16 @@ class LongValuesSource extends SingleDimensionValuesSource { private final CheckedFunction docValuesFunc; private final LongUnaryOperator rounding; - // handles "format" for date histogram source - private final DocValueFormat format; private final LongArray values; private long currentValue; LongValuesSource(BigArrays bigArrays, MappedFieldType fieldType, CheckedFunction docValuesFunc, - LongUnaryOperator rounding, DocValueFormat format, int size, int reverseMul) { - super(fieldType, size, reverseMul); + LongUnaryOperator rounding, DocValueFormat format, Object missing, int size, int reverseMul) { + super(format, fieldType, missing, size, reverseMul); this.docValuesFunc = docValuesFunc; this.rounding = rounding; - this.format = format; this.values = bigArrays.newLongArray(size, false); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index efedce7db2afa..bb7314eed147f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java 
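// Editorial aside, not part of the original patch: the `missing` value threaded
// through these composite value sources means documents lacking the field are
// bucketed under the configured substitute instead of being skipped, and such
// sources forgo the sorted-docs optimization (see checkIfSortedDocsIsApplicable
// below). A usage sketch; field and aggregation names are hypothetical.
import java.util.Collections;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;

class CompositeMissingSketch {
    static CompositeAggregationBuilder build() {
        TermsValuesSourceBuilder byProduct = new TermsValuesSourceBuilder("product")
                .field("product.keyword")
                .missing("unknown"); // absent values land in an "unknown" bucket
        return new CompositeAggregationBuilder("by_product", Collections.singletonList(byProduct));
    }
}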
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.sort.SortOrder; @@ -35,21 +36,31 @@ * A source that can record and compare values of similar type. */ abstract class SingleDimensionValuesSource> implements Releasable { + protected final DocValueFormat format; + @Nullable + protected final MappedFieldType fieldType; + @Nullable + protected final Object missing; + protected final int size; protected final int reverseMul; + protected T afterValue; - @Nullable - protected MappedFieldType fieldType; /** - * Ctr + * Creates a new {@link SingleDimensionValuesSource}. * - * @param fieldType The fieldType associated with the source. + * @param format The format of the source. + * @param fieldType The field type or null if the source is a script. + * @param missing The missing value or null if documents with missing value should be ignored. * @param size The number of values to record. * @param reverseMul -1 if the natural order ({@link SortOrder#ASC} should be reversed. */ - SingleDimensionValuesSource(@Nullable MappedFieldType fieldType, int size, int reverseMul) { + SingleDimensionValuesSource(DocValueFormat format, @Nullable MappedFieldType fieldType, @Nullable Object missing, + int size, int reverseMul) { + this.format = format; this.fieldType = fieldType; + this.missing = missing; this.size = size; this.reverseMul = reverseMul; this.afterValue = null; @@ -127,6 +138,7 @@ abstract LeafBucketCollector getLeafCollector(Comparable value, */ protected boolean checkIfSortedDocsIsApplicable(IndexReader reader, MappedFieldType fieldType) { if (fieldType == null || + missing != null || fieldType.indexOptions() == IndexOptions.NONE || // inverse of the natural order reverseMul == -1) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 21ab14fe27e21..60fcf43a086fb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -24,7 +24,9 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; @@ -84,6 +86,13 @@ protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSo vs = ValuesSource.Numeric.EMPTY; } final MappedFieldType fieldType = config.fieldContext() != null ? 
config.fieldContext().fieldType() : null; - return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order()); + final DocValueFormat format; + if (format() == null && fieldType instanceof DateFieldMapper.DateFieldType) { + // defaults to the raw format on date fields (preserve timestamp as longs). + format = DocValueFormat.RAW; + } else { + format = config.format(); + } + return new CompositeValuesSourceConfig(name, fieldType, vs, format, order(), missing()); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java index e522392cf4b9a..56cf71b82cfdd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java @@ -125,7 +125,7 @@ public boolean equals(Object other) { } InternalBucket that = (InternalBucket) other; return Objects.equals(key, that.key) - && Objects.equals(keyed, keyed) + && Objects.equals(keyed, that.keyed) && Objects.equals(docCount, that.docCount) && Objects.equals(aggregations, that.aggregations); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index f91dde8877093..2f66531834d38 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -54,6 +54,8 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.common.geo.GeoUtils.parsePrecision; + public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder { public static final String NAME = "geohash_grid"; @@ -64,29 +66,8 @@ public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder(GeoGridAggregationBuilder.NAME); ValuesSourceParserHelper.declareGeoFields(PARSER, false, false); - PARSER.declareField((parser, builder, context) -> { - XContentParser.Token token = parser.currentToken(); - if (token.equals(XContentParser.Token.VALUE_NUMBER)) { - builder.precision(XContentMapValues.nodeIntegerValue(parser.intValue())); - } else { - String precision = parser.text(); - try { - // we want to treat simple integer strings as precision levels, not distances - builder.precision(XContentMapValues.nodeIntegerValue(Integer.parseInt(precision))); - } catch (NumberFormatException e) { - // try to parse as a distance value - try { - builder.precision(GeoUtils.geoHashLevelsForPrecision(precision)); - } catch (NumberFormatException e2) { - // can happen when distance unit is unknown, in this case we simply want to know the reason - throw e2; - } catch (IllegalArgumentException e3) { - // this happens when distance too small, so precision > 12. 
We'd like to see the original string - throw new IllegalArgumentException("precision too high [" + precision + "]", e3); - } - } - } - }, GeoHashGridParams.FIELD_PRECISION, org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); + PARSER.declareField((parser, builder, context) -> builder.precision(parsePrecision(parser)), GeoHashGridParams.FIELD_PRECISION, + org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); PARSER.declareInt(GeoGridAggregationBuilder::size, GeoHashGridParams.FIELD_SIZE); PARSER.declareInt(GeoGridAggregationBuilder::shardSize, GeoHashGridParams.FIELD_SHARD_SIZE); } @@ -133,7 +114,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } public GeoGridAggregationBuilder precision(int precision) { - this.precision = GeoHashGridParams.checkPrecision(precision); + this.precision = GeoUtils.checkPrecisionRange(precision); return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java index e4b8d753c4018..ff3b21a3a7bae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java @@ -30,15 +30,6 @@ final class GeoHashGridParams { static final ParseField FIELD_SIZE = new ParseField("size"); static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size"); - - static int checkPrecision(int precision) { - if ((precision < 1) || (precision > 12)) { - throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision - + ". Must be between 1 and 12."); - } - return precision; - } - private GeoHashGridParams() { throw new AssertionError("No instances intended"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java index 8b5858dcd9513..5c90832bb150a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregationBuilder.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.HDRPercentilesAggregatorFactory; @@ -50,7 +49,7 @@ public class PercentilesAggregationBuilder extends LeafOnly { public static final String NAME = Percentiles.TYPE_NAME; - public static final double[] DEFAULT_PERCENTS = new double[] { 1, 5, 25, 50, 75, 95, 99 }; + private static final double[] DEFAULT_PERCENTS = new double[] { 1, 5, 25, 50, 75, 95, 99 }; public static final ParseField PERCENTS_FIELD = new ParseField("percents"); public static final ParseField KEYED_FIELD = new ParseField("keyed"); public static final ParseField METHOD_FIELD = new ParseField("method"); diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java 
b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 815abf1b7a7c4..582c6ca28f8fd 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -111,8 +111,12 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField ALL_FIELDS_FIELDS = new ParseField("all_fields"); public static SearchSourceBuilder fromXContent(XContentParser parser) throws IOException { + return fromXContent(parser, true); + } + + public static SearchSourceBuilder fromXContent(XContentParser parser, boolean checkTrailingTokens) throws IOException { SearchSourceBuilder builder = new SearchSourceBuilder(); - builder.parseXContent(parser); + builder.parseXContent(parser, checkTrailingTokens); return builder; } @@ -220,7 +224,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { } suggestBuilder = in.readOptionalWriteable(SuggestBuilder::new); terminateAfter = in.readVInt(); - timeout = in.readOptionalWriteable(TimeValue::new); + timeout = in.readOptionalTimeValue(); trackScores = in.readBoolean(); version = in.readOptionalBoolean(); extBuilders = in.readNamedWriteableList(SearchExtBuilder.class); @@ -276,7 +280,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalWriteable(suggestBuilder); out.writeVInt(terminateAfter); - out.writeOptionalWriteable(timeout); + out.writeOptionalTimeValue(timeout); out.writeBoolean(trackScores); out.writeOptionalBoolean(version); out.writeNamedWriteableList(extBuilders); @@ -951,12 +955,19 @@ private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder return rewrittenBuilder; } + public void parseXContent(XContentParser parser) throws IOException { + parseXContent(parser, true); + } + /** * Parse some xContent into this SearchSourceBuilder, overwriting any values specified in the xContent. Use this if you need to set up - * different defaults than a regular SearchSourceBuilder would have and use - * {@link #fromXContent(XContentParser)} if you have normal defaults. + * different defaults than a regular SearchSourceBuilder would have and use {@link #fromXContent(XContentParser, boolean)} if you have + * normal defaults. + * + * @param parser The xContent parser. + * @param checkTrailingTokens If true, throws a parsing exception when extra tokens are found after the main object.
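Making the trailing-token check a flag rather than unconditional keeps callers working that deliberately read several objects from one stream. A hypothetical such caller, not part of this change (the helper name is illustrative; only fromXContent(parser, false) comes from the diff):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    // Hypothetical helper: parse a body that concatenates several search
    // sources; tokens after each object are expected here, so the strict
    // trailing-token check is disabled for every object.
    static List<SearchSourceBuilder> parseMany(XContentParser parser) throws IOException {
        List<SearchSourceBuilder> sources = new ArrayList<>();
        while (parser.nextToken() == XContentParser.Token.START_OBJECT) {
            sources.add(SearchSourceBuilder.fromXContent(parser, false));
        }
        return sources;
    }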
*/ - public void parseXContent(XContentParser parser) throws IOException { + public void parseXContent(XContentParser parser, boolean checkTrailingTokens) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = null; if (token != XContentParser.Token.START_OBJECT && (token = parser.nextToken()) != XContentParser.Token.START_OBJECT) { @@ -1106,6 +1117,12 @@ public void parseXContent(XContentParser parser) throws IOException { parser.getTokenLocation()); } } + if (checkTrailingTokens) { + token = parser.nextToken(); + if (token != null) { + throw new ParsingException(parser.getTokenLocation(), "Unexpected token [" + token + "] found after the main object."); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java deleted file mode 100644 index 6015e3c90211d..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhase.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.fetch.subphase; - -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.ReaderUtil; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParentFieldMapper; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -public final class ParentFieldSubFetchPhase implements FetchSubPhase { - @Override - public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { - if (context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false) { - return ; - } - - hits = hits.clone(); // don't modify the incoming hits - Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); - - MapperService mapperService = context.mapperService(); - Set parentFields = new HashSet<>(); - for (SearchHit hit : hits) { - ParentFieldMapper parentFieldMapper = mapperService.documentMapper(hit.getType()).parentFieldMapper(); - if (parentFieldMapper.active()) { - parentFields.add(parentFieldMapper.name()); - } - } - - int lastReaderId = -1; - Map docValuesMap = new HashMap<>(); - for (SearchHit hit : hits) { - ParentFieldMapper parentFieldMapper = mapperService.documentMapper(hit.getType()).parentFieldMapper(); - if (parentFieldMapper.active() == false) { - continue; - } - int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves()); - LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId); - if (lastReaderId != readerId) { - docValuesMap.clear(); - for (String field : parentFields) { - docValuesMap.put(field, subReaderContext.reader().getSortedDocValues(field)); - } - lastReaderId = readerId; - } - int docId = hit.docId() - subReaderContext.docBase; - SortedDocValues values = docValuesMap.get(parentFieldMapper.name()); - if (values != null && values.advanceExact(docId)) { - BytesRef binaryValue = values.binaryValue(); - String value = binaryValue.length > 0 ? binaryValue.utf8ToString() : null; - if (value == null) { - // hit has no _parent field. Can happen for nested inner hits if parent hit is a p/c document. - continue; - } - Map fields = hit.fieldsOrNull(); - if (fields == null) { - fields = new HashMap<>(); - hit.fields(fields); - } - fields.put(ParentFieldMapper.NAME, new DocumentField(ParentFieldMapper.NAME, Collections.singletonList(value))); - } - } - } - - public static String getParentId(ParentFieldMapper fieldMapper, LeafReader reader, int docId) { - try { - SortedDocValues docValues = reader.getSortedDocValues(fieldMapper.name()); - if (docValues == null || docValues.advanceExact(docId) == false) { - // hit has no _parent field. - return null; - } - BytesRef parentId = docValues.binaryValue(); - return parentId.length > 0 ? 
parentId.utf8ToString() : null; - } catch (IOException e) { - throw ExceptionsHelper.convertToElastic(e); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index cc00c2faac72d..ff332c7d73461 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -75,9 +75,9 @@ public class HighlightBuilder extends AbstractHighlighterBuilder"}; + static final String[] DEFAULT_PRE_TAGS = new String[]{""}; /** the default closing tag */ - public static final String[] DEFAULT_POST_TAGS = new String[]{""}; + static final String[] DEFAULT_POST_TAGS = new String[]{""}; /** the default opening tags when tag_schema = "styled" */ public static final String[] DEFAULT_STYLED_PRE_TAG = { diff --git a/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java index 4a104797d7c1b..2e04443f9e526 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java @@ -22,10 +22,8 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor; -import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.UidFieldMapper; import java.io.IOException; import java.util.Arrays; @@ -40,7 +38,6 @@ public class LeafFieldsLookup implements Map { private final MapperService mapperService; - private final boolean singleType; @Nullable private final String[] types; @@ -55,7 +52,6 @@ public class LeafFieldsLookup implements Map { LeafFieldsLookup(MapperService mapperService, @Nullable String[] types, LeafReader reader) { this.mapperService = mapperService; - this.singleType = mapperService.getIndexSettings().isSingleType(); this.types = types; this.reader = reader; this.fieldVisitor = new SingleFieldsVisitor(null); @@ -147,9 +143,6 @@ private FieldLookup loadFieldData(String name) { } if (data.fields() == null) { String fieldName = data.fieldType().name(); - if (singleType && UidFieldMapper.NAME.equals(fieldName)) { - fieldName = IdFieldMapper.NAME; - } fieldVisitor.reset(fieldName); try { reader.document(docId, fieldVisitor); diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 7a5da9df9aa36..aabf0c3fd0c69 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -22,11 +22,14 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import 
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -35,7 +38,6 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -53,6 +55,9 @@ * {@link org.elasticsearch.search.slice.DocValuesSliceQuery} is used to filter the results. */ public class SliceBuilder implements Writeable, ToXContentObject { + + private static final DeprecationLogger DEPRECATION_LOG = new DeprecationLogger(Loggers.getLogger(SliceBuilder.class)); + public static final ParseField FIELD_FIELD = new ParseField("field"); public static final ParseField ID_FIELD = new ParseField("id"); public static final ParseField MAX_FIELD = new ParseField("max"); @@ -66,7 +71,7 @@ public class SliceBuilder implements Writeable, ToXContentObject { } /** Name of field to slice against (_uid by default) */ - private String field = UidFieldMapper.NAME; + private String field = IdFieldMapper.NAME; /** The id of the slice */ private int id = -1; /** Max number of slices */ @@ -75,7 +80,7 @@ public class SliceBuilder implements Writeable, ToXContentObject { private SliceBuilder() {} public SliceBuilder(int id, int max) { - this(UidFieldMapper.NAME, id, max); + this(IdFieldMapper.NAME, id, max); } /** @@ -91,14 +96,23 @@ public SliceBuilder(String field, int id, int max) { } public SliceBuilder(StreamInput in) throws IOException { - this.field = in.readString(); + String field = in.readString(); + if ("_uid".equals(field) && in.getVersion().before(Version.V_6_3_0)) { + // This is safe because _id and _uid are handled the same way in #toFilter + field = IdFieldMapper.NAME; + } + this.field = field; this.id = in.readVInt(); this.max = in.readVInt(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(field); + if (IdFieldMapper.NAME.equals(field) && out.getVersion().before(Version.V_6_3_0)) { + out.writeString("_uid"); + } else { + out.writeString(field); + } out.writeVInt(id); out.writeVInt(max); } @@ -197,11 +211,15 @@ public Query toFilter(QueryShardContext context, int shardId, int numShards) { String field = this.field; boolean useTermQuery = false; - if (UidFieldMapper.NAME.equals(field)) { - if (context.getIndexSettings().isSingleType()) { - // on new indices, the _id acts as a _uid - field = IdFieldMapper.NAME; + if ("_uid".equals(field)) { + // on new indices, the _id acts as a _uid + field = IdFieldMapper.NAME; + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); } + DEPRECATION_LOG.deprecated("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); + useTermQuery = true; + } else if (IdFieldMapper.NAME.equals(field)) { useTermQuery = true; } else if (type.hasDocValues() == false) { throw new IllegalArgumentException("cannot load numeric doc values on " + field); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java index 
e639fb07844e5..e391f78f27c50 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/CategoryQueryContext.java @@ -19,12 +19,11 @@ package org.elasticsearch.search.suggest.completion.context; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -110,14 +109,14 @@ public static CategoryQueryContext fromXContent(XContentParser parser) throws IO if (token == XContentParser.Token.START_OBJECT) { try { CATEGORY_PARSER.parse(parser, builder, null); - } catch(ParsingException e) { - throw new ElasticsearchParseException("category context must be a string, number or boolean"); + } catch(XContentParseException e) { + throw new XContentParseException("category context must be a string, number or boolean"); } } else if (token == XContentParser.Token.VALUE_STRING || token == XContentParser.Token.VALUE_BOOLEAN || token == XContentParser.Token.VALUE_NUMBER) { builder.setCategory(parser.text()); } else { - throw new ElasticsearchParseException("category context must be an object, string, number or boolean"); + throw new XContentParseException("category context must be an object, string, number or boolean"); } return builder.build(); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java index 151dcc9173f28..259446cb0c1df 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java @@ -33,6 +33,7 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.common.geo.GeoUtils.parsePrecision; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_BOOST; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_NEIGHBOURS; import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_PRECISION; @@ -115,10 +116,10 @@ public static Builder builder() { static { GEO_CONTEXT_PARSER.declareField((parser, geoQueryContext, geoContextMapping) -> geoQueryContext.setGeoPoint(GeoUtils.parseGeoPoint(parser)), new ParseField(CONTEXT_VALUE), ObjectParser.ValueType.OBJECT); GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setBoost, new ParseField(CONTEXT_BOOST)); - // TODO : add string support for precision for GeoUtils.geoHashLevelsForPrecision() - GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setPrecision, new ParseField(CONTEXT_PRECISION)); - // TODO : add string array support for precision for GeoUtils.geoHashLevelsForPrecision() - GEO_CONTEXT_PARSER.declareIntArray(GeoQueryContext.Builder::setNeighbours, new ParseField(CONTEXT_NEIGHBOURS)); + GEO_CONTEXT_PARSER.declareField((parser, builder, context) -> builder.setPrecision(parsePrecision(parser)), + new ParseField(CONTEXT_PRECISION), ObjectParser.ValueType.INT); + 
GEO_CONTEXT_PARSER.declareFieldArray(GeoQueryContext.Builder::setNeighbours, (parser, builder) -> parsePrecision(parser), + new ParseField(CONTEXT_NEIGHBOURS), ObjectParser.ValueType.INT_ARRAY); GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLat, new ParseField("lat")); GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLon, new ParseField("lon")); } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java index 9027f961ae75b..da4909bb3817f 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParserHelper; import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -242,7 +243,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws PARSER.declareString(constructorArg(), new ParseField("type")); PARSER.declareString(constructorArg(), new ParseField("action")); PARSER.declareString(optionalConstructorArg(), new ParseField("description")); - PARSER.declareRawObject(optionalConstructorArg(), new ParseField("status")); + ObjectParserHelper parserHelper = new ObjectParserHelper<>(); + parserHelper.declareRawObject(PARSER, optionalConstructorArg(), new ParseField("status")); PARSER.declareLong(constructorArg(), new ParseField("start_time_in_millis")); PARSER.declareLong(constructorArg(), new ParseField("running_time_in_nanos")); PARSER.declareBoolean(constructorArg(), new ParseField("cancellable")); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResult.java b/server/src/main/java/org/elasticsearch/tasks/TaskResult.java index f75a4fe7ee520..a866ad9bb2dd1 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResult.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResult.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParserHelper; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -185,8 +186,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t static { PARSER.declareBoolean(constructorArg(), new ParseField("completed")); PARSER.declareObject(constructorArg(), TaskInfo.PARSER, new ParseField("task")); - PARSER.declareRawObject(optionalConstructorArg(), new ParseField("error")); - PARSER.declareRawObject(optionalConstructorArg(), new ParseField("response")); + ObjectParserHelper parserHelper = new ObjectParserHelper<>(); + parserHelper.declareRawObject(PARSER, optionalConstructorArg(), new ParseField("error")); + parserHelper.declareRawObject(PARSER, optionalConstructorArg(), new ParseField("response")); } @Override diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index de63994457a1f..6ec949a0c918b 100644 --- 
a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -19,10 +19,9 @@ package org.elasticsearch.tasks; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -38,13 +37,12 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.Streams; import java.io.ByteArrayOutputStream; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index b3bcc6b0b081f..80814960f0ea7 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -608,7 +608,7 @@ public Info(StreamInput in) throws IOException { type = ThreadPoolType.fromType(in.readString()); min = in.readInt(); max = in.readInt(); - keepAlive = in.readOptionalWriteable(TimeValue::new); + keepAlive = in.readOptionalTimeValue(); queueSize = in.readOptionalWriteable(SizeValue::new); } @@ -624,7 +624,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeInt(min); out.writeInt(max); - out.writeOptionalWriteable(keepAlive); + out.writeOptionalTimeValue(keepAlive); out.writeOptionalWriteable(queueSize); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java new file mode 100644 index 0000000000000..aa476bf4dd267 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
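The ThreadPool.Info hunk above is one instance of a pattern applied throughout this diff (SearchSourceBuilder's timeout and RemoteConnectionInfo's initialConnectionTimeout get the same treatment): TimeValue fields move to the dedicated stream helpers instead of going through the generic optional-writeable path. A minimal sketch on a made-up Writeable (MyTimedThing is hypothetical; the read/write helpers are the ones used in the diff):

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;
    import org.elasticsearch.common.unit.TimeValue;

    final class MyTimedThing implements Writeable {
        private final TimeValue keepAlive; // may be null

        MyTimedThing(StreamInput in) throws IOException {
            keepAlive = in.readOptionalTimeValue();    // was: in.readOptionalWriteable(TimeValue::new)
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalTimeValue(keepAlive);     // was: out.writeOptionalWriteable(keepAlive)
        }
    }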
+ */ +package org.elasticsearch.transport; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.support.AbstractClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; + +final class RemoteClusterAwareClient extends AbstractClient { + + private final TransportService service; + private final String clusterAlias; + private final RemoteClusterService remoteClusterService; + + RemoteClusterAwareClient(Settings settings, ThreadPool threadPool, TransportService service, String clusterAlias) { + super(settings, threadPool); + this.service = service; + this.clusterAlias = clusterAlias; + this.remoteClusterService = service.getRemoteClusterService(); + } + + @Override + protected > + void doExecute(Action action, Request request, ActionListener listener) { + remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(res -> { + Transport.Connection connection = remoteClusterService.getConnection(clusterAlias); + service.sendRequest(connection, action.name(), request, TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, action::newResponse)); + }, + listener::onFailure)); + } + + + @Override + public void close() { + // do nothing + } + + @Override + public Client getRemoteClusterClient(String clusterAlias) { + return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias); + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index b253d9d23df4e..f454571301777 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport; +import org.elasticsearch.client.Client; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -36,6 +37,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; import java.io.IOException; @@ -398,4 +400,18 @@ public void onFailure(Exception e) { }); } } + + /** + * Returns a client to the remote cluster if the given cluster alias exists. 
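A hypothetical consumer of the method documented here (the alias, the index pattern, and the injected transportService/threadPool are assumptions for illustration, not part of this change):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.threadpool.ThreadPool;
    import org.elasticsearch.transport.TransportService;

    // Hypothetical component method; "my_remote" must be a configured alias.
    void searchRemote(TransportService transportService, ThreadPool threadPool,
                      ActionListener<SearchResponse> listener) {
        Client remote = transportService.getRemoteClusterService()
                .getRemoteClusterClient(threadPool, "my_remote"); // IllegalArgumentException for unknown aliases
        // The returned client routes the request over the remote connection
        // once ensureConnected succeeds.
        remote.prepareSearch("logs-*").execute(listener);
    }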
+ * @param threadPool the {@link ThreadPool} for the client + * @param clusterAlias the cluster alias the remote cluster is registered under + * + * @throws IllegalArgumentException if the given clusterAlias doesn't exist + */ + public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) { + if (transportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) { + throw new IllegalArgumentException("unknown cluster alias [" + clusterAlias + "]"); + } + return new RemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias); + } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index f95243921e9bd..cb51f7edce570 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -61,7 +61,7 @@ public RemoteConnectionInfo(StreamInput input) throws IOException { seedNodes = input.readList(TransportAddress::new); httpAddresses = input.readList(TransportAddress::new); connectionsPerCluster = input.readVInt(); - initialConnectionTimeout = new TimeValue(input); + initialConnectionTimeout = input.readTimeValue(); numNodesConnected = input.readVInt(); clusterAlias = input.readString(); if (input.getVersion().onOrAfter(Version.V_6_1_0)) { @@ -100,7 +100,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(seedNodes); out.writeList(httpAddresses); out.writeVInt(connectionsPerCluster); - initialConnectionTimeout.writeTo(out); + out.writeTimeValue(initialConnectionTimeout); out.writeVInt(numNodesConnected); out.writeString(clusterAlias); if (out.getVersion().onOrAfter(Version.V_6_1_0)) { diff --git a/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java b/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java index fcc9f67229f87..dc21ed6a2f799 100644 --- a/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java +++ b/server/src/test/java/org/apache/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java @@ -41,12 +41,6 @@ public void testMultiValued() throws Exception { // Can't test this how BaseRangeFieldQueryTestCase works now, because we're using BinaryDocValuesField here. } - @Override - public void testRandomBig() throws Exception { - // Test regardless whether -Dtests.nightly=true has been specified: - super.testRandomBig(); - } - @Override protected final Field newRangeField(Range box) { AbstractRange testRange = (AbstractRange) box; diff --git a/server/src/test/java/org/elasticsearch/action/GenericActionTests.java b/server/src/test/java/org/elasticsearch/action/GenericActionTests.java new file mode 100644 index 0000000000000..1bbff4b2a99b5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/GenericActionTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.test.ESTestCase; + +public class GenericActionTests extends ESTestCase { + + public void testEquals() { + class FakeAction extends GenericAction { + protected FakeAction(String name) { + super(name); + } + + @Override + public ActionResponse newResponse() { + return null; + } + } + FakeAction fakeAction1 = new FakeAction("a"); + FakeAction fakeAction2 = new FakeAction("a"); + FakeAction fakeAction3 = new FakeAction("b"); + String s = "Some random other object"; + assertEquals(fakeAction1, fakeAction1); + assertEquals(fakeAction2, fakeAction2); + assertNotEquals(fakeAction1, null); + assertNotEquals(fakeAction1, fakeAction3); + assertNotEquals(fakeAction1, s); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java index 1811bfb89a62d..fcd73a6f1dda3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; @@ -42,6 +43,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; public class AliasActionsTests extends ESTestCase { @@ -265,9 +267,9 @@ public void testParseIndexAndIndicesThrowsError() throws IOException { } b.endObject(); try (XContentParser parser = createParser(b)) { - Exception e = expectThrows(ParsingException.class, () -> AliasActions.PARSER.apply(parser, null)); + Exception e = expectThrows(XContentParseException.class, () -> AliasActions.PARSER.apply(parser, null)); assertThat(e.getCause().getCause(), instanceOf(IllegalArgumentException.class)); - assertEquals("Only one of [index] and [indices] is supported", e.getCause().getCause().getMessage()); + assertThat(e.getCause().getCause().getMessage(), containsString("Only one of [index] and [indices] is supported")); } } @@ -285,9 +287,9 @@ public void testParseAliasAndAliasesThrowsError() throws IOException { } b.endObject(); try (XContentParser parser = createParser(b)) { - Exception e = expectThrows(ParsingException.class, () -> AliasActions.PARSER.apply(parser, null)); + Exception e = expectThrows(XContentParseException.class, () -> AliasActions.PARSER.apply(parser, null)); assertThat(e.getCause().getCause(), instanceOf(IllegalArgumentException.class)); - assertEquals("Only one of [alias] and [aliases] is supported", e.getCause().getCause().getMessage()); + 
assertThat(e.getCause().getCause().getMessage(), containsString("Only one of [alias] and [aliases] is supported")); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 44f7e38e3ad4a..e48f151081f62 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -27,6 +27,9 @@ import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -64,6 +67,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.stream.IntStream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -467,4 +471,77 @@ public void testCreateShrinkWithIndexSort() throws Exception { flushAndRefresh(); assertSortedSegments("target", expectedIndexSort); } + + + public void testShrinkCommitsMergeOnIdle() throws Exception { + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) + .put("index.number_of_replicas", 0) + .put("number_of_shards", 5)).get(); + for (int i = 0; i < 30; i++) { + client().prepareIndex("source", "type") + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + client().admin().indices().prepareFlush("source").get(); + ImmutableOpenMap<String, DiscoveryNode> dataNodes = + client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + // ensure all shards are allocated, otherwise the ensureGreen() below might not succeed since we require the merge node; + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) + .put("index.blocks.write", true)).get(); + ensureGreen(); + IndicesSegmentResponse sourceStats = client().admin().indices().prepareSegments("source").get(); + + // disable rebalancing to be able to capture the right stats. Balancing can move the target primary, + // making it hard to pinpoint the source shards.
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none" + )).get(); + + // now merge source into a single shard index + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") + .setSettings(Settings.builder().put("index.number_of_replicas", 0).build()).get()); + ensureGreen(); + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + IndexMetaData target = clusterStateResponse.getState().getMetaData().index("target"); + client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get(); + IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get(); + ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0]; + assertTrue(segmentsStats.getNumberOfCommitted() > 0); + assertNotEquals(segmentsStats.getSegments(), segmentsStats.getNumberOfCommitted()); + + Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); + for (IndicesService service : dataNodeInstances) { + if (service.hasIndex(target.getIndex())) { + IndexService indexShards = service.indexService(target.getIndex()); + IndexShard shard = indexShards.getShard(0); + assertTrue(shard.isActive()); + shard.checkIdle(0); + assertFalse(shard.isActive()); + } + } + assertBusy(() -> { + IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get(); + ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0]; + Map source = sourceStats.getIndices().get("source").getShards(); + int numSourceSegments = 0; + for (IndexShardSegments s : source.values()) { + numSourceSegments += s.getAt(0).getNumberOfCommitted(); + } + assertTrue(targetShardSegments.getSegments().size() < numSourceSegments); + assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getNumberOfSearch()); + assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getSegments().size()); + assertEquals(1, targetShardSegments.getSegments().size()); + }); + + // clean up + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null + )).get(); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 16afa92fb0377..1e8d8e2a2932c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestTests; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -33,6 +32,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.indices.IndicesModule; @@ -172,7 +172,7 @@ public void testUnknownFields() throws IOException { } builder.endObject(); BytesReference mutated = XContentTestUtils.insertRandomFields(xContentType, BytesReference.bytes(builder), null, random()); - expectThrows(ParsingException.class, () -> request.fromXContent(createParser(xContentType.xContent(), mutated))); + expectThrows(XContentParseException.class, () -> request.fromXContent(createParser(xContentType.xContent(), mutated))); } public void testSameConditionCanOnlyBeAddedOnce() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java new file mode 100644 index 0000000000000..463049a8c3c1f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -0,0 +1,133 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.settings.put; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.StringJoiner; + +public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTestCase<UpdateSettingsRequest> { + + @Override + protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { + UpdateSettingsRequest mutation = copyRequest(request); + List<Runnable> mutators = new ArrayList<>(); + mutators.add(() -> mutation.masterNodeTimeout(randomTimeValue())); + mutators.add(() -> mutation.timeout(randomTimeValue())); + mutators.add(() -> mutation.settings(mutateSettings(request.settings()))); + mutators.add(() -> mutation.indices(mutateIndices(request.indices()))); + mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(request.indicesOptions(), + () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())))); + mutators.add(() -> mutation.setPreserveExisting(!request.isPreserveExisting())); + randomFrom(mutators).run(); + return mutation; + } + + @Override + protected UpdateSettingsRequest createTestInstance() { + return createTestItem(); + } + + @Override + protected UpdateSettingsRequest createBlankInstance() { + return new UpdateSettingsRequest(); + } + + public static UpdateSettingsRequest createTestItem() { + UpdateSettingsRequest request = randomBoolean() + ? new UpdateSettingsRequest(randomSettings(0, 2)) + : new UpdateSettingsRequest(randomSettings(0, 2), randomIndicesNames(0, 2)); + request.masterNodeTimeout(randomTimeValue()); + request.timeout(randomTimeValue()); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + request.setPreserveExisting(randomBoolean()); + request.flatSettings(randomBoolean()); + return request; + } + + private static UpdateSettingsRequest copyRequest(UpdateSettingsRequest request) { + UpdateSettingsRequest result = new UpdateSettingsRequest(request.settings(), request.indices()); + result.masterNodeTimeout(request.masterNodeTimeout()); + result.timeout(request.timeout()); + result.indicesOptions(request.indicesOptions()); + result.setPreserveExisting(request.isPreserveExisting()); + result.flatSettings(request.flatSettings()); + return result; + } + + private static Settings mutateSettings(Settings settings) { + if (settings.isEmpty()) { + return randomSettings(1, 5); + } + Set<String> allKeys = settings.keySet(); + List<String> keysToBeModified = randomSubsetOf(randomIntBetween(1, allKeys.size()), allKeys); + Builder builder = Settings.builder(); + for (String key : allKeys) { + String value = settings.get(key); + if (keysToBeModified.contains(key)) { + value += randomAlphaOfLengthBetween(2, 5); + } + builder.put(key, value); + } + return builder.build(); + } + + private static String[] mutateIndices(String[] indices) { + if (CollectionUtils.isEmpty(indices)) { + return randomIndicesNames(1, 5); + } + String[] mutated = Arrays.copyOf(indices, indices.length); + Arrays.asList(mutated).replaceAll(i -> i + randomAlphaOfLengthBetween(2, 5)); + return mutated; + } + + private static Settings randomSettings(int min, int max) { + int num = randomIntBetween(min, max); +
Builder builder = Settings.builder(); + for (int i = 0; i < num; i++) { + int keyDepth = randomIntBetween(1, 5); + StringJoiner keyJoiner = new StringJoiner(".", "", ""); + for (int d = 0; d < keyDepth; d++) { + keyJoiner.add(randomAlphaOfLengthBetween(3, 5)); + } + builder.put(keyJoiner.toString(), randomAlphaOfLengthBetween(2, 5)); + } + return builder.build(); + } + + private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { + int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + return indices; + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java new file mode 100644 index 0000000000000..ff75dbecd520c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
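The XContent test added below exercises the two body shapes the update-settings API accepts. A sketch of the equivalent payloads (setting and value chosen arbitrarily):

    // Both bodies parse to the same UpdateSettingsRequest settings; in the
    // wrapped form, unknown keys next to "settings" are ignored, which is why
    // supportsUnknownFields() below returns true only for that shape.
    String flat    = "{ \"index.number_of_replicas\": 1 }";
    String wrapped = "{ \"settings\": { \"index.number_of_replicas\": 1 } }";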
+ */ + +package org.elasticsearch.action.admin.indices.settings.put; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.function.Predicate; + +public class UpdateSettingsRequestTests extends AbstractXContentTestCase<UpdateSettingsRequest> { + + private final boolean enclosedSettings = randomBoolean(); + + @Override + protected UpdateSettingsRequest createTestInstance() { + UpdateSettingsRequest testRequest = UpdateSettingsRequestStreamableTests.createTestItem(); + if (enclosedSettings) { + UpdateSettingsRequest requestWithEnclosingSettings = new UpdateSettingsRequest(testRequest.settings()) { + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject("settings"); + this.settings().toXContent(builder, params); + builder.endObject(); + builder.endObject(); + return builder; + } + }; + return requestWithEnclosingSettings; + } + return testRequest; + } + + @Override + protected UpdateSettingsRequest doParseInstance(XContentParser parser) throws IOException { + return new UpdateSettingsRequest().fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + // if the settings are enclosed in a "settings" object + // then all other top-level elements will be ignored during the parsing + return enclosedSettings; + } + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + if (enclosedSettings) { + return field -> field.startsWith("settings"); + } + return field -> true; + } + + @Override + protected void assertEqualInstances(UpdateSettingsRequest expectedInstance, UpdateSettingsRequest newInstance) { + // here only the settings should be tested, as this test explicitly covers only the XContent parsing; + // the rest of the request fields are tested by the StreamableTests + super.assertEqualInstances(new UpdateSettingsRequest(expectedInstance.settings()), + new UpdateSettingsRequest(newInstance.settings())); + } + + @Override + protected boolean assertToXContentEquivalence() { + // if enclosedSettings is used, disable the XContentEquivalence check as the + // parsed.toXContent is not equivalent to the test instance + return !enclosedSettings; + } + +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java new file mode 100644 index 0000000000000..a3fb484f02e88 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.settings.put; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +public class UpdateSettingsResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected UpdateSettingsResponse doParseInstance(XContentParser parser) { + return UpdateSettingsResponse.fromXContent(parser); + } + + @Override + protected UpdateSettingsResponse createTestInstance() { + return new UpdateSettingsResponse(randomBoolean()); + } + + @Override + protected UpdateSettingsResponse createBlankInstance() { + return new UpdateSettingsResponse(); + } + + @Override + protected UpdateSettingsResponse mutateInstance(UpdateSettingsResponse response) { + return new UpdateSettingsResponse(response.isAcknowledged() == false); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java index 97a1ef2806a3e..1d03d065e7af7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java @@ -94,12 +94,12 @@ public void testSimpleBulk4() throws Exception { BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON); assertThat(bulkRequest.numberOfActions(), equalTo(4)); - assertThat(((UpdateRequest) bulkRequest.requests().get(0)).id(), equalTo("1")); + assertThat(bulkRequest.requests().get(0).id(), equalTo("1")); assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2)); assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().utf8ToString(), equalTo("{\"field\":\"value\"}")); - assertThat(((UpdateRequest) bulkRequest.requests().get(1)).id(), equalTo("0")); - assertThat(((UpdateRequest) bulkRequest.requests().get(1)).type(), equalTo("type1")); - assertThat(((UpdateRequest) bulkRequest.requests().get(1)).index(), equalTo("index1")); + assertThat(bulkRequest.requests().get(1).id(), equalTo("0")); + assertThat(bulkRequest.requests().get(1).type(), equalTo("type1")); + assertThat(bulkRequest.requests().get(1).index(), equalTo("index1")); Script script = ((UpdateRequest) bulkRequest.requests().get(1)).script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("counter += param1")); @@ -107,20 +107,18 @@ public void testSimpleBulk4() throws Exception { Map scriptParams = script.getParams(); assertThat(scriptParams, notNullValue()); assertThat(scriptParams.size(), equalTo(1)); - assertThat(((Integer) scriptParams.get("param1")), equalTo(1)); + assertThat(scriptParams.get("param1"), equalTo(1)); assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().utf8ToString(), equalTo("{\"counter\":1}")); } public void testBulkAllowExplicitIndex() throws Exception { - String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json"); - try { - new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), null, null, false, XContentType.JSON); - fail(); - } catch (Exception e) { - - } + String bulkAction1 = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json"); + Exception ex = expectThrows(Exception.class, + () -> 
new BulkRequest().add( + new BytesArray(bulkAction1.getBytes(StandardCharsets.UTF_8)), null, null, false, XContentType.JSON)); + assertEquals("explicit index in bulk is not allowed", ex.getMessage()); - bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json"); + String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json"); new BulkRequest().add(new BytesArray(bulkAction.getBytes(StandardCharsets.UTF_8)), "test", null, false, XContentType.JSON); } @@ -177,6 +175,16 @@ public void testSimpleBulk10() throws Exception { assertThat(bulkRequest.numberOfActions(), equalTo(9)); } + public void testBulkActionShouldNotContainArray() throws Exception { + String bulkAction = "{ \"index\":{\"_index\":[\"index1\", \"index2\"],\"_type\":\"type1\",\"_id\":\"1\"} }\r\n" + + "{ \"field1\" : \"value1\" }\r\n"; + BulkRequest bulkRequest = new BulkRequest(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON)); + assertEquals(exc.getMessage(), "Malformed action/metadata line [1]" + + ", expected a simple value for field [_index] but found [START_ARRAY]"); + } + public void testBulkEmptyObject() throws Exception { String bulkIndexAction = "{ \"index\":{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\"1\"} }\r\n"; String bulkIndexSource = "{ \"field1\" : \"value1\" }\r\n"; @@ -299,7 +307,7 @@ public void testToValidateUpsertRequestAndVersionInBulkRequest() throws IOExcept out.write(xContentType.xContent().streamSeparator()); try(XContentBuilder builder = XContentFactory.contentBuilder(xContentType, out)) { builder.startObject(); - builder.field("doc", "{}"); + builder.startObject("doc").endObject(); Map values = new HashMap<>(); values.put("version", 2L); values.put("_index", "index"); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java index 6656faf1e194e..80048cf343372 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkWithUpdatesIT.java @@ -260,13 +260,13 @@ public void testBulkUpdateMalformedScripts() throws Exception { assertThat(bulkResponse.getItems().length, equalTo(3)); bulkResponse = client().prepareBulk() - .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setFields("field") + .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1").setFetchSource("field", null) .setScript(new Script( ScriptType.INLINE, CustomScriptPlugin.NAME, "throw script exception on unknown var", Collections.emptyMap()))) - .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setFields("field") + .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2").setFetchSource("field", null) .setScript(new Script( ScriptType.INLINE, CustomScriptPlugin.NAME, "ctx._source.field += 1", Collections.emptyMap()))) - .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setFields("field") + .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3").setFetchSource("field", null) .setScript(new Script( ScriptType.INLINE, CustomScriptPlugin.NAME, "throw script exception on unknown var", Collections.emptyMap()))) .execute().actionGet(); @@ -279,7 +279,7 @@ public void testBulkUpdateMalformedScripts() throws 
Exception { assertThat(bulkResponse.getItems()[1].getResponse().getId(), equalTo("2")); assertThat(bulkResponse.getItems()[1].getResponse().getVersion(), equalTo(2L)); - assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getGetResult().field("field").getValue(), equalTo(2)); + assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getGetResult().sourceAsMap().get("field"), equalTo(2)); assertThat(bulkResponse.getItems()[1].getFailure(), nullValue()); assertThat(bulkResponse.getItems()[2].getFailure().getId(), equalTo("3")); @@ -303,7 +303,7 @@ public void testBulkUpdateLargerVolume() throws Exception { builder.add( client().prepareUpdate() .setIndex("test").setType("type1").setId(Integer.toString(i)) - .setFields("counter") + .setFetchSource("counter", null) .setScript(script) .setUpsert(jsonBuilder().startObject().field("counter", 1).endObject())); } @@ -319,7 +319,7 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(1L)); - assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(1)); + assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().sourceAsMap().get("counter"), equalTo(1)); for (int j = 0; j < 5; j++) { GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute() @@ -333,7 +333,7 @@ public void testBulkUpdateLargerVolume() throws Exception { builder = client().prepareBulk(); for (int i = 0; i < numDocs; i++) { UpdateRequestBuilder updateBuilder = client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i)) - .setFields("counter"); + .setFetchSource("counter", null); if (i % 2 == 0) { updateBuilder.setScript(script); } else { @@ -357,7 +357,7 @@ public void testBulkUpdateLargerVolume() throws Exception { assertThat(response.getItems()[i].getOpType(), equalTo(OpType.UPDATE)); assertThat(response.getItems()[i].getResponse().getId(), equalTo(Integer.toString(i))); assertThat(response.getItems()[i].getResponse().getVersion(), equalTo(2L)); - assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue(), equalTo(2)); + assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().sourceAsMap().get("counter"), equalTo(2)); } builder = client().prepareBulk(); @@ -450,76 +450,6 @@ public void testBulkIndexingWhileInitializing() throws Exception { assertHitCount(countResponse, numDocs); } - - - /* - * Test for https://github.com/elastic/elasticsearch/issues/8365 - */ - public void testBulkUpdateChildMissingParentRouting() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) // allows for multiple types - .addMapping("parent", "{\"parent\":{}}", XContentType.JSON) - .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON)); - ensureGreen(); - - BulkRequestBuilder builder = client().prepareBulk(); - - byte[] addParent = ( - "{" + - " \"index\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"parent\"," + - " \"_id\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"field1\" : \"value1\"" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - byte[] addChildOK = ( - 
"{" + - " \"index\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"child\"," + - " \"_id\" : \"child1\"," + - " \"parent\" : \"parent1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"field1\" : \"value1\"" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - byte[] addChildMissingRouting = ( - "{" + - " \"index\" : {" + - " \"_index\" : \"test\"," + - " \"_type\" : \"child\"," + - " \"_id\" : \"child1\"" + - " }" + - "}" + - "\n" + - "{" + - " \"field1\" : \"value1\"" + - "}" + - "\n").getBytes(StandardCharsets.UTF_8); - - builder.add(addParent, 0, addParent.length, XContentType.JSON); - builder.add(addChildOK, 0, addChildOK.length, XContentType.JSON); - builder.add(addChildMissingRouting, 0, addChildMissingRouting.length, XContentType.JSON); - builder.add(addChildOK, 0, addChildOK.length, XContentType.JSON); - - BulkResponse bulkResponse = builder.get(); - assertThat(bulkResponse.getItems().length, equalTo(4)); - assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false)); - assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(true)); - assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false)); - } - public void testFailingVersionedUpdatedOnBulk() throws Exception { createIndex("test"); index("test", "type", "1", "field", "1"); diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java index 8834ee203fba0..f1de226704e53 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java @@ -182,9 +182,6 @@ private MultiGetRequest createTestInstance() { if (randomBoolean()) { item.routing(randomAlphaOfLength(4)); } - if (randomBoolean()) { - item.parent(randomAlphaOfLength(4)); - } request.add(item); } return request; diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java index 00815807eee8a..b0c6d717bb38e 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineRequestParsingTests.java @@ -42,7 +42,6 @@ import static org.elasticsearch.action.ingest.SimulatePipelineRequest.SIMULATED_PIPELINE_ID; import static org.elasticsearch.ingest.IngestDocument.MetaData.ID; import static org.elasticsearch.ingest.IngestDocument.MetaData.INDEX; -import static org.elasticsearch.ingest.IngestDocument.MetaData.PARENT; import static org.elasticsearch.ingest.IngestDocument.MetaData.ROUTING; import static org.elasticsearch.ingest.IngestDocument.MetaData.TYPE; import static org.elasticsearch.ingest.IngestDocument.MetaData.VERSION; @@ -123,7 +122,7 @@ public void testParseWithProvidedPipeline() throws Exception { for (int i = 0; i < numDocs; i++) { Map doc = new HashMap<>(); Map expectedDoc = new HashMap<>(); - List fields = Arrays.asList(INDEX, TYPE, ID, ROUTING, PARENT, VERSION, VERSION_TYPE); + List fields = Arrays.asList(INDEX, TYPE, ID, ROUTING, VERSION, VERSION_TYPE); for(IngestDocument.MetaData field : fields) { if (field == VERSION) { Long value = randomLong(); @@ -194,7 +193,6 @@ public void testParseWithProvidedPipeline() throws Exception { assertThat(metadataMap.get(TYPE), equalTo(expectedDocument.get(TYPE.getFieldName()))); 
assertThat(metadataMap.get(ID), equalTo(expectedDocument.get(ID.getFieldName()))); assertThat(metadataMap.get(ROUTING), equalTo(expectedDocument.get(ROUTING.getFieldName()))); - assertThat(metadataMap.get(PARENT), equalTo(expectedDocument.get(PARENT.getFieldName()))); assertThat(metadataMap.get(VERSION), equalTo(expectedDocument.get(VERSION.getFieldName()))); assertThat(metadataMap.get(VERSION_TYPE), equalTo(expectedDocument.get(VERSION_TYPE.getFieldName()))); assertThat(ingestDocument.getSourceAndMetadata(), equalTo(expectedDocument.get(Fields.SOURCE))); diff --git a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java index f1f9fec34de59..d5ad3941a5e8f 100644 --- a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java @@ -37,7 +37,8 @@ public class ResyncReplicationRequestTests extends ESTestCase { public void testSerialization() throws IOException { final byte[] bytes = "{}".getBytes(Charset.forName("UTF-8")); - final Translog.Index index = new Translog.Index("type", "id", 0, Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, null, -1); + final Translog.Index index = new Translog.Index("type", "id", 0, randomNonNegativeLong(), + Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, -1); final ShardId shardId = new ShardId(new Index("index", "uuid"), 0); final ResyncReplicationRequest before = new ResyncReplicationRequest(shardId, new Translog.Operation[]{index}); diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index d9fea03a56692..a772fa6951c2f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -165,7 +165,7 @@ public void testResponseErrorToXContent() throws IOException { new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz")) }, tookInMillis); - assertEquals("{\"took\":" + assertEquals("{\"took\":" + tookInMillis + ",\"responses\":[" + "{" @@ -225,7 +225,7 @@ public void testMultiLineSerialization() throws IOException { byte[] originalBytes = MultiSearchRequest.writeMultiLineFormat(originalRequest, xContentType.xContent()); MultiSearchRequest parsedRequest = new MultiSearchRequest(); CheckedBiConsumer consumer = (r, p) -> { - SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p); + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(p, false); if (searchSourceBuilder.equals(new SearchSourceBuilder()) == false) { r.source(searchSourceBuilder); } @@ -273,7 +273,7 @@ private static MultiSearchRequest createMultiSearchRequest() throws IOException if (randomBoolean()) { searchRequest.allowPartialSearchResults(true); } - + // scroll is not supported in the current msearch api, so unset it: searchRequest.scroll((Scroll) null); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index d110d12e4fc39..a16a8f628f98b 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -237,8 +237,6 @@ public void testStreamRequest() throws IOException { request.payloads(random().nextBoolean()); request.positions(random().nextBoolean()); request.termStatistics(random().nextBoolean()); - String parent = random().nextBoolean() ? "someParent" : null; - request.parent(parent); String pref = random().nextBoolean() ? "somePreference" : null; request.preference(pref); request.doc(new BytesArray("{}"), randomBoolean(), XContentType.JSON); @@ -282,7 +280,6 @@ public void testStreamRequestWithXContentBwc() throws IOException { assertTrue(request.payloads()); assertFalse(request.positions()); assertTrue(request.termStatistics()); - assertNull(request.parent()); assertEquals("somePreference", request.preference()); assertEquals("{}", request.doc().utf8ToString()); assertEquals(XContentType.JSON, request.xContentType()); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index ddf4f32c2c2b4..f562cbd0ec184 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -61,7 +61,6 @@ import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; import static org.elasticsearch.script.MockScriptEngine.mockInlineScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; -import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -277,17 +276,26 @@ public void testFromXContent() throws Exception { assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2")); } - // Related to issue 15338 - public void testFieldsParsing() throws Exception { - UpdateRequest request = new UpdateRequest("test", "type1", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"field1\": \"value1\"}, \"fields\": \"_source\"}"))); - assertThat(request.doc().sourceAsMap().get("field1").toString(), equalTo("value1")); - assertThat(request.fields(), arrayContaining("_source")); - - request = new UpdateRequest("test", "type2", "2").fromXContent(createParser(JsonXContent.jsonXContent, - new BytesArray("{\"doc\": {\"field2\": \"value2\"}, \"fields\": [\"field1\", \"field2\"]}"))); - assertThat(request.doc().sourceAsMap().get("field2").toString(), equalTo("value2")); - assertThat(request.fields(), arrayContaining("field1", "field2")); + public void testUnknownFieldParsing() throws Exception { + UpdateRequest request = new UpdateRequest("test", "type", "1"); + XContentParser contentParser = createParser(XContentFactory.jsonBuilder() + .startObject() + .field("unknown_field", "test") + .endObject()); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> request.fromXContent(contentParser)); + assertEquals("[UpdateRequest] unknown field [unknown_field], parser not found", ex.getMessage()); + + UpdateRequest request2 = new UpdateRequest("test", "type", "1"); + XContentParser unknownObject = createParser(XContentFactory.jsonBuilder() + .startObject() + .field("script", "ctx.op = ctx._source.views == params.count ? 
'delete' : 'none'") + .startObject("params") + .field("count", 1) + .endObject() + .endObject()); + ex = expectThrows(IllegalArgumentException.class, () -> request2.fromXContent(unknownObject)); + assertEquals("[UpdateRequest] unknown field [params], parser not found", ex.getMessage()); } public void testFetchSourceParsing() throws Exception { @@ -444,13 +452,6 @@ public void testToAndFromXContent() throws IOException { BytesReference source = RandomObjects.randomSource(random(), xContentType); updateRequest.upsert(new IndexRequest().source(source, xContentType)); } - if (randomBoolean()) { - String[] fields = new String[randomIntBetween(0, 5)]; - for (int i = 0; i < fields.length; i++) { - fields[i] = randomAlphaOfLength(5); - } - updateRequest.fields(fields); - } if (randomBoolean()) { if (randomBoolean()) { updateRequest.fetchSource(randomBoolean()); @@ -487,10 +488,8 @@ public void testToAndFromXContent() throws IOException { assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop()); assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert()); - assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert()); assertEquals(updateRequest.script(), parsedUpdateRequest.script()); assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert()); - assertArrayEquals(updateRequest.fields(), parsedUpdateRequest.fields()); assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource()); BytesReference finalBytes = toXContent(parsedUpdateRequest, xContentType, humanReadable); @@ -512,27 +511,23 @@ public void testToValidateUpsertRequestWithVersion() { assertThat(updateRequest.validate().validationErrors(), contains("can't provide version in upsert request")); } - public void testParentAndRoutingExtraction() throws Exception { + public void testRoutingExtraction() throws Exception { GetResult getResult = new GetResult("test", "type", "1", 0, false, null, null); IndexRequest indexRequest = new IndexRequest("test", "type", "1"); // There is no routing and parent because the document doesn't exist assertNull(UpdateHelper.calculateRouting(getResult, null)); - assertNull(UpdateHelper.calculateParent(getResult, null)); // There is no routing and parent the indexing request assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); - assertNull(UpdateHelper.calculateParent(getResult, indexRequest)); // Doc exists but has no source or fields getResult = new GetResult("test", "type", "1", 0, true, null, null); // There is no routing and parent on either request assertNull(UpdateHelper.calculateRouting(getResult, indexRequest)); - assertNull(UpdateHelper.calculateParent(getResult, indexRequest)); Map fields = new HashMap<>(); - fields.put("_parent", new DocumentField("_parent", Collections.singletonList("parent1"))); fields.put("_routing", new DocumentField("_routing", Collections.singletonList("routing1"))); // Doc exists and has the parent and routing fields @@ -540,14 +535,6 @@ public void testParentAndRoutingExtraction() throws Exception { // Use the get result parent and routing assertThat(UpdateHelper.calculateRouting(getResult, indexRequest), equalTo("routing1")); - assertThat(UpdateHelper.calculateParent(getResult, indexRequest), equalTo("parent1")); - - // Index request has overriding parent and routing values - indexRequest = new IndexRequest("test", "type", "1").parent("parent2").routing("routing2"); - - // Use the request's parent and routing - assertThat(UpdateHelper.calculateRouting(getResult, 
indexRequest), equalTo("routing2")); - assertThat(UpdateHelper.calculateParent(getResult, indexRequest), equalTo("parent2")); } @SuppressWarnings("deprecated") // VersionType.FORCE is deprecated diff --git a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index bca04738d8b89..5e739cc325040 100644 --- a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -43,7 +43,7 @@ protected Client buildClient(Settings headersSettings, GenericAction[] testedAct Settings settings = HEADER_SETTINGS; Actions actions = new Actions(settings, threadPool, testedActions); NodeClient client = new NodeClient(settings, threadPool); - client.initialize(actions, () -> "test"); + client.initialize(actions, () -> "test", null); return client; } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 307b9716fa3f9..211ae48d04355 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -47,7 +47,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.indices.IndicesService; @@ -433,7 +432,6 @@ private void setupIndicesService() throws Exception { when(routingMapper.required()).thenReturn(false); when(docMapper.routingFieldMapper()).thenReturn(routingMapper); - when(docMapper.parentFieldMapper()).thenReturn(mock(ParentFieldMapper.class)); when(mapper.docMappers(anyBoolean())).thenReturn(Collections.singletonList(docMapper)); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java index 428d9488dc2c6..1e46c2c428663 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -18,16 +18,11 @@ */ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -36,7 +31,6 @@ import java.util.Collections; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { @@ -45,51 +39,6 @@ protected 
Collection> getPlugins() { return Collections.singleton(InternalSettingsPlugin.class); } - // Tests _parent meta field logic, because part of the validation is in MetaDataMappingService - public void testAddChildTypePointingToAlreadyExistingType() throws Exception { - createIndex("test", Settings.EMPTY, "type", "field", "type=keyword"); - - // Shouldn't be able the add the _parent field pointing to an already existing type, which isn't a parent type - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin() - .indices() - .preparePutMapping("test") - .setType("child") - .setSource("_parent", "type=type") - .get()); - assertThat(e.getMessage(), - equalTo("can't add a _parent field that points to an already existing type, that isn't already a parent")); - } - - // Tests _parent meta field logic, because part of the validation is in MetaDataMappingService - public void testAddExtraChildTypePointingToAlreadyParentExistingType() throws Exception { - IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) - .addMapping("parent") - .addMapping("child1", "_parent", "type=parent") - ); - - // adding the extra child type that points to an already existing parent type is allowed: - client().admin() - .indices() - .preparePutMapping("test") - .setType("child2") - .setSource("_parent", "type=parent") - .get(); - - DocumentMapper documentMapper = indexService.mapperService().documentMapper("child2"); - assertThat(documentMapper.parentFieldMapper().type(), equalTo("parent")); - assertThat(documentMapper.parentFieldMapper().active(), is(true)); - } - - public void testParentIsAString() throws Exception { - // Shouldn't be able the add the _parent field pointing to an already existing type, which isn't a parent type - Exception e = expectThrows(MapperParsingException.class, () -> client().admin().indices().prepareCreate("test") - .addMapping("parent", "{\"properties\":{}}", XContentType.JSON) - .addMapping("child", "{\"_parent\": \"parent\",\"properties\":{}}", XContentType.JSON) - .get()); - assertEquals("Failed to parse mapping [child]: [_parent] must be an object containing [type]", e.getMessage()); - } - public void testMappingClusterStateUpdateDoesntChangeExistingIndices() throws Exception { final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type")); final CompressedXContent currentMapping = indexService.mapperService().documentMapper("type").mappingSource(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 74d13a2aab046..3a83580dc1cdd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -110,36 +110,21 @@ public void testResolveIndexRouting() { MetaData metaData = MetaData.builder().put(builder).build(); // no alias, no index - assertEquals(metaData.resolveIndexRouting(null, null, null), null); - assertEquals(metaData.resolveIndexRouting(null, "0", null), "0"); - assertEquals(metaData.resolveIndexRouting("32", "0", null), "0"); - assertEquals(metaData.resolveIndexRouting("32", null, null), "32"); + assertEquals(metaData.resolveIndexRouting(null, null), null); + assertEquals(metaData.resolveIndexRouting("0", null), "0"); // index, no alias 
- assertEquals(metaData.resolveIndexRouting("32", "0", "index"), "0"); - assertEquals(metaData.resolveIndexRouting("32", null, "index"), "32"); - assertEquals(metaData.resolveIndexRouting(null, null, "index"), null); - assertEquals(metaData.resolveIndexRouting(null, "0", "index"), "0"); + assertEquals(metaData.resolveIndexRouting(null, "index"), null); + assertEquals(metaData.resolveIndexRouting("0", "index"), "0"); // alias with no index routing - assertEquals(metaData.resolveIndexRouting(null, null, "alias0"), null); - assertEquals(metaData.resolveIndexRouting(null, "0", "alias0"), "0"); - assertEquals(metaData.resolveIndexRouting("32", null, "alias0"), "32"); - assertEquals(metaData.resolveIndexRouting("32", "0", "alias0"), "0"); + assertEquals(metaData.resolveIndexRouting(null, "alias0"), null); + assertEquals(metaData.resolveIndexRouting("0", "alias0"), "0"); // alias with index routing. - assertEquals(metaData.resolveIndexRouting(null, null, "alias1"), "1"); - assertEquals(metaData.resolveIndexRouting("32", null, "alias1"), "1"); - assertEquals(metaData.resolveIndexRouting("32", "1", "alias1"), "1"); + assertEquals(metaData.resolveIndexRouting(null, "alias1"), "1"); try { - metaData.resolveIndexRouting(null, "0", "alias1"); - fail("should fail"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("Alias [alias1] has index routing associated with it [1], and was provided with routing value [0], rejecting operation")); - } - - try { - metaData.resolveIndexRouting("32", "0", "alias1"); + metaData.resolveIndexRouting("0", "alias1"); fail("should fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), is("Alias [alias1] has index routing associated with it [1], and was provided with routing value [0], rejecting operation")); @@ -147,21 +132,14 @@ public void testResolveIndexRouting() { // alias with invalid index routing. 
try { - metaData.resolveIndexRouting(null, null, "alias2"); - fail("should fail"); - } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); - } - - try { - metaData.resolveIndexRouting(null, "1", "alias2"); + metaData.resolveIndexRouting(null, "alias2"); fail("should fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); } try { - metaData.resolveIndexRouting("32", null, "alias2"); + metaData.resolveIndexRouting("1", "alias2"); fail("should fail"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index 8a62e14ba579a..c1861572d8352 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -118,4 +118,19 @@ public void testSerialization() throws IOException { AllocationId parsedAllocationId = AllocationId.fromXContent(createParser(JsonXContent.jsonXContent, bytes)); assertEquals(allocationId, parsedAllocationId); } + + public void testEquals() { + AllocationId allocationId1 = AllocationId.newInitializing(); + AllocationId allocationId2 = AllocationId.newInitializing(allocationId1.getId()); + AllocationId allocationId3 = AllocationId.newInitializing("not a UUID"); + String s = "Some random other object"; + assertEquals(allocationId1, allocationId1); + assertEquals(allocationId1, allocationId2); + assertNotEquals(allocationId1, s); + assertNotEquals(allocationId1, null); + assertNotEquals(allocationId1, allocationId3); + + allocationId2 = AllocationId.newRelocation(allocationId1); + assertNotEquals(allocationId1, allocationId2); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java new file mode 100644 index 0000000000000..7823970ff46d8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; + +public class IndexShardRoutingTableTests extends ESTestCase { + public void testEqualsAttributesKey() { + String[] attr1 = {"a"}; + String[] attr2 = {"b"}; + IndexShardRoutingTable.AttributesKey attributesKey1 = new IndexShardRoutingTable.AttributesKey(attr1); + IndexShardRoutingTable.AttributesKey attributesKey2 = new IndexShardRoutingTable.AttributesKey(attr1); + IndexShardRoutingTable.AttributesKey attributesKey3 = new IndexShardRoutingTable.AttributesKey(attr2); + String s = "Some random other object"; + assertEquals(attributesKey1, attributesKey1); + assertEquals(attributesKey1, attributesKey2); + assertNotEquals(attributesKey1, null); + assertNotEquals(attributesKey1, s); + assertNotEquals(attributesKey1, attributesKey3); + } + + public void testEquals() { + Index index = new Index("a", "b"); + ShardId shardId = new ShardId(index, 1); + ShardId shardId2 = new ShardId(index, 2); + IndexShardRoutingTable table1 = new IndexShardRoutingTable(shardId, new ArrayList<>()); + IndexShardRoutingTable table2 = new IndexShardRoutingTable(shardId, new ArrayList<>()); + IndexShardRoutingTable table3 = new IndexShardRoutingTable(shardId2, new ArrayList<>()); + String s = "Some other random object"; + assertEquals(table1, table1); + assertEquals(table1, table2); + assertNotEquals(table1, null); + assertNotEquals(table1, s); + assertNotEquals(table1, table3); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java new file mode 100644 index 0000000000000..c92da8e0a8fa0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PlainShardIteratorTests.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.routing; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +import java.util.ArrayList; + +public class PlainShardIteratorTests extends ESTestCase { + + public void testEquals() { + Index index = new Index("a", "b"); + ShardId shardId = new ShardId(index, 1); + ShardId shardId2 = new ShardId(index, 2); + PlainShardIterator iterator1 = new PlainShardIterator(shardId, new ArrayList<>()); + PlainShardIterator iterator2 = new PlainShardIterator(shardId, new ArrayList<>()); + PlainShardIterator iterator3 = new PlainShardIterator(shardId2, new ArrayList<>()); + String s = "Some other random object"; + assertEquals(iterator1, iterator1); + assertEquals(iterator1, iterator2); + assertNotEquals(iterator1, null); + assertNotEquals(iterator1, s); + assertNotEquals(iterator1, iterator3); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 077466906b709..88766e7943ea9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -33,15 +33,19 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource; +import org.elasticsearch.cluster.routing.RoutingChangesObserver; +import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; import org.elasticsearch.common.UUIDs; @@ -68,6 +72,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { @@ -428,4 +433,82 @@ private void assertRecoveryNodeVersions(RoutingNodes routingNodes) { } } + public void testMessages() { + + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder() + .addAsNew(metaData.index("test")) + .build(); + + RoutingNode newNode = new RoutingNode("newNode", newNode("newNode", Version.CURRENT)); + RoutingNode oldNode = new 
RoutingNode("oldNode", newNode("oldNode", VersionUtils.getPreviousVersion())); + + final ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); + ClusterState clusterState = ClusterState.builder(clusterName).metaData(metaData).routingTable(initialRoutingTable) + .nodes(DiscoveryNodes.builder().add(newNode.node()).add(oldNode.node())).build(); + + final ShardId shardId = clusterState.routingTable().index("test").shard(0).getShardId(); + final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); + final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().get(0); + + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); + routingAllocation.debugDecision(true); + + final NodeVersionAllocationDecider allocationDecider = new NodeVersionAllocationDecider(Settings.EMPTY); + Decision decision = allocationDecider.canAllocate(primaryShard, newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat(decision.getExplanation(), is("the primary shard is new or already existed on the node")); + + decision = allocationDecider.canAllocate(ShardRoutingHelper.initialize(primaryShard, "oldNode"), newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat(decision.getExplanation(), is("can relocate primary shard from a node with version [" + + oldNode.node().getVersion() + "] to a node with equal-or-newer version [" + newNode.node().getVersion() + "]")); + + decision = allocationDecider.canAllocate(ShardRoutingHelper.initialize(primaryShard, "newNode"), oldNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat(decision.getExplanation(), is("cannot relocate primary shard from a node with version [" + + newNode.node().getVersion() + "] to a node with older version [" + oldNode.node().getVersion() + "]")); + + final SnapshotRecoverySource newVersionSnapshot = new SnapshotRecoverySource( + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), newNode.node().getVersion(), "test"); + final SnapshotRecoverySource oldVersionSnapshot = new SnapshotRecoverySource( + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), oldNode.node().getVersion(), "test"); + + decision = allocationDecider.canAllocate(ShardRoutingHelper.newWithRestoreSource(primaryShard, newVersionSnapshot), + oldNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat(decision.getExplanation(), is("node version [" + + oldNode.node().getVersion() + "] is older than the snapshot version [" + newNode.node().getVersion() + "]")); + + decision = allocationDecider.canAllocate(ShardRoutingHelper.newWithRestoreSource(primaryShard, oldVersionSnapshot), + newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat(decision.getExplanation(), is("node version [" + + newNode.node().getVersion() + "] is the same or newer than snapshot version [" + oldNode.node().getVersion() + "]")); + + final RoutingChangesObserver routingChangesObserver = new RoutingChangesObserver.AbstractRoutingChangesObserver(); + final RoutingNodes routingNodes = new RoutingNodes(clusterState, false); + final ShardRouting startedPrimary = routingNodes.startShard(logger, routingNodes.initializeShard(primaryShard, "newNode", null, 0, + routingChangesObserver), routingChangesObserver); + 
routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, 0); + routingAllocation.debugDecision(true); + + decision = allocationDecider.canAllocate(replicaShard, oldNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.NO)); + assertThat(decision.getExplanation(), is("cannot allocate replica shard to a node with version [" + + oldNode.node().getVersion() + "] since this is older than the primary version [" + newNode.node().getVersion() + "]")); + + routingNodes.startShard(logger, routingNodes.relocateShard(startedPrimary, "oldNode", 0, routingChangesObserver).v2(), + routingChangesObserver); + routingAllocation = new RoutingAllocation(null, routingNodes, clusterState, null, 0); + routingAllocation.debugDecision(true); + + decision = allocationDecider.canAllocate(replicaShard, newNode, routingAllocation); + assertThat(decision.type(), is(Decision.Type.YES)); + assertThat(decision.getExplanation(), is("can allocate replica shard to a node with version [" + + newNode.node().getVersion() + "] since this is equal-or-newer than the primary version [" + oldNode.node().getVersion() + "]")); + } } diff --git a/server/src/test/java/org/elasticsearch/common/UUIDTests.java b/server/src/test/java/org/elasticsearch/common/UUIDTests.java index 74a72dd6f3033..849db0dc71259 100644 --- a/server/src/test/java/org/elasticsearch/common/UUIDTests.java +++ b/server/src/test/java/org/elasticsearch/common/UUIDTests.java @@ -120,9 +120,9 @@ public void testCompression() throws Exception { Logger logger = Loggers.getLogger(UUIDTests.class); // Low number so that the test runs quickly, but the results are more interesting with larger numbers // of indexed documents - assertThat(testCompression(500000, 10000, 3, logger), Matchers.lessThan(12d)); // ~10.8 in practice - assertThat(testCompression(500000, 1000, 3, logger), Matchers.lessThan(14d)); // ~11.5 in practice - assertThat(testCompression(500000, 100, 3, logger), Matchers.lessThan(21d)); // ~19.5 in practice + assertThat(testCompression(100000, 10000, 3, logger), Matchers.lessThan(14d)); // ~12 in practice + assertThat(testCompression(100000, 1000, 3, logger), Matchers.lessThan(15d)); // ~13 in practice + assertThat(testCompression(100000, 100, 3, logger), Matchers.lessThan(21d)); // ~20 in practice } private static double testCompression(int numDocs, int numDocsPerSecond, int numNodes, Logger logger) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index e4856fd01136b..2726380b7e3bc 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -82,4 +82,20 @@ public void testGeohashExtremes() { assertEquals("xbpbpbpbpbpb", GeoHashUtils.stringEncode(180, 0)); assertEquals("zzzzzzzzzzzz", GeoHashUtils.stringEncode(180, 90)); } + + public void testLongGeohashes() { + for (int i = 0; i < 100000; i++) { + String geohash = randomGeohash(12, 12); + GeoPoint expected = GeoPoint.fromGeohash(geohash); + // Adding some random geohash characters at the end + String extendedGeohash = geohash + randomGeohash(1, 10); + GeoPoint actual = GeoPoint.fromGeohash(extendedGeohash); + assertEquals("Additional data points above 12 should be ignored [" + extendedGeohash + "]" , expected, actual); + + Rectangle expectedBbox = GeoHashUtils.bbox(geohash); + Rectangle actualBbox = GeoHashUtils.bbox(extendedGeohash); + 
assertEquals("Additional data points above 12 should be ignored [" + extendedGeohash + "]" , expectedBbox, actualBbox); + + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java new file mode 100644 index 0000000000000..efec56e788da1 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.geo; + +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class GeoUtilTests extends ESTestCase { + + public void testPrecisionParser() throws IOException { + assertEquals(10, parsePrecision(builder -> builder.field("test", 10))); + assertEquals(10, parsePrecision(builder -> builder.field("test", 10.2))); + assertEquals(6, parsePrecision(builder -> builder.field("test", "6"))); + assertEquals(7, parsePrecision(builder -> builder.field("test", "1km"))); + assertEquals(7, parsePrecision(builder -> builder.field("test", "1.1km"))); + } + + public void testIncorrectPrecisionParser() { + expectThrows(NumberFormatException.class, () -> parsePrecision(builder -> builder.field("test", "10.1.1.1"))); + expectThrows(NumberFormatException.class, () -> parsePrecision(builder -> builder.field("test", "364.4smoots"))); + assertEquals( + "precision too high [0.01mm]", + expectThrows(IllegalArgumentException.class, () -> parsePrecision(builder -> builder.field("test", "0.01mm"))).getMessage() + ); + } + + /** + * Invokes GeoUtils.parsePrecision parser on the value generated by tokenGenerator + *

+ * The supplied tokenGenerator should generate a single field that contains the precision in + * one of the supported formats or a malformed precision value if error handling is tested. The + * method returns the parsed value or throws an exception if the precision value is malformed. + */ + private int parsePrecision(CheckedConsumer<XContentBuilder, IOException> tokenGenerator) throws IOException { + XContentBuilder builder = jsonBuilder().startObject(); + tokenGenerator.accept(builder); + builder.endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); // { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // field name + assertTrue(parser.nextToken().isValue()); // field value + int precision = GeoUtils.parsePrecision(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); // } + assertNull(parser.nextToken()); // no more tokens + return precision; + } +} diff --git a/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java index 76b52c08a854f..ee1933e3a1043 100644 --- a/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; -import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.StringReader; @@ -32,7 +31,6 @@ import java.nio.charset.StandardCharsets; import java.util.Arrays; -import static org.elasticsearch.common.io.Streams.copy; import static org.elasticsearch.common.io.Streams.copyToString; import static org.hamcrest.Matchers.equalTo; @@ -40,20 +38,11 @@ * Unit tests for {@link org.elasticsearch.common.io.Streams}.
*/ public class StreamsTests extends ESTestCase { - public void testCopyFromInputStream() throws IOException { - byte[] content = "content".getBytes(StandardCharsets.UTF_8); - ByteArrayInputStream in = new ByteArrayInputStream(content); - ByteArrayOutputStream out = new ByteArrayOutputStream(content.length); - long count = copy(in, out); - - assertThat(count, equalTo((long) content.length)); - assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true)); - } public void testCopyFromByteArray() throws IOException { byte[] content = "content".getBytes(StandardCharsets.UTF_8); ByteArrayOutputStream out = new ByteArrayOutputStream(content.length); - copy(content, out); + Streams.copy(content, out); assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true)); } @@ -61,7 +50,7 @@ public void testCopyFromReader() throws IOException { String content = "content"; StringReader in = new StringReader(content); StringWriter out = new StringWriter(); - int count = copy(in, out); + int count = Streams.copy(in, out); assertThat(content.length(), equalTo(count)); assertThat(out.toString(), equalTo(content)); } @@ -69,7 +58,7 @@ public void testCopyFromReader() throws IOException { public void testCopyFromString() throws IOException { String content = "content"; StringWriter out = new StringWriter(); - copy(content, out); + Streams.copy(content, out); assertThat(out.toString(), equalTo(content)); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 27656e9bc092d..cee6c2e4cb823 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; @@ -41,6 +42,7 @@ import java.util.Map; import java.util.Objects; import java.util.TreeMap; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -812,4 +814,30 @@ public void testInvalidEnum() throws IOException { } assertEquals(0, input.available()); } + + private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + out.writeTimeValue(value); + assertEquals(expectedSize, out.size()); + + StreamInput in = out.bytes().streamInput(); + TimeValue inValue = in.readTimeValue(); + + assertThat(inValue, equalTo(value)); + assertThat(inValue.duration(), equalTo(value.duration())); + assertThat(inValue.timeUnit(), equalTo(value.timeUnit())); + } + + public void testTimeValueSerialize() throws Exception { + assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3); + assertEqualityAfterSerialize(TimeValue.timeValueNanos(-1), 2); + assertEqualityAfterSerialize(TimeValue.timeValueNanos(1), 2); + assertEqualityAfterSerialize(TimeValue.timeValueSeconds(30), 2); + + final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values())); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeZLong(timeValue.duration()); + assertEqualityAfterSerialize(timeValue, 1 + 
out.bytes().length()); } } diff --git a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java index fdb530749e105..490f7961a894d 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java @@ -33,6 +33,7 @@ import java.util.Map; import java.util.Set; import java.util.stream.IntStream; +import java.nio.charset.StandardCharsets; import static org.elasticsearch.common.logging.DeprecationLogger.WARNING_HEADER_PATTERN; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; @@ -246,6 +247,60 @@ public void testEncode() { assertThat(DeprecationLogger.encode(s), IsSame.sameInstance(s)); } + + public void testWarningHeaderCountSetting() throws IOException { + // Test that the number of warning headers doesn't exceed 'http.max_warning_header_count' + final int maxWarningHeaderCount = 2; + Settings settings = Settings.builder() + .put("http.max_warning_header_count", maxWarningHeaderCount) + .build(); + try (ThreadContext threadContext = new ThreadContext(settings)) { + final Set<ThreadContext> threadContexts = Collections.singleton(threadContext); + // try to log three warning messages + logger.deprecated(threadContexts, "A simple message 1"); + logger.deprecated(threadContexts, "A simple message 2"); + logger.deprecated(threadContexts, "A simple message 3"); + final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders(); + final List<String> responses = responseHeaders.get("Warning"); + + assertEquals(maxWarningHeaderCount, responses.size()); + assertThat(responses.get(0), warningValueMatcher); + assertThat(responses.get(0), containsString("\"A simple message 1")); + assertThat(responses.get(1), warningValueMatcher); + assertThat(responses.get(1), containsString("\"A simple message 2")); + } + } + + public void testWarningHeaderSizeSetting() throws IOException { + // Test that the size of warning headers doesn't exceed 'http.max_warning_header_size' + Settings settings = Settings.builder() + .put("http.max_warning_header_size", "1Kb") + .build(); + + byte[] arr = new byte[300]; + String message1 = new String(arr, StandardCharsets.UTF_8) + "1"; + String message2 = new String(arr, StandardCharsets.UTF_8) + "2"; + String message3 = new String(arr, StandardCharsets.UTF_8) + "3"; + + try (ThreadContext threadContext = new ThreadContext(settings)) { + final Set<ThreadContext> threadContexts = Collections.singleton(threadContext); + // try to log three warning messages + logger.deprecated(threadContexts, message1); + logger.deprecated(threadContexts, message2); + logger.deprecated(threadContexts, message3); + final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders(); + final List<String> responses = responseHeaders.get("Warning"); + + long warningHeadersSize = 0L; + for (String response : responses) { + warningHeadersSize += "Warning".getBytes(StandardCharsets.UTF_8).length + + response.getBytes(StandardCharsets.UTF_8).length; + } + // assert that the size of all warning headers is less than or equal to 1Kb + assertTrue(warningHeadersSize <= 1024); + } + } + private String range(int lowerInclusive, int upperInclusive) { return IntStream .range(lowerInclusive, upperInclusive + 1) diff --git a/server/src/test/java/org/apache/lucene/search/QueriesTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java similarity index 55% rename from
server/src/test/java/org/apache/lucene/search/QueriesTests.java rename to server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java index 9256c8b31a3f7..a1236fd53df92 100644 --- a/server/src/test/java/org/apache/lucene/search/QueriesTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/search/QueriesTests.java @@ -17,10 +17,16 @@ * under the License. */ -package org.apache.lucene.search; +package org.elasticsearch.common.lucene.search; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.TermQuery; import org.elasticsearch.Version; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -43,4 +49,25 @@ public void testNonNestedQuery() { } } + public void testIsNegativeQuery() { + assertFalse(Queries.isNegativeQuery(new MatchAllDocsQuery())); + assertFalse(Queries.isNegativeQuery(new BooleanQuery.Builder().build())); + assertFalse(Queries.isNegativeQuery(new BooleanQuery.Builder() + .add(new TermQuery(new Term("foo", "bar")), Occur.MUST).build())); + assertTrue(Queries.isNegativeQuery(new BooleanQuery.Builder() + .add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT).build())); + assertFalse(Queries.isNegativeQuery(new BooleanQuery.Builder() + .add(new MatchAllDocsQuery(), Occur.MUST) + .add(new MatchAllDocsQuery(), Occur.MUST_NOT).build())); + } + + public void testFixNegativeQuery() { + assertEquals(new BooleanQuery.Builder() + .add(new MatchAllDocsQuery(), Occur.FILTER) + .add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT).build(), + Queries.fixNegativeQueryIfNeeded( + new BooleanQuery.Builder() + .add(new TermQuery(new Term("foo", "bar")), Occur.MUST_NOT) + .build())); + } } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index c5e66a3bf2ad5..115cfcdf26f9c 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -28,7 +28,7 @@ import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; @@ -63,41 +63,41 @@ public void testVersions() throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); - MatcherAssert.assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); + MatcherAssert.assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); Document doc = new Document(); - doc.add(new Field(UidFieldMapper.NAME, "1", 
UidFieldMapper.Defaults.FIELD_TYPE)); + doc.add(new Field(IdFieldMapper.NAME, "1", IdFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1)); - writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc); + writer.updateDocument(new Term(IdFieldMapper.NAME, "1"), doc); directoryReader = reopen(directoryReader); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1L)); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(1L)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")).version, equalTo(1L)); doc = new Document(); - Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE); + Field uid = new Field(IdFieldMapper.NAME, "1", IdFieldMapper.Defaults.FIELD_TYPE); Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 2); doc.add(uid); doc.add(version); - writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc); + writer.updateDocument(new Term(IdFieldMapper.NAME, "1"), doc); directoryReader = reopen(directoryReader); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2L)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2L)); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(2L)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")).version, equalTo(2L)); // test reuse of uid field doc = new Document(); version.setLongValue(3); doc.add(uid); doc.add(version); - writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc); + writer.updateDocument(new Term(IdFieldMapper.NAME, "1"), doc); directoryReader = reopen(directoryReader); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3L)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3L)); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(3L)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")).version, equalTo(3L)); - writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1")); + writer.deleteDocuments(new Term(IdFieldMapper.NAME, "1")); directoryReader = reopen(directoryReader); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue()); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), nullValue()); directoryReader.close(); writer.close(); dir.close(); @@ -111,33 +111,33 @@ public void testNestedDocuments() throws IOException { for (int i = 0; i < 4; ++i) { // Nested Document doc = new Document(); - doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + doc.add(new Field(IdFieldMapper.NAME, "1", IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); docs.add(doc); } // Root Document doc = new Document(); - doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE)); + doc.add(new Field(IdFieldMapper.NAME, "1", IdFieldMapper.Defaults.FIELD_TYPE)); NumericDocValuesField version = new 
NumericDocValuesField(VersionFieldMapper.NAME, 5L); doc.add(version); docs.add(doc); - writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs); + writer.updateDocuments(new Term(IdFieldMapper.NAME, "1"), docs); DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "_na_", 1)); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5L)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5L)); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(5L)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")).version, equalTo(5L)); version.setLongValue(6L); - writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs); + writer.updateDocuments(new Term(IdFieldMapper.NAME, "1"), docs); version.setLongValue(7L); - writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs); + writer.updateDocuments(new Term(IdFieldMapper.NAME, "1"), docs); directoryReader = reopen(directoryReader); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7L)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7L)); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(7L)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")).version, equalTo(7L)); - writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1")); + writer.deleteDocuments(new Term(IdFieldMapper.NAME, "1")); directoryReader = reopen(directoryReader); - assertThat(loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); - assertThat(loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue()); + assertThat(loadVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND)); + assertThat(loadDocIdAndVersion(directoryReader, new Term(IdFieldMapper.NAME, "1")), nullValue()); directoryReader.close(); writer.close(); dir.close(); @@ -150,15 +150,15 @@ public void testCache() throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); Document doc = new Document(); - doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE)); + doc.add(new Field(IdFieldMapper.NAME, "6", IdFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); DirectoryReader reader = DirectoryReader.open(writer); // should increase cache size by 1 - assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); + assertEquals(87, loadVersion(reader, new Term(IdFieldMapper.NAME, "6"))); assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); // should be cache hit - assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); + assertEquals(87, loadVersion(reader, new Term(IdFieldMapper.NAME, "6"))); assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); reader.close(); @@ -175,15 +175,15 @@ public void testCacheFilterReader() throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); Document doc = new Document(); - doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE)); + doc.add(new 
Field(IdFieldMapper.NAME, "6", IdFieldMapper.Defaults.FIELD_TYPE)); doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87)); writer.addDocument(doc); DirectoryReader reader = DirectoryReader.open(writer); - assertEquals(87, loadVersion(reader, new Term(UidFieldMapper.NAME, "6"))); + assertEquals(87, loadVersion(reader, new Term(IdFieldMapper.NAME, "6"))); assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); // now wrap the reader DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, new ShardId("bogus", "_na_", 5)); - assertEquals(87, loadVersion(wrapped, new Term(UidFieldMapper.NAME, "6"))); + assertEquals(87, loadVersion(wrapped, new Term(IdFieldMapper.NAME, "6"))); // same size map: core cache key is shared assertEquals(size+1, VersionsAndSeqNoResolver.lookupStates.size()); diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 7e3645b33ce58..30f86241cbd6d 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -288,79 +288,6 @@ public void testGetDocWithMultivaluedFields() throws Exception { assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); } - public void testGetDocWithMultivaluedFieldsMultiTypeBWC() throws Exception { - assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); - String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") - .startObject("properties") - .startObject("field").field("type", "text").field("store", true).endObject() - .endObject() - .endObject().endObject()); - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type2") - .startObject("properties") - .startObject("field").field("type", "text").field("store", true).endObject() - .endObject() - .endObject().endObject()); - assertAcked(prepareCreate("test") - .addMapping("type1", mapping1, XContentType.JSON) - .addMapping("type2", mapping2, XContentType.JSON) - // multi types in 5.6 - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id))); - - ensureGreen(); - - GetResponse response = client().prepareGet("test", "type1", "1").get(); - assertThat(response.isExists(), equalTo(false)); - response = client().prepareGet("test", "type2", "1").get(); - assertThat(response.isExists(), equalTo(false)); - - client().prepareIndex("test", "type1", "1") - .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); - - client().prepareIndex("test", "type2", "1") - .setSource(jsonBuilder().startObject().array("field", "1", "2").endObject()).get(); - - response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); - assertThat(response.isExists(), equalTo(true)); - assertThat(response.getId(), equalTo("1")); - assertThat(response.getType(), equalTo("type1")); - Set fields = new HashSet<>(response.getFields().keySet()); - assertThat(fields, equalTo(singleton("field"))); - assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); - assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1")); - assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); - - - response = client().prepareGet("test", "type2", "1").setStoredFields("field").get(); - assertThat(response.isExists(), 
equalTo(true)); - assertThat(response.getType(), equalTo("type2")); - assertThat(response.getId(), equalTo("1")); - fields = new HashSet<>(response.getFields().keySet()); - assertThat(fields, equalTo(singleton("field"))); - assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); - assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1")); - assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); - - // Now test values being fetched from stored fields. - refresh(); - response = client().prepareGet("test", "type1", "1").setStoredFields("field").get(); - assertThat(response.isExists(), equalTo(true)); - assertThat(response.getId(), equalTo("1")); - fields = new HashSet<>(response.getFields().keySet()); - assertThat(fields, equalTo(singleton("field"))); - assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); - assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1")); - assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); - - response = client().prepareGet("test", "type2", "1").setStoredFields("field").get(); - assertThat(response.isExists(), equalTo(true)); - assertThat(response.getId(), equalTo("1")); - fields = new HashSet<>(response.getFields().keySet()); - assertThat(fields, equalTo(singleton("field"))); - assertThat(response.getFields().get("field").getValues().size(), equalTo(2)); - assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1")); - assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2")); - } - public void testGetWithVersion() { assertAcked(prepareCreate("test").addAlias(new Alias("alias")) .setSettings(Settings.builder().put("index.refresh_interval", -1))); @@ -610,48 +537,6 @@ public void testGetFieldsMetaDataWithRouting() throws Exception { assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); } - public void testGetFieldsMetaDataWithParentChild() throws Exception { - assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); - assertAcked(prepareCreate("test") - .addMapping("parent") - .addMapping("my-type1", "_parent", "type=parent", "field1", "type=keyword,store=true") - .addAlias(new Alias("alias")) - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id))); - // multi types in 5.6 - - client().prepareIndex("test", "my-type1", "1") - .setRouting("1") - .setParent("parent_1") - .setSource(jsonBuilder().startObject().field("field1", "value").endObject()) - .get(); - - GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1") - .setRouting("1") - .setStoredFields("field1") - .get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - assertThat(getResponse.getField("_parent").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_parent").getValue().toString(), equalTo("parent_1")); - - flush(); - - getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1") - .setStoredFields("field1") - .setRouting("1") - .get(); - 
assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false)); - assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value")); - assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1")); - assertThat(getResponse.getField("_parent").isMetadataField(), equalTo(true)); - assertThat(getResponse.getField("_parent").getValue().toString(), equalTo("parent_1")); - } - public void testGetFieldsNonLeafField() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias")) .addMapping("my-type1", jsonBuilder().startObject().startObject("my-type1").startObject("properties") @@ -682,8 +567,8 @@ public void testGetFieldsNonLeafField() throws Exception { public void testGetFieldsComplexField() throws Exception { assertAcked(prepareCreate("my-index") // multi types in 5.6 - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id)) - .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties") + .setSettings(Settings.builder().put("index.refresh_interval", -1)) + .addMapping("my-type", jsonBuilder().startObject().startObject("my-type").startObject("properties") .startObject("field1").field("type", "object").startObject("properties") .startObject("field2").field("type", "object").startObject("properties") .startObject("field3").field("type", "object").startObject("properties") @@ -718,20 +603,19 @@ public void testGetFieldsComplexField() throws Exception { logger.info("indexing documents"); - client().prepareIndex("my-index", "my-type1", "1").setSource(source, XContentType.JSON).get(); - client().prepareIndex("my-index", "my-type2", "1").setSource(source, XContentType.JSON).get(); + client().prepareIndex("my-index", "my-type", "1").setSource(source, XContentType.JSON).get(); logger.info("checking real time retrieval"); String field = "field1.field2.field3.field4"; - GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1").setStoredFields(field).get(); + GetResponse getResponse = client().prepareGet("my-index", "my-type", "1").setStoredFields(field).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getField(field).isMetadataField(), equalTo(false)); assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1")); assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2")); - getResponse = client().prepareGet("my-index", "my-type2", "1").setStoredFields(field).get(); + getResponse = client().prepareGet("my-index", "my-type", "1").setStoredFields(field).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getField(field).isMetadataField(), equalTo(false)); assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); @@ -756,14 +640,7 @@ public void testGetFieldsComplexField() throws Exception { logger.info("checking post-flush retrieval"); - getResponse = client().prepareGet("my-index", "my-type1", "1").setStoredFields(field).get(); - assertThat(getResponse.isExists(), equalTo(true)); - assertThat(getResponse.getField(field).isMetadataField(), equalTo(false)); - assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); - 
assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1")); - assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2")); - - getResponse = client().prepareGet("my-index", "my-type2", "1").setStoredFields(field).get(); + getResponse = client().prepareGet("my-index", "my-type", "1").setStoredFields(field).get(); assertThat(getResponse.isExists(), equalTo(true)); assertThat(getResponse.getField(field).isMetadataField(), equalTo(false)); assertThat(getResponse.getField(field).getValues().size(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index dde9c1ca3bdb6..008b05f6a1e95 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -242,7 +242,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { assertSame(listener, indexService.getIndexOperationListeners().get(1)); ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null); - Engine.Index index = new Engine.Index(new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id())), doc); + Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc); ShardId shardId = new ShardId(new Index("foo", "bar"), 0); for (IndexingOperationListener l : indexService.getIndexOperationListeners()) { l.preIndex(shardId, index); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index a923a4b80992d..b7da5add2acf6 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -405,27 +405,6 @@ public void testGCDeletesSetting() { assertEquals(-1, settings.getGcDeletesInMillis()); } - public void testIsTTLPurgeDisabled() { - IndexMetaData metaData = newIndexMeta("index", Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING.getKey(), false) - .build()); - IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY); - assertFalse(settings.isTTLPurgeDisabled()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexSettings.INDEX_TTL_DISABLE_PURGE_SETTING.getKey(), - "true").build())); - assertTrue(settings.isTTLPurgeDisabled()); - - settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); - assertFalse("reset to default", settings.isTTLPurgeDisabled()); - - metaData = newIndexMeta("index", Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()); - settings = new IndexSettings(metaData, Settings.EMPTY); - assertFalse(settings.isTTLPurgeDisabled()); - } - public void testTranslogFlushSizeThreshold() { ByteSizeValue translogFlushThresholdSize = new ByteSizeValue(Math.abs(randomInt())); ByteSizeValue actualValue = ByteSizeValue.parseBytesSizeValue(translogFlushThresholdSize.getBytes() + "B", @@ -560,51 +539,6 @@ public void testArchiveBrokenIndexSettings() { assertEquals("2s", settings.get("index.refresh_interval")); } - public void testSingleTypeSetting() { - { - IndexSettings index = newIndexSettings(newIndexMeta("index", Settings.EMPTY), Settings.EMPTY); - IndexScopedSettings scopedSettings = index.getScopedSettings(); - Settings build = 
Settings.builder().put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, randomBoolean()).build(); - scopedSettings.archiveUnknownOrInvalidSettings(build, e -> fail("unexpected unknown setting " + e), - (e, ex) -> fail("unexpected illegal setting")); - assertTrue(index.isSingleType()); - expectThrows(IllegalArgumentException.class, () -> { - index.getScopedSettings() - .validate(Settings.builder().put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, randomBoolean()).build(), false); - }); - } - { - boolean single_type = randomBoolean(); - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, single_type) - .build(); - IndexMetaData meta = IndexMetaData.builder("index").settings(settings).build(); - IndexSettings index = newIndexSettings(meta, Settings.EMPTY); - IndexScopedSettings scopedSettings = index.getScopedSettings(); - Settings build = Settings.builder().put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, randomBoolean()).build(); - scopedSettings.archiveUnknownOrInvalidSettings(build, e -> fail("unexpected unknown setting " + e), - (e, ex) -> fail("unexpected illegal setting")); - assertEquals(single_type, index.isSingleType()); - } - - { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, false) - .build(); - IndexMetaData meta = IndexMetaData.builder("index").settings(settings).build(); - try { - newIndexSettings(meta, Settings.EMPTY); - fail("should fail with assertion error"); - } catch (AssertionError e) { - // all is well - } - } - } - public void testQueryDefaultField() { IndexSettings index = newIndexSettings( newIndexMeta("index", Settings.EMPTY), Settings.EMPTY diff --git a/server/src/test/java/org/elasticsearch/index/IndexTests.java b/server/src/test/java/org/elasticsearch/index/IndexTests.java index fda181614ffa1..f1360071745d0 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexTests.java @@ -60,4 +60,18 @@ public void testXContent() throws IOException { parser.nextToken(); // the beginning of the parser assertThat(Index.fromXContent(parser), equalTo(original)); } + + public void testEquals() { + Index index1 = new Index("a", "a"); + Index index2 = new Index("a", "a"); + Index index3 = new Index("a", "b"); + Index index4 = new Index("b", "a"); + String s = "Some random other object"; + assertEquals(index1, index1); + assertEquals(index1, index2); + assertNotEquals(index1, null); + assertNotEquals(index1, s); + assertNotEquals(index1, index3); + assertNotEquals(index1, index4); + } } diff --git a/server/src/test/java/org/elasticsearch/index/VersionTypeTests.java b/server/src/test/java/org/elasticsearch/index/VersionTypeTests.java index 2afe0b7feacfd..21ac77e889b85 100644 --- a/server/src/test/java/org/elasticsearch/index/VersionTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/VersionTypeTests.java @@ -35,6 +35,11 @@ public void testInternalVersionConflict() throws Exception { assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.MATCH_ANY, randomBoolean())); 
assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY)); + assertEquals("current version [1] is different than the one provided [2]", + VersionType.INTERNAL.explainConflictForReads(1, 2)); + assertEquals("document does not exist (expected version [2])", + VersionType.INTERNAL.explainConflictForReads(Versions.NOT_FOUND, 2)); + // deletes assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.MATCH_DELETED, true)); assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, Versions.MATCH_DELETED, true)); @@ -70,6 +75,11 @@ public void testVersionValidation() { assertTrue(VersionType.EXTERNAL.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE))); assertFalse(VersionType.EXTERNAL.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1))); + assertEquals("current version [1] is different than the one provided [2]", + VersionType.EXTERNAL.explainConflictForReads(1, 2)); + assertEquals("document does not exist (expected version [2])", + VersionType.EXTERNAL.explainConflictForReads(Versions.NOT_FOUND, 2)); + assertTrue(VersionType.EXTERNAL_GTE.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE))); assertFalse(VersionType.EXTERNAL_GTE.validateVersionForWrites(Versions.MATCH_ANY)); assertFalse(VersionType.EXTERNAL_GTE.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0))); @@ -77,6 +87,11 @@ public void testVersionValidation() { assertTrue(VersionType.EXTERNAL_GTE.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE))); assertFalse(VersionType.EXTERNAL_GTE.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1))); + assertEquals("current version [1] is different than the one provided [2]", + VersionType.EXTERNAL_GTE.explainConflictForReads(1, 2)); + assertEquals("document does not exist (expected version [2])", + VersionType.EXTERNAL_GTE.explainConflictForReads(Versions.NOT_FOUND, 2)); + assertTrue(VersionType.INTERNAL.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE))); assertTrue(VersionType.INTERNAL.validateVersionForWrites(Versions.MATCH_ANY)); assertFalse(VersionType.INTERNAL.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0))); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 9cdc68444ea16..60913c644eadb 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -235,7 +235,7 @@ public void testVersionMapAfterAutoIDDocument() throws IOException { assertEquals(2, searcher.reader().numDocs()); } assertFalse("safe access should NOT be required last indexing round was only append only", engine.isSafeAccessRequired()); - engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid())); + engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid(), primaryTerm.get())); assertTrue("safe access should be required", engine.isSafeAccessRequired()); engine.refresh("test"); assertTrue("safe access should be required", engine.isSafeAccessRequired()); @@ -317,7 +317,7 @@ public void testSegments() throws Exception { assertThat(segments.get(1).isCompound(), equalTo(true)); - engine.delete(new Engine.Delete("test", "1", newUid(doc))); + engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get())); engine.refresh("test"); segments = 
engine.segments(false); @@ -890,7 +890,7 @@ public void testSimpleOperations() throws Exception { searchResult.close(); // now delete - engine.delete(new Engine.Delete("test", "1", newUid(doc))); + engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get())); // it's not deleted yet searchResult = engine.acquireSearcher("test"); @@ -917,7 +917,7 @@ document = testDocumentWithTextField(); document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); doc = testParsedDocument("1", null, document, B_1, null); - engine.index(new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED)); + engine.index(new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED)); // it's not there... searchResult = engine.acquireSearcher("test"); @@ -994,7 +994,7 @@ public void testSearchResultRelease() throws Exception { // don't release the search result yet... // delete, refresh and do a new search, it should not be there - engine.delete(new Engine.Delete("test", "1", newUid(doc))); + engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get())); engine.refresh("test"); Engine.Searcher updateSearchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); @@ -1113,7 +1113,7 @@ public void testRenewSyncFlush() throws Exception { engine.index(doc4); assertEquals(engine.getLastWriteNanos(), doc4.startTime()); } else { - Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid()); + Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid(), primaryTerm.get()); engine.delete(delete); assertEquals(engine.getLastWriteNanos(), delete.startTime()); } @@ -1147,7 +1147,7 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { } if (randomBoolean()) { final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), - SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); } trimUnsafeCommits(config); @@ -1177,7 +1177,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { public void testVersioningNewCreate() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED); + Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); @@ -1189,7 +1189,7 @@ public void testReplicatedVersioningWithFlush() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED); + Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); assertTrue(indexResult.isCreated()); @@ -1208,7 +1208,7 @@ public void testReplicatedVersioningWithFlush() throws IOException { replicaEngine.flush(); } - Engine.Index update = new Engine.Index(newUid(doc), doc, 1); + Engine.Index update = new Engine.Index(newUid(doc), 
primaryTerm.get(), doc, 1); Engine.IndexResult updateResult = engine.index(update); assertThat(updateResult.getVersion(), equalTo(2L)); assertFalse(updateResult.isCreated()); @@ -1237,14 +1237,14 @@ public void testVersionedUpdate() throws IOException { final BiFunction<String, Engine.SearcherScope, Engine.Searcher> searcherFactory = engine::acquireSearcher; ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED); + Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(1, get.version()); } - Engine.Index update_1 = new Engine.Index(newUid(doc), doc, 1); + Engine.Index update_1 = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 1); Engine.IndexResult update_1_result = engine.index(update_1); assertThat(update_1_result.getVersion(), equalTo(2L)); @@ -1252,7 +1252,7 @@ assertEquals(2, get.version()); } - Engine.Index update_2 = new Engine.Index(newUid(doc), doc, 2); + Engine.Index update_2 = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 2); Engine.IndexResult update_2_result = engine.index(update_2); assertThat(update_2_result.getVersion(), equalTo(3L)); @@ -1293,7 +1293,7 @@ public void testForceMerge() throws IOException { ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), B_1, null); Engine.Index index = indexForDoc(doc); - engine.delete(new Engine.Delete(index.type(), index.id(), index.uid())); + engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); engine.forceMerge(true, 10, true, false, false); //expunge deletes engine.refresh("test"); @@ -1305,7 +1305,7 @@ doc = testParsedDocument(Integer.toString(1), null, testDocument(), B_1, null); index = indexForDoc(doc); - engine.delete(new Engine.Delete(index.type(), index.id(), index.uid())); + engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); engine.forceMerge(true, 10, false, false, false); //expunge deletes engine.refresh("test"); assertEquals(engine.segments(true).size(), 1); @@ -1892,7 +1892,7 @@ public void testBasicCreatedFlag() throws IOException { indexResult = engine.index(index); assertFalse(indexResult.isCreated()); - engine.delete(new Engine.Delete("doc", "1", newUid(doc))); + engine.delete(new Engine.Delete("doc", "1", newUid(doc), primaryTerm.get())); index = indexForDoc(doc); indexResult = engine.index(index); @@ -2368,7 +2368,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { { store.createEmpty(); final String translogUUID = - Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, @@ -2409,7 +2409,7 @@ // 
open index with new tlog { final String translogUUID = - Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { @@ -2444,7 +2444,8 @@ public void testMissingTranslog() throws IOException { // test that we can force start the engine, even if the translog is missing. engine.close(); // fake a new translog, causing the engine to point to a missing one. - Translog translog = createTranslog(); + final long primaryTerm = randomNonNegativeLong(); + Translog translog = createTranslog(() -> primaryTerm); long id = translog.currentFileGeneration(); translog.close(); IOUtils.rm(translog.location().resolve(Translog.getFilename(id))); @@ -2455,7 +2456,7 @@ // expected } // when a new translog is created it should be ok - final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); + final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm); store.associateIndexWithNewTranslog(translogUUID); EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null); engine = new InternalEngine(config); @@ -2521,7 +2522,7 @@ public void testTranslogCleanUpPostCommitCrash() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get(); store.createEmpty(); - final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId); + final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null, @@ -2659,7 +2660,7 @@ public void testTranslogReplay() throws IOException { } parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner(); assertEquals(flush ? 
1 : 2, parser.appliedOperations()); - engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc))); + engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc), primaryTerm.get())); if (randomBoolean()) { engine.refresh("test"); } else { @@ -2685,11 +2686,11 @@ public void testRecoverFromForeignTranslog() throws IOException { engine.close(); final Path badTranslogLog = createTempDir(); - final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new Translog( new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), - badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED); - translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8")))); + badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); @@ -2703,7 +2704,7 @@ public void testRecoverFromForeignTranslog() throws IOException { new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(), - new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get); try { InternalEngine internalEngine = new InternalEngine(brokenConfig); fail("translog belongs to a different engine"); @@ -2886,11 +2887,11 @@ public void testHandleDocumentFailure() throws Exception { final Engine.DeleteResult deleteResult; if (randomBoolean()) { throwingIndexWriter.get().setThrowFailure(() -> new IOException("simulated")); - deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1))); + deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1), primaryTerm.get())); assertThat(deleteResult.getFailure(), instanceOf(IOException.class)); } else { throwingIndexWriter.get().setThrowFailure(() -> new IllegalArgumentException("simulated max token length")); - deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1))); + deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1), primaryTerm.get())); assertThat(deleteResult.getFailure(), instanceOf(IllegalArgumentException.class)); } @@ -2923,7 +2924,7 @@ public BytesRef binaryValue() { if (randomBoolean()) { engine.index(indexForDoc(doc1)); } else { - engine.delete(new Engine.Delete("test", "", newUid(doc1))); + engine.delete(new Engine.Delete("test", "", newUid(doc1), primaryTerm.get())); } fail("engine should be closed"); } catch (Exception e) { @@ -3324,7 +3325,7 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { } try (Store store = createStore(newFSDirectory(storeDir))) { if (randomBoolean() || true) { - final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String translogUUID = 
Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); } try (Engine engine = new InternalEngine(configSupplier.apply(store))) { @@ -3478,7 +3479,7 @@ public void testSequenceIDs() throws Exception { seqID = getSequenceID(engine, newGet(false, doc)); logger.info("--> got seqID: {}", seqID); assertThat(seqID.v1(), equalTo(0L)); - assertThat(seqID.v2(), equalTo(2L)); + assertThat(seqID.v2(), equalTo(primaryTerm.get())); // Index the same document again document = testDocumentWithTextField(); @@ -3490,7 +3491,7 @@ public void testSequenceIDs() throws Exception { seqID = getSequenceID(engine, newGet(false, doc)); logger.info("--> got seqID: {}", seqID); assertThat(seqID.v1(), equalTo(1L)); - assertThat(seqID.v2(), equalTo(2L)); + assertThat(seqID.v2(), equalTo(primaryTerm.get())); // Index the same document for the third time, this time changing the primary term document = testDocumentWithTextField(); @@ -3704,13 +3705,12 @@ protected long doGenerateSeqNoForOperation(Operation operation) { } }; noOpEngine.recoverFromTranslog(); - final long primaryTerm = randomNonNegativeLong(); - final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm); + final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get()); final String reason = randomAlphaOfLength(16); noOpEngine.noOp( new Engine.NoOp( maxSeqNo + 1, - primaryTerm, + primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY), System.nanoTime(), reason)); @@ -3728,7 +3728,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { assertThat(last, instanceOf(Translog.NoOp.class)); final Translog.NoOp noOp = (Translog.NoOp) last; assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 1))); - assertThat(noOp.primaryTerm(), equalTo(primaryTerm)); + assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get())); assertThat(noOp.reason(), equalTo(reason)); } finally { IOUtils.close(noOpEngine); @@ -3931,7 +3931,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { if (operation.opType() == Translog.Operation.Type.NO_OP) { assertEquals(2, operation.primaryTerm()); } else { - assertEquals(1, operation.primaryTerm()); + assertEquals(primaryTerm.get(), operation.primaryTerm()); } } @@ -4131,7 +4131,7 @@ public void testKeepTranslogAfterGlobalCheckpoint() throws Exception { store = createStore(); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); store.createEmpty(); - final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId); + final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, @@ -4201,7 +4201,7 @@ public void testConcurrentAppendUpdateAndRefresh() throws InterruptedException, Engine.Index operation = appendOnlyPrimary(doc, false, 1); engine.index(operation); if (rarely()) { - engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid())); + engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid(), primaryTerm.get())); numDeletes.incrementAndGet(); } else { doc = testParsedDocument(docID, null, testDocumentWithTextField("updated"), @@ -4340,7 +4340,7 @@ public void testShouldPeriodicallyFlush() throws Exception { 
engine.index(indexForDoc(doc)); } assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false)); - long flushThreshold = RandomNumbers.randomLongBetween(random(), 100, + long flushThreshold = RandomNumbers.randomLongBetween(random(), 120, engine.getTranslog().stats().getUncommittedSizeInBytes()- extraTranslogSizeInNewEngine); final IndexSettings indexSettings = engine.config().getIndexSettings(); final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) @@ -4382,7 +4382,7 @@ } public void testStressShouldPeriodicallyFlush() throws Exception { - final long flushThreshold = randomLongBetween(100, 5000); + final long flushThreshold = randomLongBetween(120, 5000); final long generationThreshold = randomLongBetween(1000, 5000); final IndexSettings indexSettings = engine.config().getIndexSettings(); final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) @@ -4423,7 +4423,7 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false); // first index an append only document and then delete it, such that we have it in the tombstones engine.index(doc); - engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid())); + engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), primaryTerm.get())); // now index more append only docs and refresh so we re-enable the optimization for unsafe version map ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null); @@ -4559,7 +4559,7 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { if (randomBoolean()) { engine.index(indexForDoc(parsedDocument)); } else { - engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()))); + engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()), primaryTerm.get())); } } } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 482f8d90bedfd..c204690c76e07 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -43,10 +42,8 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; -import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; @@ -124,8 +121,6 @@ 
public <IFD extends IndexFieldData<?>> IFD getForField(String type, String field .docValues(docValues).build(context).fieldType(); } else if (type.equals("geo_point")) { fieldType = new GeoPointFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType(); - } else if (type.equals("_parent")) { - fieldType = new ParentFieldMapper.Builder("_type").type(fieldName).build(context).fieldType(); } else if (type.equals("binary")) { fieldType = new BinaryFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType(); } else { diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/UidFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/UidFieldDataTests.java deleted file mode 100644 index 240c7fc37faee..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/fielddata/UidFieldDataTests.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.fielddata; - -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; -import java.util.Locale; - -public class UidFieldDataTests extends ESTestCase { - - private static class DummySortedDocValues extends SortedDocValues { - - private int doc = -1; - - @Override - public int ordValue() throws IOException { - return doc; - } - - @Override - public BytesRef lookupOrd(int ord) throws IOException { - return new BytesRef(String.format(Locale.ENGLISH, "%03d", ord)); - } - - @Override - public int getValueCount() { - return 100; - } - - @Override - public boolean advanceExact(int target) throws IOException { - doc = target; - return true; - } - - @Override - public int docID() { - return doc; - } - - @Override - public int nextDoc() throws IOException { - return advance(doc + 1); - } - - @Override - public int advance(int target) throws IOException { - if (target >= getValueCount()) { - return doc = NO_MORE_DOCS; - } else { - return doc = target; - } - } - - @Override - public long cost() { - return getValueCount(); - } - - } - - private static class DummyAtomicOrdinalsFieldData implements AtomicOrdinalsFieldData { - - @Override - public ScriptDocValues<?> getScriptValues() { - throw new UnsupportedOperationException(); - } - - @Override - public SortedBinaryDocValues getBytesValues() { - return FieldData.toString(getOrdinalsValues()); - } - - @Override - public long ramBytesUsed() { - return 0; - } - - @Override - public void close() { - } - - @Override - public SortedSetDocValues getOrdinalsValues() { - return DocValues.singleton(new DummySortedDocValues()); - } - - } - - public void 
testSortedSetValues() throws Exception { - AtomicFieldData fd = new UidIndexFieldData.UidAtomicFieldData(new BytesRef("type#"), new DummyAtomicOrdinalsFieldData()); - SortedBinaryDocValues dv = fd.getBytesValues(); - assertTrue(dv.advanceExact(30)); - assertEquals(1, dv.docValueCount()); - assertEquals(new BytesRef("type#030"), dv.nextValue()); - } - - public void testScriptValues() throws IOException { - AtomicFieldData fd = new UidIndexFieldData.UidAtomicFieldData(new BytesRef("type#"), new DummyAtomicOrdinalsFieldData()); - ScriptDocValues<?> values = fd.getScriptValues(); - values.setNextDocId(30); - assertEquals(Collections.singletonList("type#030"), values); - } - -} diff --git a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java index 9d581054f46b8..51153d28d29d3 100644 --- a/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java +++ b/server/src/test/java/org/elasticsearch/index/get/DocumentFieldTests.java @@ -26,9 +26,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -100,7 +99,7 @@ private static DocumentField mutateDocumentField(DocumentField documentField) { public static Tuple<DocumentField, DocumentField> randomDocumentField(XContentType xContentType) { if (randomBoolean()) { - String fieldName = randomFrom(ParentFieldMapper.NAME, RoutingFieldMapper.NAME, UidFieldMapper.NAME); + String fieldName = randomFrom(RoutingFieldMapper.NAME); DocumentField documentField = new DocumentField(fieldName, Collections.singletonList(randomAlphaOfLengthBetween(3, 10))); return Tuple.tuple(documentField, documentField); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index 81c8397c036d3..668d3432e957a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -64,10 +64,6 @@ public class BooleanFieldMapperTests extends ESSingleNodeTestCase { public void setup() { indexService = createIndex("test"); parser = indexService.mapperService().documentMapperParser(); - - IndexService preEs6IndexService = createIndex("legacy", - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0).build()); - preEs6Parser = preEs6IndexService.mapperService().documentMapperParser(); } @Override @@ -134,50 +130,7 @@ public void testSerialization() throws IOException { assertEquals("{\"field\":{\"type\":\"boolean\",\"doc_values\":false,\"null_value\":true}}", Strings.toString(builder)); } - public void testParsesPreEs6BooleansLenient() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder() - .startObject() - .startObject("type") - .startObject("properties") - .startObject("field1") - .field("type", "boolean") - .endObject() - .startObject("field2") - .field("type", "boolean") - .endObject() - .endObject() - .endObject() - .endObject()); - DocumentMapper defaultMapper = preEs6Parser.parse("type", new 
CompressedXContent(mapping)); - - String falsy = randomFrom("false", "off", "no", "0"); - String truthy = randomFrom("true", "on", "yes", "1"); - - ParsedDocument parsedDoc = defaultMapper.parse(SourceToParse.source("legacy", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field1", falsy) - .field("field2", truthy) - .endObject()), - XContentType.JSON)); - Document doc = parsedDoc.rootDoc(); - assertEquals("F", doc.getField("field1").stringValue()); - assertEquals("T", doc.getField("field2").stringValue()); - - List<String> expectedDeprecationWarnings = new ArrayList<>(); - if (Booleans.isBoolean(falsy) == false) { - expectedDeprecationWarnings.add("Expected a boolean for property [field1] but got ["+ falsy + "]"); - } - if (Booleans.isBoolean(truthy) == false) { - expectedDeprecationWarnings.add("Expected a boolean for property [field2] but got [" + truthy + "]"); - } - - if (expectedDeprecationWarnings.isEmpty() == false) { - assertWarnings(expectedDeprecationWarnings.toArray(new String[1])); - } - } - - public void testParsesEs6BooleansStrict() throws IOException { + public void testParsesBooleansStrict() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder() .startObject() .startObject("type") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java index 41d98aa173df7..e69d48b7b44b7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java @@ -230,67 +230,6 @@ public void testDoNotRepeatOriginalMapping() throws IOException { assertFalse(mapper.sourceMapper().enabled()); } - public void testMergeChildType() throws IOException { - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - - String initMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child") - .startObject("_parent").field("type", "parent").endObject() - .endObject().endObject()); - DocumentMapper initMapper = parser.parse("child", new CompressedXContent(initMapping)); - - assertThat(initMapper.mappers().getMapper("_parent#parent"), notNullValue()); - - String updatedMapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child") - .startObject("properties") - .startObject("name").field("type", "text").endObject() - .endObject().endObject().endObject()); - DocumentMapper updatedMapper1 = parser.parse("child", new CompressedXContent(updatedMapping1)); - DocumentMapper mergedMapper1 = initMapper.merge(updatedMapper1.mapping()); - - assertThat(mergedMapper1.mappers().getMapper("_parent#parent"), notNullValue()); - assertThat(mergedMapper1.mappers().getMapper("name"), notNullValue()); - - String updatedMapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child") - .startObject("_parent").field("type", "parent").endObject() - .startObject("properties") - .startObject("age").field("type", "byte").endObject() - .endObject().endObject().endObject()); - DocumentMapper updatedMapper2 = parser.parse("child", new CompressedXContent(updatedMapping2)); - DocumentMapper mergedMapper2 = mergedMapper1.merge(updatedMapper2.mapping()); - - assertThat(mergedMapper2.mappers().getMapper("_parent#parent"), notNullValue()); - assertThat(mergedMapper2.mappers().getMapper("name"), notNullValue()); - 
assertThat(mergedMapper2.mappers().getMapper("age"), notNullValue()); - - String modParentMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child") - .startObject("_parent").field("type", "new_parent").endObject() - .endObject().endObject()); - DocumentMapper modParentMapper = parser.parse("child", new CompressedXContent(modParentMapping)); - Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(modParentMapper.mapping())); - assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [parent]->[new_parent]")); - } - - public void testMergeAddingParent() throws IOException { - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - - String initMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("cowboy") - .startObject("properties") - .startObject("name").field("type", "text").endObject() - .endObject().endObject().endObject()); - DocumentMapper initMapper = parser.parse("cowboy", new CompressedXContent(initMapping)); - - assertThat(initMapper.mappers().getMapper("name"), notNullValue()); - - String updatedMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("cowboy") - .startObject("_parent").field("type", "parent").endObject() - .startObject("properties") - .startObject("age").field("type", "byte").endObject() - .endObject().endObject().endObject()); - DocumentMapper updatedMapper = parser.parse("cowboy", new CompressedXContent(updatedMapping)); - Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(updatedMapper.mapping())); - assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [null]->[parent]")); - } - public void testMergeMeta() throws IOException { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index dd4717a1a0f6a..11f69c738e949 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -152,10 +152,6 @@ public void testDotsWithDynamicNestedMapper() throws Exception { } public void testNestedHaveIdAndTypeFields() throws Exception { - DocumentMapperParser mapperParser1 = createIndex("index1", Settings.builder() - .put("index.version.created", Version.V_5_6_0) // allows for multiple types - .build() - ).mapperService().documentMapperParser(); DocumentMapperParser mapperParser2 = createIndex("index2").mapperService().documentMapperParser(); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties"); @@ -180,8 +176,7 @@ public void testNestedHaveIdAndTypeFields() throws Exception { mapping.endObject(); } mapping.endObject().endObject().endObject(); - DocumentMapper mapper1 = mapperParser1.parse("type", new CompressedXContent(Strings.toString(mapping))); - DocumentMapper mapper2 = mapperParser2.parse("type", new CompressedXContent(Strings.toString(mapping))); + DocumentMapper mapper = mapperParser2.parse("type", new CompressedXContent(Strings.toString(mapping))); XContentBuilder doc = XContentFactory.jsonBuilder().startObject(); { @@ -196,31 +191,10 @@ public void testNestedHaveIdAndTypeFields() throws Exception { } doc.endObject(); - // Verify in the 
case where multiple types are allowed that the _uid field is added to nested documents: - ParsedDocument result = mapper1.parse(SourceToParse.source("index1", "type", "1", BytesReference.bytes(doc), XContentType.JSON)); - assertEquals(2, result.docs().size()); - // Nested document: - assertNull(result.docs().get(0).getField(IdFieldMapper.NAME)); - assertNotNull(result.docs().get(0).getField(UidFieldMapper.NAME)); - assertEquals("type#1", result.docs().get(0).getField(UidFieldMapper.NAME).stringValue()); - assertEquals(UidFieldMapper.Defaults.NESTED_FIELD_TYPE, result.docs().get(0).getField(UidFieldMapper.NAME).fieldType()); - assertNotNull(result.docs().get(0).getField(TypeFieldMapper.NAME)); - assertEquals("__foo", result.docs().get(0).getField(TypeFieldMapper.NAME).stringValue()); - assertEquals("value1", result.docs().get(0).getField("foo.bar").binaryValue().utf8ToString()); - // Root document: - assertNull(result.docs().get(1).getField(IdFieldMapper.NAME)); - assertNotNull(result.docs().get(1).getField(UidFieldMapper.NAME)); - assertEquals("type#1", result.docs().get(1).getField(UidFieldMapper.NAME).stringValue()); - assertEquals(UidFieldMapper.Defaults.FIELD_TYPE, result.docs().get(1).getField(UidFieldMapper.NAME).fieldType()); - assertNotNull(result.docs().get(1).getField(TypeFieldMapper.NAME)); - assertEquals("type", result.docs().get(1).getField(TypeFieldMapper.NAME).stringValue()); - assertEquals("value2", result.docs().get(1).getField("baz").binaryValue().utf8ToString()); - // Verify in the case where only a single type is allowed that the _id field is added to nested documents: - result = mapper2.parse(SourceToParse.source("index2", "type", "1", BytesReference.bytes(doc), XContentType.JSON)); + ParsedDocument result = mapper.parse(SourceToParse.source("index2", "type", "1", BytesReference.bytes(doc), XContentType.JSON)); assertEquals(2, result.docs().size()); // Nested document: - assertNull(result.docs().get(0).getField(UidFieldMapper.NAME)); assertNotNull(result.docs().get(0).getField(IdFieldMapper.NAME)); assertEquals(Uid.encodeId("1"), result.docs().get(0).getField(IdFieldMapper.NAME).binaryValue()); assertEquals(IdFieldMapper.Defaults.NESTED_FIELD_TYPE, result.docs().get(0).getField(IdFieldMapper.NAME).fieldType()); @@ -228,7 +202,6 @@ public void testNestedHaveIdAndTypeFields() throws Exception { assertEquals("__foo", result.docs().get(0).getField(TypeFieldMapper.NAME).stringValue()); assertEquals("value1", result.docs().get(0).getField("foo.bar").binaryValue().utf8ToString()); // Root document: - assertNull(result.docs().get(1).getField(UidFieldMapper.NAME)); assertNotNull(result.docs().get(1).getField(IdFieldMapper.NAME)); assertEquals(Uid.encodeId("1"), result.docs().get(1).getField(IdFieldMapper.NAME).binaryValue()); assertEquals(IdFieldMapper.Defaults.FIELD_TYPE, result.docs().get(1).getField(IdFieldMapper.NAME).fieldType()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 5ee0740505cb8..94a7f60120d1b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -97,26 +97,6 @@ public void testMappingsPropagatedToMasterNodeImmediately() throws IOException { assertMappingsHaveField(mappings, "index", "type", "bar"); } - public void testMappingsPropagatedToMasterNodeImmediatelyMultiType() throws IOException { - 
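// NOTE (editorial annotation, not part of the patch): a recurring idiom in the tests
// this diff deletes, including the one continuing below, is creating the index with
// index.version.created = V_5_6_0, the setting that re-enabled multiple mapping types
// per index:
assertAcked(prepareCreate("index")
    .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)));
// With 5.x compatibility dropped there are only single-type indices left, so the
// multi-type variants go away while their single-type twins above survive.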
assertAcked(prepareCreate("index").setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))); - // allows for multiple types - - // works when the type has been dynamically created - client().prepareIndex("index", "type", "1").setSource("foo", 3).get(); - GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); - assertMappingsHaveField(mappings, "index", "type", "foo"); - - // works if the type already existed - client().prepareIndex("index", "type", "1").setSource("bar", "baz").get(); - mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get(); - assertMappingsHaveField(mappings, "index", "type", "bar"); - - // works if we indexed an empty document - client().prepareIndex("index", "type2", "1").setSource().get(); - mappings = client().admin().indices().prepareGetMappings("index").setTypes("type2").get(); - assertTrue(mappings.getMappings().get("index").toString(), mappings.getMappings().get("index").containsKey("type2")); - } - public void testConcurrentDynamicUpdates() throws Throwable { createIndex("index"); final Thread[] indexThreads = new Thread[32]; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index aaca31aeea3e6..30923b5a6559b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -574,53 +574,6 @@ public void testMixTemplateMultiFieldAndMappingReuse() throws Exception { assertNull(parsed.dynamicMappingsUpdate()); } - public void testMixTemplateMultiFieldMultiTypeAndMappingReuse() throws Exception { - IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); - XContentBuilder mappings1 = jsonBuilder().startObject() - .startObject("type1") - .startArray("dynamic_templates") - .startObject() - .startObject("template1") - .field("match_mapping_type", "string") - .startObject("mapping") - .field("type", "text") - .startObject("fields") - .startObject("raw") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endArray() - .endObject().endObject(); - indexService.mapperService().merge("type1", new CompressedXContent(BytesReference.bytes(mappings1)), MapperService.MergeReason.MAPPING_UPDATE); - XContentBuilder mappings2 = jsonBuilder().startObject() - .startObject("type2") - .startObject("properties") - .startObject("field") - .field("type", "text") - .endObject() - .endObject() - .endObject().endObject(); - indexService.mapperService().merge("type2", new CompressedXContent(BytesReference.bytes(mappings2)), MapperService.MergeReason.MAPPING_UPDATE); - - XContentBuilder json = XContentFactory.jsonBuilder().startObject() - .field("field", "foo") - .endObject(); - SourceToParse source = SourceToParse.source("test", "type1", "1", BytesReference.bytes(json), json.contentType()); - DocumentMapper mapper = indexService.mapperService().documentMapper("type1"); - assertNull(mapper.mappers().getMapper("field.raw")); - ParsedDocument parsed = mapper.parse(source); - assertNotNull(parsed.dynamicMappingsUpdate()); - - indexService.mapperService().merge("type1", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()), MapperService.MergeReason.MAPPING_UPDATE); - mapper = indexService.mapperService().documentMapper("type1"); - 
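// NOTE (editorial annotation, not part of the patch): the deleted multi-type test and
// its retained single-type twin both exercise the same dynamic-mapping round trip seen
// in the lines around this note: the first parse of a document produces a pending
// mapping update, and once that update is merged back a second parse produces none.
// Condensed from the surrounding code:
ParsedDocument parsed = mapper.parse(source);
assertNotNull(parsed.dynamicMappingsUpdate());
indexService.mapperService().merge("type1",
    new CompressedXContent(parsed.dynamicMappingsUpdate().toString()),
    MapperService.MergeReason.MAPPING_UPDATE);
mapper = indexService.mapperService().documentMapper("type1");
assertNull(mapper.parse(source).dynamicMappingsUpdate());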
assertNotNull(mapper.mappers().getMapper("field.raw")); - parsed = mapper.parse(source); - assertNull(parsed.dynamicMappingsUpdate()); - } - public void testDefaultFloatingPointMappings() throws IOException { MapperService mapperService = createIndex("test").mapperService(); String mapping = Strings.toString(jsonBuilder().startObject() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java index 764cabf65363b..18413100418ed 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java @@ -59,17 +59,7 @@ public void testIncludeInObjectNotAllowed() throws Exception { } } - public void testDefaultsMultipleTypes() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.version.created", Version.V_5_6_0) - .build(); - MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); - assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(IdFieldMapper.NAME))); - } - - public void testDefaultsSingleType() throws IOException { + public void testDefaults() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java index 5be1923cbed3c..f8b60db0e5da2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IdFieldTypeTests.java @@ -46,55 +46,6 @@ public void testRangeQuery() { assertEquals("Field [_id] of type [_id] does not support range queries", e.getMessage()); } - public void testTermsQueryWhenTypesAreEnabled() throws Exception { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) // allows for multiple types - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); - IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - - MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.emptySet(); - Mockito.when(context.queryTypes()).thenReturn(types); - Mockito.when(context.getMapperService()).thenReturn(mapperService); - - MappedFieldType ft = IdFieldMapper.defaultFieldType(mockSettings); - ft.setName(IdFieldMapper.NAME); - Query query = ft.termQuery("id", context); - assertEquals(new TermInSetQuery("_uid"), query); - - types = Collections.singleton("type"); - Mockito.when(context.queryTypes()).thenReturn(types); - query = 
ft.termQuery("id", context); - assertEquals(new TermInSetQuery("_uid", new BytesRef("type#id")), query); - } - - public void testTermsQueryWhenTypesAreDisabled() throws Exception { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - Settings indexSettings = Settings.builder() - .put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); - IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - Mockito.when(context.indexVersionCreated()).thenReturn(indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null)); - - MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.singleton("type"); - Mockito.when(context.queryTypes()).thenReturn(types); - Mockito.when(context.getMapperService()).thenReturn(mapperService); - MappedFieldType ft = IdFieldMapper.defaultFieldType(mockSettings); - Query query = ft.termQuery("id", context); - assertEquals(new TermInSetQuery("_id", new BytesRef("id")), query); - } - public void testTermsQuery() throws Exception { QueryShardContext context = Mockito.mock(QueryShardContext.class); Settings indexSettings = Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 732fa9bad184c..59ef784aea3a2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -195,51 +195,6 @@ public void testMergeWithMap() throws Throwable { assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: ")); } - public void testMergeParentTypesSame() { - // Verifies that a merge (absent a DocumentMapper change) - // doesn't change the parentTypes reference. - // The collection was being rewrapped with each merge - // in v5.2 resulting in eventual StackOverflowErrors. 
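// NOTE (editorial annotation, not part of the patch): for readers without the 5.2
// context of the issue linked just below, the StackOverflowError came from wrapping an
// already-wrapped collection on every mapping merge. A hypothetical reproduction of
// the anti-pattern (assumes java.util imports; not the actual MapperService code):
Set<String> parentTypes = Collections.emptySet();
for (int i = 0; i < 100_000; i++) {
    parentTypes = Collections.unmodifiableSet(parentTypes); // one more delegation layer per merge
}
parentTypes.contains("x"); // recurses through every wrapper and eventually overflows the stack
// which is why the deleted test asserts reference identity (assertSame) across merges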
- https://github.com/elastic/elasticsearch/issues/23604 - - IndexService indexService1 = createIndex("index1"); - MapperService mapperService = indexService1.mapperService(); - Set parentTypes = mapperService.getParentTypes(); - - Map> mappings = new HashMap<>(); - mapperService.merge(mappings, MergeReason.MAPPING_UPDATE); - assertSame(parentTypes, mapperService.getParentTypes()); - } - - public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IOException { - IndexService indexService = createIndex("test", - Settings.builder().put("index.version.created", Version.V_5_6_0).build()); // multiple types - - CompressedXContent simpleMapping = new CompressedXContent(BytesReference.bytes(XContentFactory.jsonBuilder().startObject() - .startObject("properties") - .startObject("field") - .field("type", "text") - .endObject() - .endObject().endObject())); - - indexService.mapperService().merge("type1", simpleMapping, MergeReason.MAPPING_UPDATE); - DocumentMapper documentMapper = indexService.mapperService().documentMapper("type1"); - - indexService.mapperService().merge("type2", simpleMapping, MergeReason.MAPPING_UPDATE); - assertSame(indexService.mapperService().documentMapper("type1"), documentMapper); - - CompressedXContent normsDisabledMapping = new CompressedXContent(BytesReference.bytes(XContentFactory.jsonBuilder().startObject() - .startObject("properties") - .startObject("field") - .field("type", "text") - .field("norms", false) - .endObject() - .endObject().endObject())); - - indexService.mapperService().merge("type3", normsDisabledMapping, MergeReason.MAPPING_UPDATE); - assertNotSame(indexService.mapperService().documentMapper("type1"), documentMapper); - } - public void testPartitionedConstraints() { // partitioned index must have routing IllegalArgumentException noRoutingException = expectThrows(IllegalArgumentException.class, () -> { @@ -252,18 +207,6 @@ public void testPartitionedConstraints() { }); assertTrue(noRoutingException.getMessage(), noRoutingException.getMessage().contains("must have routing")); - // partitioned index cannot have parent/child relationships - IllegalArgumentException parentException = expectThrows(IllegalArgumentException.class, () -> { - client().admin().indices().prepareCreate("test-index") - .addMapping("child", "{\"child\": {\"_routing\":{\"required\":true}, \"_parent\": {\"type\": \"parent\"}}}", - XContentType.JSON) - .setSettings(Settings.builder() - .put("index.number_of_shards", 4) - .put("index.routing_partition_size", 2)) - .execute().actionGet(); - }); - assertTrue(parentException.getMessage(), parentException.getMessage().contains("cannot have a _parent field")); - // valid partitioned index assertTrue(client().admin().indices().prepareCreate("test-index") .addMapping("type", "{\"type\":{\"_routing\":{\"required\":true}}}", XContentType.JSON) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index 8bb9d84f576d9..916a9d093d12a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -465,37 +465,6 @@ public void testLimitOfNestedFieldsPerIndex() throws Exception { .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY); } - public void testLimitOfNestedFieldsWithMultiTypePerIndex() throws Exception { - Function 
mapping = type -> { - try { - return Strings.toString(XContentFactory.jsonBuilder().startObject().startObject(type).startObject("properties") - .startObject("nested1").field("type", "nested").startObject("properties") - .startObject("nested2").field("type", "nested") - .endObject().endObject().endObject() - .endObject().endObject().endObject()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }; - - MapperService mapperService = createIndex("test4", Settings.builder() - .put("index.version.created", Version.V_5_6_0) - .put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 2).build()).mapperService(); - mapperService.merge("type1", new CompressedXContent(mapping.apply("type1")), MergeReason.MAPPING_UPDATE); - // merging same fields, but different type is ok - mapperService.merge("type2", new CompressedXContent(mapping.apply("type2")), MergeReason.MAPPING_UPDATE); - // adding new fields from different type is not ok - String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type3").startObject("properties").startObject("nested3") - .field("type", "nested").startObject("properties").endObject().endObject().endObject().endObject().endObject()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - mapperService.merge("type3", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), containsString("Limit of nested fields [2] in index [test4] has been exceeded")); - - // do not check nested fields limit if mapping is not updated - createIndex("test5", Settings.builder().put(MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), 0).build()) - .mapperService().merge("type", new CompressedXContent(mapping.apply("type")), MergeReason.MAPPING_RECOVERY); - } - public void testParentObjectMapperAreNested() throws Exception { MapperService mapperService = createIndex("index1", Settings.EMPTY, "_doc", jsonBuilder().startObject() .startObject("properties") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java deleted file mode 100644 index f399902920e33..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldMapperTests.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
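// NOTE (editorial annotation, not part of the patch): the entire ParentFieldMapperTests
// class is deleted below because the _parent meta field it covers is gone. For context
// while reading the deleted assertions: a 5.x parent/child join was recorded in a
// dedicated field named "_parent#<parent type>" holding the parent id, e.g.
//   doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString()  ->  "1122"
// and the getNumberOfFieldWithParentPrefix() helper further down simply counts the
// fields carrying that prefix.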
- */ -package org.elasticsearch.index.mapper; - -import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.AnalyzerScope; -import org.elasticsearch.index.analysis.IndexAnalyzers; -import org.elasticsearch.index.analysis.NamedAnalyzer; -import org.elasticsearch.index.mapper.MapperService.MergeReason; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.InternalSettingsPlugin; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; - -import static java.util.Collections.emptyList; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; - -public class ParentFieldMapperTests extends ESSingleNodeTestCase { - - @Override - protected Collection> getPlugins() { - return Collections.singleton(InternalSettingsPlugin.class); - } - - public void testParentSetInDocNotAllowed() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - - try { - docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference.bytes(XContentFactory.jsonBuilder() - .startObject().field("_parent", "1122").endObject()), XContentType.JSON)); - fail("Expected failure to parse metadata field"); - } catch (MapperParsingException e) { - assertTrue(e.getMessage(), e.getMessage().contains("Field [_parent] is a metadata field and cannot be added inside a document")); - } - } - - public void testJoinFieldSet() throws Exception { - String parentMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("parent_type") - .endObject().endObject()); - String childMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child_type") - .startObject("_parent").field("type", "parent_type").endObject() - .endObject().endObject()); - IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); - indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE); - indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE); - - // Indexing parent doc: - DocumentMapper parentDocMapper = indexService.mapperService().documentMapper("parent_type"); - ParsedDocument doc = - parentDocMapper.parse(SourceToParse.source("test", "parent_type", "1122", new BytesArray("{}"), 
XContentType.JSON)); - assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc())); - assertEquals("1122", doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString()); - - // Indexing child doc: - DocumentMapper childDocMapper = indexService.mapperService().documentMapper("child_type"); - doc = childDocMapper.parse(SourceToParse.source("test", "child_type", "1", new BytesArray("{}"), XContentType.JSON).parent("1122")); - - assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc())); - assertEquals("1122", doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString()); - } - - public void testJoinFieldNotSet() throws Exception { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .endObject().endObject()); - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("x_field", "x_value") - .endObject()), XContentType.JSON)); - assertEquals(0, getNumberOfFieldWithParentPrefix(doc.rootDoc())); - } - - public void testNoParentNullFieldCreatedIfNoParentSpecified() throws Exception { - Index index = new Index("_index", "testUUID"); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(index, Settings.EMPTY); - NamedAnalyzer namedAnalyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, new StandardAnalyzer()); - IndexAnalyzers indexAnalyzers = new IndexAnalyzers(indexSettings, namedAnalyzer, namedAnalyzer, namedAnalyzer, - Collections.emptyMap(), Collections.emptyMap()); - SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - MapperService mapperService = new MapperService(indexSettings, indexAnalyzers, xContentRegistry(), similarityService, - new IndicesModule(emptyList()).getMapperRegistry(), () -> null); - XContentBuilder mappingSource = jsonBuilder().startObject().startObject("some_type") - .startObject("properties") - .endObject() - .endObject().endObject(); - mapperService.merge("some_type", new CompressedXContent(Strings.toString(mappingSource)), MergeReason.MAPPING_UPDATE); - Set allFields = new HashSet<>(mapperService.simpleMatchToIndexNames("*")); - assertTrue(allFields.contains("_parent")); - assertFalse(allFields.contains("_parent#null")); - MappedFieldType fieldType = mapperService.fullName("_parent"); - assertFalse(fieldType.eagerGlobalOrdinals()); - } - - private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) { - int numFieldWithParentPrefix = 0; - for (IndexableField field : doc) { - if (field.name().startsWith("_parent")) { - numFieldWithParentPrefix++; - } - } - return numFieldWithParentPrefix; - } - - public void testUpdateEagerGlobalOrds() throws IOException { - String parentMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("parent_type") - .endObject().endObject()); - String childMapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child_type") - .startObject("_parent").field("type", "parent_type").endObject() - .endObject().endObject()); - IndexService indexService = createIndex("test", Settings.builder().put("index.version.created", Version.V_5_6_0).build()); - indexService.mapperService().merge("parent_type", new CompressedXContent(parentMapping), MergeReason.MAPPING_UPDATE); - 
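// NOTE (editorial annotation, not part of the patch): the deleted test continuing below
// pins down a default worth remembering when reading 5.x mappings: _parent implied
// eager_global_ordinals = true, and an ordinary mapping update could switch it off.
// Condensed from the code below (the inline JSON string is illustrative):
String update = "{\"child_type\": {\"_parent\": "
    + "{\"type\": \"parent_type\", \"eager_global_ordinals\": false}}}";
indexService.mapperService().merge("child_type", new CompressedXContent(update),
    MergeReason.MAPPING_UPDATE);
assertFalse(indexService.mapperService().documentMapper("child_type")
    .parentFieldMapper().fieldType().eagerGlobalOrdinals());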
indexService.mapperService().merge("child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE); - - assertTrue(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); - - String childMappingUpdate = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("child_type") - .startObject("_parent").field("type", "parent_type").field("eager_global_ordinals", false).endObject() - .endObject().endObject()); - indexService.mapperService().merge("child_type", new CompressedXContent(childMappingUpdate), MergeReason.MAPPING_UPDATE); - - assertFalse(indexService.mapperService().documentMapper("child_type").parentFieldMapper().fieldType().eagerGlobalOrdinals()); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldTypeTests.java deleted file mode 100644 index 64adad9aa4818..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParentFieldTypeTests.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.mapper; - -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.ParentFieldMapper; - -public class ParentFieldTypeTests extends FieldTypeTestCase { - @Override - protected MappedFieldType createDefaultFieldType() { - return new ParentFieldMapper.ParentFieldType(); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index 4687a3a24ef74..4e30d9c92aed6 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -88,19 +88,7 @@ public void testDocValues(boolean singleType) throws IOException { dir.close(); } - public void testDefaultsMultipleTypes() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.version.created", Version.V_5_6_0) - .build(); - MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); - IndexableField[] fields = document.rootDoc().getFields(TypeFieldMapper.NAME); - assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions()); - assertEquals(DocValuesType.SORTED_SET, fields[1].fieldType().docValuesType()); - } - - public void testDefaultsSingleType() throws IOException { + public void testDefaults() throws IOException { Settings indexSettings = Settings.EMPTY; MapperService mapperService = createIndex("test", indexSettings).mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index 1fdfb52b47f67..e8bf09ca5fa61 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -22,21 +22,10 @@ import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause.Occur; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.UUIDs; @@ -57,7 +46,7 @@ protected MappedFieldType createDefaultFieldType() { return new TypeFieldMapper.TypeFieldType(); } - public void testTermsQueryWhenTypesAreDisabled() throws Exception { + public void testTermsQuery() throws Exception { QueryShardContext 
context = Mockito.mock(QueryShardContext.class); Version indexVersionCreated = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); Settings indexSettings = Settings.builder() @@ -95,76 +84,6 @@ public void testTermsQueryWhenTypesAreDisabled() throws Exception { assertEquals(new MatchNoDocsQuery(), query); } - public void testTermsQueryWhenTypesAreEnabled() throws Exception { - Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); - IndexReader reader = openReaderWithNewType("my_type", w); - - QueryShardContext context = Mockito.mock(QueryShardContext.class); - Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) // to allow for multiple types - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .build(); - IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); - IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - - TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); - ft.setName(TypeFieldMapper.NAME); - Query query = ft.termQuery("my_type", context); - assertEquals(new MatchAllDocsQuery(), query.rewrite(reader)); - - // Make sure that Lucene actually simplifies the query when there is a single type - Query userQuery = new PhraseQuery("body", "quick", "fox"); - Query filteredQuery = new BooleanQuery.Builder().add(userQuery, Occur.MUST).add(query, Occur.FILTER).build(); - Query rewritten = new IndexSearcher(reader).rewrite(filteredQuery); - assertEquals(userQuery, rewritten); - - // ... and does not rewrite it if there is more than one type - reader.close(); - reader = openReaderWithNewType("my_type2", w); - Query expected = new ConstantScoreQuery( - new BooleanQuery.Builder() - .add(new TermQuery(new Term(TypeFieldMapper.NAME, "my_type")), Occur.SHOULD) - .build() - ); - assertEquals(expected, query.rewrite(reader)); - - BytesRef[] types = - new BytesRef[] {new BytesRef("my_type"), new BytesRef("my_type2"), new BytesRef("my_type3")}; - // the query should match all documents - query = new TypeFieldMapper.TypesQuery(types); - assertEquals(new MatchAllDocsQuery(), query.rewrite(reader)); - - reader.close(); - reader = openReaderWithNewType("unknown_type", w); - // the query cannot rewrite to a match all docs since unknown_type is not queried. 
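// NOTE (editorial annotation, not part of the patch): the rewrite contract that the
// deleted assertions around this note pin down, summarized in one place:
//   - every type present in the reader is also queried  -> MatchAllDocsQuery
//   - otherwise -> ConstantScoreQuery over one SHOULD TermQuery per queried type that
//     actually exists in the reader, with duplicate types collapsed into one clause
// So querying {my_type, my_type2, my_type3} against a reader that also holds
// "unknown_type" keeps only the my_type and my_type2 clauses, as asserted just below.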
- query = new TypeFieldMapper.TypesQuery(types); - expected = - new ConstantScoreQuery( - new BooleanQuery.Builder() - .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, types[0])), Occur.SHOULD) - .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, types[1])), Occur.SHOULD) - .build() - ); - rewritten = query.rewrite(reader); - assertEquals(expected, rewritten); - - // make sure that a query with redundant types does not rewrite to MatchAllDocsQuery - query = new TypeFieldMapper.TypesQuery(new BytesRef("my_type"), new BytesRef("my_type"), new BytesRef("my_type")); - expected = - new ConstantScoreQuery( - new BooleanQuery.Builder() - .add(new TermQuery(new Term(TypeFieldMapper.CONTENT_TYPE, "my_type")), Occur.SHOULD) - .build() - ); - rewritten = query.rewrite(reader); - assertEquals(expected, rewritten); - - IOUtils.close(reader, w, dir); - } static DirectoryReader openReaderWithNewType(String type, IndexWriter writer) throws IOException { Document doc = new Document(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java deleted file mode 100644 index 4128cec082e0a..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldMapperTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
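// NOTE (editorial annotation, not part of the patch): UidFieldMapperTests, deleted
// below, captured the legacy document-identity scheme this diff retires. On a
// multi-type 5.x index every document carried exactly one stored, indexed _uid field
// with the string value "<type>#<id>"; single-type indices, now the only kind, store
// a binary _id term instead, which is why the retained tests earlier in this diff
// assert against Uid.encodeId("1") rather than a "type#1" string.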
- */ - -package org.elasticsearch.index.mapper; - -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.mapper.MapperService.MergeReason; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; - -public class UidFieldMapperTests extends ESSingleNodeTestCase { - - @Override - protected Collection> getPlugins() { - return Collections.singleton(InternalSettingsPlugin.class); - } - - public void testDefaultsMultipleTypes() throws IOException { - Settings indexSettings = Settings.builder() - .put("index.version.created", Version.V_5_6_0) - .build(); - MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); - IndexableField[] fields = document.rootDoc().getFields(UidFieldMapper.NAME); - assertEquals(1, fields.length); - assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions()); - assertTrue(fields[0].fieldType().stored()); - assertEquals("type#id", fields[0].stringValue()); - } - - public void testDefaultsSingleType() throws IOException { - Settings indexSettings = Settings.EMPTY; - MapperService mapperService = createIndex("test", indexSettings).mapperService(); - DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE); - ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); - assertEquals(Collections.emptyList(), Arrays.asList(document.rootDoc().getFields(UidFieldMapper.NAME))); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java deleted file mode 100644 index 9b2e0ceb0721f..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
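// NOTE (editorial annotation, not part of the patch): the deleted UidFieldTypeTests
// below read like a compatibility table for how _uid term queries were translated
// once indices became single-type:
//   no type registered                      -> MatchNoDocsQuery
//   "type#id" with "type" registered        -> TermInSetQuery("_id", <id term>)
//   "type2#id" with only "type" registered  -> TermInSetQuery("_id")   (matches nothing)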
- */ -package org.elasticsearch.index.mapper; - -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermInSetQuery; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.query.QueryShardContext; -import org.mockito.Mockito; - -import java.util.Collection; -import java.util.Collections; - -public class UidFieldTypeTests extends FieldTypeTestCase { - @Override - protected MappedFieldType createDefaultFieldType() { - return new UidFieldMapper.UidFieldType(); - } - - public void testRangeQuery() { - MappedFieldType ft = createDefaultFieldType(); - ft.setName("_uid"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> ft.rangeQuery(null, null, randomBoolean(), randomBoolean(), null, null, null, null)); - assertEquals("Field [_uid] of type [_uid] does not support range queries", e.getMessage()); - } - - public void testTermsQueryWhenTypesAreEnabled() throws Exception { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) // to allow for multiple types - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) - .build(); - IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); - IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - - MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.emptySet(); - Mockito.when(context.queryTypes()).thenReturn(types); - Mockito.when(context.getMapperService()).thenReturn(mapperService); - - MappedFieldType ft = UidFieldMapper.defaultFieldType(mockSettings); - ft.setName(UidFieldMapper.NAME); - Query query = ft.termQuery("type#id", context); - assertEquals(new TermInSetQuery("_uid", new BytesRef("type#id")), query); - } - - public void testTermsQueryWhenTypesAreDisabled() throws Exception { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - Settings indexSettings = Settings.builder() - .put(IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_6_0) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); - IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - - MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.emptySet(); - Mockito.when(mapperService.types()).thenReturn(types); - Mockito.when(context.getMapperService()).thenReturn(mapperService); - 
Mockito.when(context.indexVersionCreated()).thenReturn(indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null)); - - MappedFieldType ft = UidFieldMapper.defaultFieldType(mockSettings); - ft.setName(UidFieldMapper.NAME); - Query query = ft.termQuery("type#id", context); - assertEquals(new MatchNoDocsQuery(), query); - - types = Collections.singleton("type"); - Mockito.when(mapperService.types()).thenReturn(types); - query = ft.termQuery("type#id", context); - assertEquals(new TermInSetQuery("_id", new BytesRef("id")), query); - query = ft.termQuery("type2#id", context); - assertEquals(new TermInSetQuery("_id"), query); - } - - public void testTermsQuery() throws Exception { - QueryShardContext context = Mockito.mock(QueryShardContext.class); - Settings indexSettings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); - IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); - Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); - - MapperService mapperService = Mockito.mock(MapperService.class); - Collection types = Collections.emptySet(); - Mockito.when(mapperService.types()).thenReturn(types); - Mockito.when(context.getMapperService()).thenReturn(mapperService); - Mockito.when(context.indexVersionCreated()).thenReturn(indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null)); - - MappedFieldType ft = UidFieldMapper.defaultFieldType(mockSettings); - ft.setName(UidFieldMapper.NAME); - Query query = ft.termQuery("type#id", context); - assertEquals(new MatchNoDocsQuery(), query); - - types = Collections.singleton("type"); - Mockito.when(mapperService.types()).thenReturn(types); - query = ft.termQuery("type#id", context); - assertEquals(new TermInSetQuery("_id", Uid.encodeId("id")), query); - query = ft.termQuery("type2#id", context); - assertEquals(new TermInSetQuery("_id"), query); - } -} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UidTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UidTests.java index c4fb94abd3846..35e54d6caa8b5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UidTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UidTests.java @@ -25,23 +25,7 @@ import java.util.Arrays; import java.util.Base64; -import static org.hamcrest.Matchers.equalTo; - public class UidTests extends ESTestCase { - public void testCreateAndSplitId() { - BytesRef createUid = Uid.createUidAsBytes("foo", "bar"); - BytesRef[] splitUidIntoTypeAndId = splitUidIntoTypeAndId(createUid); - assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString())); - assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString())); - // split also with an offset - BytesRef ref = new BytesRef(createUid.length+10); - ref.offset = 9; - ref.length = createUid.length; - System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length); - splitUidIntoTypeAndId = splitUidIntoTypeAndId(ref); - assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString())); - assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString())); - } public static BytesRef[] splitUidIntoTypeAndId(BytesRef uid) { int loc = -1; diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index 311257b837d1b..f21a1d97798b3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -138,44 +138,6 @@ public void testConflictNewType() throws Exception { equalTo("long")); } - // same as the testConflictNewType except that the mapping update is on an existing type - public void testConflictNewTypeUpdate() throws Exception { - XContentBuilder mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1") - .startObject("properties").startObject("foo").field("type", "long").endObject() - .endObject().endObject().endObject(); - XContentBuilder mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject(); - MapperService mapperService = createIndex("test", Settings.builder().put("index.version.created", - Version.V_5_6_0).build()).mapperService(); - - mapperService.merge("type1", new CompressedXContent(Strings.toString(mapping1)), MapperService.MergeReason.MAPPING_UPDATE); - mapperService.merge("type2", new CompressedXContent(Strings.toString(mapping2)), MapperService.MergeReason.MAPPING_UPDATE); - - XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2") - .startObject("properties").startObject("foo").field("type", "double").endObject() - .endObject().endObject().endObject(); - - try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); - fail(); - } catch (IllegalArgumentException e) { - // expected - assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); - } - - try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); - fail(); - } catch (IllegalArgumentException e) { - // expected - assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); - } - - assertThat(((FieldMapper) mapperService.documentMapper("type1").mapping().root().getMapper("foo")).fieldType().typeName(), - equalTo("long")); - assertNotNull(mapperService.documentMapper("type2")); - assertNull(mapperService.documentMapper("type2").mapping().root().getMapper("foo")); - } - public void testReuseMetaField() throws IOException { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("_id").field("type", "text").endObject() diff --git a/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index e440fc0277229..dab25b0ce3d96 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -25,14 +25,17 @@ import org.apache.lucene.search.TermInSetQuery; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.Arrays; +import 
java.util.Collections; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; public class IdsQueryBuilderTests extends AbstractQueryTestCase { @@ -74,7 +77,14 @@ protected IdsQueryBuilder doCreateTestQueryBuilder() { @Override protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { - if (queryBuilder.ids().size() == 0 || context.getQueryShardContext().fieldMapper(UidFieldMapper.NAME) == null) { + boolean allTypes = queryBuilder.types().length == 0 || + queryBuilder.types().length == 1 && "_all".equals(queryBuilder.types()[0]); + if (queryBuilder.ids().size() == 0 + // no types + || context.getQueryShardContext().fieldMapper(IdFieldMapper.NAME) == null + // there are types, but disjoint from the query + || (allTypes == false && + Arrays.asList(queryBuilder.types()).indexOf(context.mapperService().types().iterator().next()) == -1)) { assertThat(query, instanceOf(MatchNoDocsQuery.class)); } else { assertThat(query, instanceOf(TermInSetQuery.class)); @@ -94,7 +104,7 @@ public void testIllegalArguments() { public void testIdsQueryWithInvalidValues() throws Exception { String query = "{ \"ids\": { \"values\": [[1]] } }"; ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(query)); - assertEquals("[ids] failed to parse field [values]", e.getMessage()); + assertThat(e.getMessage(), containsString("[ids] failed to parse field [values]")); } public void testFromJson() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 939f1add0094f..aafc66b3985e1 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -66,7 +66,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -172,6 +174,206 @@ protected QueryStringQueryBuilder doCreateTestQueryBuilder() { return queryStringQueryBuilder; } + @Override + public QueryStringQueryBuilder mutateInstance(QueryStringQueryBuilder instance) throws IOException { + String query = instance.queryString(); + String defaultField = instance.defaultField(); + Map fields = instance.fields(); + Operator operator = instance.defaultOperator(); + Fuzziness fuzziness = instance.fuzziness(); + String analyzer = instance.analyzer(); + String quoteAnalyzer = instance.quoteAnalyzer(); + Boolean allowLeadingWildCard = instance.allowLeadingWildcard(); + Boolean analyzeWildcard = instance.analyzeWildcard(); + int maxDeterminizedStates = instance.maxDeterminizedStates(); + boolean enablePositionIncrements = instance.enablePositionIncrements(); + boolean escape = instance.escape(); + int phraseSlop = instance.phraseSlop(); + int fuzzyMaxExpansions = instance.fuzzyMaxExpansions(); + int fuzzyPrefixLength = instance.fuzzyPrefixLength(); + String fuzzyRewrite = instance.fuzzyRewrite(); + String rewrite = instance.rewrite(); + String quoteFieldSuffix = instance.quoteFieldSuffix(); + Float tieBreaker = instance.tieBreaker(); + String minimumShouldMatch = 
instance.minimumShouldMatch(); + String timeZone = instance.timeZone() == null ? null : instance.timeZone().getID(); + boolean autoGenerateSynonymsPhraseQuery = instance.autoGenerateSynonymsPhraseQuery(); + boolean fuzzyTranspositions = instance.fuzzyTranspositions(); + + switch (between(0, 23)) { + case 0: + query = query + " foo"; + break; + case 1: + if (defaultField == null) { + defaultField = randomAlphaOfLengthBetween(1, 10); + } else { + defaultField = defaultField + randomAlphaOfLength(5); + } + break; + case 2: + fields = new HashMap<>(fields); + fields.put(randomAlphaOfLength(10), 1.0f); + break; + case 3: + operator = randomValueOtherThan(operator, () -> randomFrom(Operator.values())); + break; + case 4: + fuzziness = randomValueOtherThan(fuzziness, () -> randomFrom(Fuzziness.AUTO, Fuzziness.ZERO, Fuzziness.ONE, Fuzziness.TWO)); + break; + case 5: + if (analyzer == null) { + analyzer = randomAnalyzer(); + } else { + analyzer = null; + } + break; + case 6: + if (quoteAnalyzer == null) { + quoteAnalyzer = randomAnalyzer(); + } else { + quoteAnalyzer = null; + } + break; + case 7: + if (allowLeadingWildCard == null) { + allowLeadingWildCard = randomBoolean(); + } else { + allowLeadingWildCard = randomBoolean() ? null : (allowLeadingWildCard == false); + } + break; + case 8: + if (analyzeWildcard == null) { + analyzeWildcard = randomBoolean(); + } else { + analyzeWildcard = randomBoolean() ? null : (analyzeWildcard == false); + } + break; + case 9: + maxDeterminizedStates += 5; + break; + case 10: + enablePositionIncrements = (enablePositionIncrements == false); + break; + case 11: + escape = (escape == false); + break; + case 12: + phraseSlop += 5; + break; + case 13: + fuzzyMaxExpansions += 5; + break; + case 14: + fuzzyPrefixLength += 5; + break; + case 15: + if (fuzzyRewrite == null) { + fuzzyRewrite = getRandomRewriteMethod(); + } else { + fuzzyRewrite = null; + } + break; + case 16: + if (rewrite == null) { + rewrite = getRandomRewriteMethod(); + } else { + rewrite = null; + } + break; + case 17: + if (quoteFieldSuffix == null) { + quoteFieldSuffix = randomAlphaOfLengthBetween(1, 3); + } else { + quoteFieldSuffix = quoteFieldSuffix + randomAlphaOfLength(1); + } + break; + case 18: + if (tieBreaker == null) { + tieBreaker = randomFloat(); + } else { + tieBreaker += 0.05f; + } + break; + case 19: + if (minimumShouldMatch == null) { + minimumShouldMatch = randomMinimumShouldMatch(); + } else { + minimumShouldMatch = null; + } + break; + case 20: + if (timeZone == null) { + timeZone = randomDateTimeZone().getID(); + } else { + if (randomBoolean()) { + timeZone = null; + } else { + timeZone = randomValueOtherThan(timeZone, () -> randomDateTimeZone().getID()); + } + } + break; + case 21: + autoGenerateSynonymsPhraseQuery = (autoGenerateSynonymsPhraseQuery == false); + break; + case 22: + fuzzyTranspositions = (fuzzyTranspositions == false); + break; + case 23: + return changeNameOrBoost(instance); + default: + throw new AssertionError("Illegal randomisation branch"); + } + + QueryStringQueryBuilder newInstance = new QueryStringQueryBuilder(query); + if (defaultField != null) { + newInstance.defaultField(defaultField); + } + newInstance.fields(fields); + newInstance.defaultOperator(operator); + newInstance.fuzziness(fuzziness); + if (analyzer != null) { + newInstance.analyzer(analyzer); + } + if (quoteAnalyzer != null) { + newInstance.quoteAnalyzer(quoteAnalyzer); + } + if (allowLeadingWildCard != null) { + newInstance.allowLeadingWildcard(allowLeadingWildCard); + } + if 
(analyzeWildcard != null) { + newInstance.analyzeWildcard(analyzeWildcard); + } + newInstance.maxDeterminizedStates(maxDeterminizedStates); + newInstance.enablePositionIncrements(enablePositionIncrements); + newInstance.escape(escape); + newInstance.phraseSlop(phraseSlop); + newInstance.fuzzyMaxExpansions(fuzzyMaxExpansions); + newInstance.fuzzyPrefixLength(fuzzyPrefixLength); + if (fuzzyRewrite != null) { + newInstance.fuzzyRewrite(fuzzyRewrite); + } + if (rewrite != null) { + newInstance.rewrite(rewrite); + } + if (quoteFieldSuffix != null) { + newInstance.quoteFieldSuffix(quoteFieldSuffix); + } + if (tieBreaker != null) { + newInstance.tieBreaker(tieBreaker); + } + if (minimumShouldMatch != null) { + newInstance.minimumShouldMatch(minimumShouldMatch); + } + if (timeZone != null) { + newInstance.timeZone(timeZone); + } + newInstance.autoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery); + newInstance.fuzzyTranspositions(fuzzyTranspositions); + + return newInstance; + } + @Override protected void doAssertLuceneQuery(QueryStringQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { @@ -182,6 +384,16 @@ protected void doAssertLuceneQuery(QueryStringQueryBuilder queryBuilder, .or(instanceOf(MatchNoDocsQuery.class))); } + // Tests fix for https://github.com/elastic/elasticsearch/issues/29403 + public void testTimezoneEquals() { + QueryStringQueryBuilder builder1 = new QueryStringQueryBuilder("bar"); + QueryStringQueryBuilder builder2 = new QueryStringQueryBuilder("foo"); + assertNotEquals(builder1, builder2); + builder1.timeZone("Europe/London"); + builder2.timeZone("Europe/London"); + assertNotEquals(builder1, builder2); + } + public void testIllegalArguments() { expectThrows(IllegalArgumentException.class, () -> new QueryStringQueryBuilder((String) null)); } @@ -1040,6 +1252,37 @@ public void testQuoteAnalyzer() throws Exception { assertEquals(expectedQuery, query); } + public void testQuoteFieldSuffix() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + QueryShardContext context = createShardContext(); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("bar") + .quoteFieldSuffix("_2") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar")), + new QueryStringQueryBuilder("\"bar\"") + .quoteFieldSuffix("_2") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + + // Now check what happens if the quote field does not exist + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("bar") + .quoteFieldSuffix(".quote") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("\"bar\"") + .quoteFieldSuffix(".quote") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + } + public void testToFuzzyQuery() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java new file mode 100644 index 0000000000000..024d43b1a6bab --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanBoostQuery; +import org.apache.lucene.search.spans.SpanNearQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.Iterator; + +import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.either; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; + +/* + * SpanGapQueryBuilder, unlike other query builders, is not used to build a Query. Therefore, it is not suited + * to the test pattern of AbstractQueryTestCase. Since it is only used in SpanNearQueryBuilder, its test cases + * are the same as those of the latter, with SpanGapQueryBuilder included as clauses. 
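+ * For example (illustrative, mirroring the JSON in testFromJson below): a clause such as + * { "span_gap" : { "field" : 2 } } inside a span_near query introduces a gap of two positions + * between the adjacent span_term clauses on that field. 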
+ */ + +public class SpanGapQueryBuilderTests extends AbstractQueryTestCase { + @Override + protected SpanNearQueryBuilder doCreateTestQueryBuilder() { + SpanTermQueryBuilder[] spanTermQueries = new SpanTermQueryBuilderTests().createSpanTermQueryBuilders(randomIntBetween(1, 6)); + SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder(spanTermQueries[0], randomIntBetween(-10, 10)); + for (int i = 1; i < spanTermQueries.length; i++) { + SpanTermQueryBuilder termQB = spanTermQueries[i]; + queryBuilder.addClause(termQB); + if (i % 2 == 1) { + SpanGapQueryBuilder gapQB = new SpanGapQueryBuilder(termQB.fieldName(), randomIntBetween(1,2)); + queryBuilder.addClause(gapQB); + } + } + queryBuilder.inOrder(true); + return queryBuilder; + } + + @Override + protected void doAssertLuceneQuery(SpanNearQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + assertThat(query, either(instanceOf(SpanNearQuery.class)) + .or(instanceOf(SpanTermQuery.class)) + .or(instanceOf(SpanBoostQuery.class)) + .or(instanceOf(MatchAllQueryBuilder.class))); + if (query instanceof SpanNearQuery) { + SpanNearQuery spanNearQuery = (SpanNearQuery) query; + assertThat(spanNearQuery.getSlop(), equalTo(queryBuilder.slop())); + assertThat(spanNearQuery.isInOrder(), equalTo(queryBuilder.inOrder())); + assertThat(spanNearQuery.getClauses().length, equalTo(queryBuilder.clauses().size())); + Iterator spanQueryBuilderIterator = queryBuilder.clauses().iterator(); + for (SpanQuery spanQuery : spanNearQuery.getClauses()) { + SpanQueryBuilder spanQB = spanQueryBuilderIterator.next(); + if (spanQB instanceof SpanGapQueryBuilder) continue; + assertThat(spanQuery, equalTo(spanQB.toQuery(context.getQueryShardContext()))); + } + } else if (query instanceof SpanTermQuery || query instanceof SpanBoostQuery) { + assertThat(queryBuilder.clauses().size(), equalTo(1)); + assertThat(query, equalTo(queryBuilder.clauses().get(0).toQuery(context.getQueryShardContext()))); + } + } + + public void testIllegalArguments() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SpanGapQueryBuilder(null, 1)); + assertEquals("[span_gap] field name is null or empty", e.getMessage()); + } + + public void testFromJson() throws IOException { + String json = + "{\n" + + " \"span_near\" : {\n" + + " \"clauses\" : [ {\n" + + " \"span_term\" : {\n" + + " \"field\" : {\n" + + " \"value\" : \"value1\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + " }, {\n" + + " \"span_gap\" : {\n" + + " \"field\" : 2" + + " }\n" + + " }, {\n" + + " \"span_term\" : {\n" + + " \"field\" : {\n" + + " \"value\" : \"value3\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + " } ],\n" + + " \"slop\" : 12,\n" + + " \"in_order\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + SpanNearQueryBuilder parsed = (SpanNearQueryBuilder) parseQuery(json); + checkGeneratedJson(json, parsed); + + assertEquals(json, 3, parsed.clauses().size()); + assertEquals(json, 12, parsed.slop()); + assertEquals(json, false, parsed.inOrder()); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java index 21b15fe53fa5e..359793adcf6af 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java @@ -184,4 +184,5 @@ public void testCollectPayloadsNoLongerSupported() throws Exception 
{ () -> parseQuery(json)); assertThat(e.getMessage(), containsString("[span_near] query does not support [collect_payloads]")); } + } diff --git a/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java index 9d6d1d8aa90a1..b75319b15c3e6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java @@ -71,4 +71,16 @@ public void testFromJson() throws IOException { assertEquals(json, "my_type", parsed.type()); } + + @Override + public void testToQuery() throws IOException { + super.testToQuery(); + assertWarnings("The [type] query is deprecated, filter on a field instead."); + } + + @Override + public void testMustRewrite() throws IOException { + super.testMustRewrite(); + assertWarnings("The [type] query is deprecated, filter on a field instead."); + } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index ba5b43b1d9204..97fc1b528acf3 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -264,7 +264,7 @@ public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardP RecoverySource.PeerRecoverySource.INSTANCE); final IndexShard newReplica = - newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting), () -> {}); + newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting), () -> {}, EMPTY_EVENT_LISTENER); replicas.add(newReplica); updateAllocationIDsOnPrimary(); return newReplica; diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 66e2a09750a2d..c7469f2432ad3 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -185,7 +185,7 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { false, SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON), mapping -> {}); - shards.promoteReplicaToPrimary(promotedReplica); + shards.promoteReplicaToPrimary(promotedReplica).get(); oldPrimary.close("demoted", randomBoolean()); oldPrimary.store().close(); shards.removeReplica(remainingReplica); diff --git a/server/src/test/java/org/elasticsearch/index/search/SimpleQueryStringQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/search/SimpleQueryStringQueryParserTests.java new file mode 100644 index 0000000000000..b89d7d056c026 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/search/SimpleQueryStringQueryParserTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.search; + +import org.elasticsearch.test.ESTestCase; + +public class SimpleQueryStringQueryParserTests extends ESTestCase { + + public void testEqualsSettings() { + SimpleQueryStringQueryParser.Settings settings1 = new SimpleQueryStringQueryParser.Settings(); + SimpleQueryStringQueryParser.Settings settings2 = new SimpleQueryStringQueryParser.Settings(); + String s = "Some random other object"; + assertEquals(settings1, settings1); + assertEquals(settings1, settings2); + assertNotEquals(settings1, null); + assertNotEquals(settings1, s); + + settings2.lenient(!settings1.lenient()); + assertNotEquals(settings1, settings2); + + settings2 = new SimpleQueryStringQueryParser.Settings(); + settings2.analyzeWildcard(!settings1.analyzeWildcard()); + assertNotEquals(settings1, settings2); + + settings2 = new SimpleQueryStringQueryParser.Settings(); + settings2.quoteFieldSuffix("a"); + assertNotEquals(settings1, settings2); + + settings2 = new SimpleQueryStringQueryParser.Settings(); + settings2.autoGenerateSynonymsPhraseQuery(!settings1.autoGenerateSynonymsPhraseQuery()); + assertNotEquals(settings1, settings2); + + settings2 = new SimpleQueryStringQueryParser.Settings(); + settings2.fuzzyPrefixLength(settings1.fuzzyPrefixLength() + 1); + assertNotEquals(settings1, settings2); + + settings2 = new SimpleQueryStringQueryParser.Settings(); + settings2.fuzzyMaxExpansions(settings1.fuzzyMaxExpansions() + 1); + assertNotEquals(settings1, settings2); + + settings2 = new SimpleQueryStringQueryParser.Settings(); + settings2.fuzzyTranspositions(!settings1.fuzzyTranspositions()); + assertNotEquals(settings1, settings2); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index b14030d46e4ca..f7ee54b32ee84 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; import org.apache.lucene.store.LockObtainFailedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -42,6 +41,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; @@ -50,6 +50,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -102,6 +103,7 @@ import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -333,7 +335,7 @@ public void testMaybeFlush() throws Exception { assertFalse(shard.shouldPeriodicallyFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(160 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get(); + new ByteSizeValue(190 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get(); client().prepareIndex("test", "test", "0") .setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertFalse(shard.shouldPeriodicallyFlush()); @@ -347,6 +349,7 @@ public void testMaybeFlush() throws Exception { .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async assertFalse(shard.shouldPeriodicallyFlush()); + assertThat(shard.flushStats().getPeriodic(), greaterThan(0L)); }); assertEquals(0, translog.stats().getUncommittedOperations()); translog.sync(); @@ -444,8 +447,12 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { if (flush) { final FlushStats flushStats = shard.flushStats(); final long total = flushStats.getTotal(); + final long periodic = flushStats.getPeriodic(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - check = () -> assertEquals(total + 1, shard.flushStats().getTotal()); + check = () -> { + assertThat(shard.flushStats().getTotal(), equalTo(total + 1)); + assertThat(shard.flushStats().getPeriodic(), equalTo(periodic + 1)); + }; } else { final long generation = shard.getEngine().getTranslog().currentFileGeneration(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); @@ -461,6 +468,30 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { check.run(); } + public void testFlushStats() throws Exception { + final IndexService indexService = createIndex("test"); + ensureGreen(); + Settings settings = Settings.builder().put("index.translog.flush_threshold_size", "" + between(200, 300) + "b").build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); + final int numDocs = between(10, 100); + for (int i = 0; i < numDocs; i++) { + client().prepareIndex("test", "doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + } + // Flush stats may include the new total count but the old periodic count - assert eventually. 
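+ // (Illustrative: a transient read may observe getTotal() == N while getPeriodic() still reports N - 1, + // which is why equality of the two counters is only asserted once assertBusy sees them converge.) 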
+ assertBusy(() -> { + final FlushStats flushStats = client().admin().indices().prepareStats("test").clear().setFlush(true).get().getTotal().flush; + assertThat(flushStats.getPeriodic(), allOf(equalTo(flushStats.getTotal()), greaterThan(0L))); + }); + assertBusy(() -> assertThat(indexService.getShard(0).shouldPeriodicallyFlush(), equalTo(false))); + settings = Settings.builder().put("index.translog.flush_threshold_size", (String) null).build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); + + client().prepareIndex("test", "doc", UUIDs.randomBase64UUID()).setSource("{}", XContentType.JSON).get(); + client().admin().indices().prepareFlush("test").setForce(randomBoolean()).setWaitIfOngoing(true).get(); + final FlushStats flushStats = client().admin().indices().prepareStats("test").clear().setFlush(true).get().getTotal().flush; + assertThat(flushStats.getTotal(), greaterThan(flushStats.getPeriodic())); + } + public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { createIndex("test"); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 7aa597c2d4d42..5506bc515f24c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -72,10 +72,12 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -90,6 +92,7 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.index.translog.TestTranslog; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogTests; import org.elasticsearch.indices.IndicesQueryCache; @@ -519,6 +522,7 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { // promote the replica final ShardRouting replicaRouting = indexShard.routingEntry(); + final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 10000); final ShardRouting primaryRouting = newShardRouting( replicaRouting.shardId(), @@ -527,7 +531,7 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { true, ShardRoutingState.STARTED, replicaRouting.allocationId()); - indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, + indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); @@ -553,6 +557,7 @@ public void onFailure(Exception e) { latch.await(); assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1)); + 
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); closeShards(indexShard); } @@ -571,7 +576,10 @@ public void testOperationPermitsOnPrimaryShards() throws InterruptedException, E ShardRouting replicaRouting = indexShard.routingEntry(); ShardRouting primaryRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, true, ShardRoutingState.STARTED, replicaRouting.allocationId()); - indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, 0L, + final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000); + indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> { + assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); + }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); @@ -739,6 +747,7 @@ public void onFailure(Exception e) { @Override public void onResponse(Releasable releasable) { assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); + assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint)); onResponse.set(true); @@ -784,15 +793,18 @@ private void finish() { assertFalse(onResponse.get()); assertNull(onFailure.get()); assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); + assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm)); Releasables.close(operation1); // our operation should still be blocked assertFalse(onResponse.get()); assertNull(onFailure.get()); assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm)); + assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm)); Releasables.close(operation2); barrier.await(); // now lock acquisition should have succeeded assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm)); + assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm)); if (engineClosed) { assertFalse(onResponse.get()); assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class)); @@ -1739,7 +1751,7 @@ public void testRecoverFromStoreRemoveStaleOperations() throws Exception { flushShard(shard); assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1")); // Simulate resync (without rollback): Noop #1, index #2 - shard.primaryTerm++; + acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1); shard.markSeqNoAsNoop(1, "test"); shard.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, SourceToParse.source(indexName, "doc", "doc-2", new BytesArray("{}"), XContentType.JSON), mapping); @@ -1857,7 +1869,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { closeShards(shard); IndexShard newShard = newShard( ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), - shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}); + shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}, EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2003,7 +2015,7 @@ public IndexSearcher wrap(IndexSearcher searcher) 
throws EngineException { closeShards(shard); IndexShard newShard = newShard( ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), - shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}); + shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}, EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2052,19 +2064,19 @@ public void testRecoverFromTranslog() throws IOException { IndexMetaData metaData = IndexMetaData.builder("test") .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) - .primaryTerm(0, 1).build(); + .primaryTerm(0, randomLongBetween(1, Long.MAX_VALUE)).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); List operations = new ArrayList<>(); int numTotalEntries = randomIntBetween(0, 10); int numCorruptEntries = 0; for (int i = 0; i < numTotalEntries; i++) { if (randomBoolean()) { - operations.add(new Translog.Index("test", "1", 0, 1, VersionType.INTERNAL, - "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, null, -1)); + operations.add(new Translog.Index("test", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL, + "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, -1)); } else { // corrupt entry - operations.add(new Translog.Index("test", "2", 1, 1, VersionType.INTERNAL, - "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, null, -1)); + operations.add(new Translog.Index("test", "2", 1, primary.getPrimaryTerm(), 1, VersionType.INTERNAL, + "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, -1)); numCorruptEntries++; } } @@ -2486,7 +2498,7 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, - null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer()); + null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata(); assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1); @@ -2970,4 +2982,74 @@ public void testSegmentMemoryTrackedWithRandomSearchers() throws Exception { breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); assertThat(breaker.getUsed(), equalTo(0L)); } + + public void testFlushOnInactive() throws Exception { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState + .INITIALIZING, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE); + final ShardId shardId = shardRouting.shardId(); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); + ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); + AtomicBoolean 
markedInactive = new AtomicBoolean(); + AtomicReference primaryRef = new AtomicReference<>(); + IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, () -> { + }, new IndexEventListener() { + @Override + public void onShardInactive(IndexShard indexShard) { + markedInactive.set(true); + primaryRef.get().flush(new FlushRequest()); + } + }); + primaryRef.set(primary); + recoverShardFromStore(primary); + for (int i = 0; i < 3; i++) { + indexDoc(primary, "test", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}"); + primary.refresh("test"); // produce segments + } + List segments = primary.segments(false); + Set names = new HashSet<>(); + for (Segment segment : segments) { + assertFalse(segment.committed); + assertTrue(segment.search); + names.add(segment.getName()); + } + assertEquals(3, segments.size()); + primary.flush(new FlushRequest()); + primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(false)); + primary.refresh("test"); + segments = primary.segments(false); + for (Segment segment : segments) { + if (names.contains(segment.getName())) { + assertTrue(segment.committed); + assertFalse(segment.search); + } else { + assertFalse(segment.committed); + assertTrue(segment.search); + } + } + assertEquals(4, segments.size()); + + assertFalse(markedInactive.get()); + assertBusy(() -> { + primary.checkIdle(0); + assertFalse(primary.isActive()); + }); + + assertTrue(markedInactive.get()); + segments = primary.segments(false); + assertEquals(1, segments.size()); + for (Segment segment : segments) { + assertTrue(segment.committed); + assertTrue(segment.search); + } + closeShards(primary); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java index f3bf76c57a550..91e439dcda98d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java @@ -136,8 +136,8 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { IndexingOperationListener.CompositeListener compositeListener = new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger); ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null); - Engine.Delete delete = new Engine.Delete("test", "1", new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id()))); - Engine.Index index = new Engine.Index(new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id())), doc); + Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong()); + Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc); compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true)); assertEquals(0, preIndex.get()); assertEquals(0, postIndex.get()); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 1bd98cd1c9e69..5803bf263633d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -122,14 +122,15 @@ public void onFailedEngine(String reason, @Nullable Exception e) { } }; store.createEmpty(); + final long 
primaryTerm = randomNonNegativeLong(); final String translogUUID = - Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm); store.associateIndexWithNewTranslog(translogUUID); EngineConfig config = new EngineConfig(shardId, allocationId, threadPool, indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null, - (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED); + (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm); engine = new InternalEngine(config); engine.recoverFromTranslog(); listeners.setTranslog(engine.getTranslog()); @@ -363,7 +364,7 @@ private Engine.IndexResult index(String id, String testFieldValue) throws IOExce BytesReference source = new BytesArray(new byte[] { 1 }); ParsedDocument doc = new ParsedDocument(versionField, seqID, id, "test", null, Arrays.asList(document), source, XContentType.JSON, null); - Engine.Index index = new Engine.Index(new Term("_id", doc.id()), doc); + Engine.Index index = new Engine.Index(new Term("_id", doc.id()), engine.config().getPrimaryTermSupplier().getAsLong(), doc); return engine.index(index); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index c626f2d18522c..95772910747c4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import java.io.IOException; @@ -54,12 +53,11 @@ public void testGetForUpdate() throws IOException { assertEquals(searcher.reader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog @@ -70,12 +68,11 @@ public void testGetForUpdate() throws IOException { } // now again from the reader - test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + test1 = indexDoc(primary, "test", "1", "{\"foo\" 
: \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); closeShards(primary); @@ -88,8 +85,7 @@ public void testGetForUpdateWithParentField() throws IOException { .put("index.version.created", Version.V_5_6_0) // for parent field mapper .build(); IndexMetaData metaData = IndexMetaData.builder("test") - .putMapping("parent", "{ \"properties\": {}}") - .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}, \"_parent\": { \"type\": \"parent\"}}") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); @@ -103,13 +99,11 @@ public void testGetForUpdateWithParentField() throws IOException { assertEquals(searcher.reader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog } @@ -119,13 +113,11 @@ public void testGetForUpdateWithParentField() throws IOException { } // now again from the reader - test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null); assertTrue(primary.getEngine().refreshNeeded()); testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); - assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); - assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); closeShards(primary); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java index d7f6d14760471..93895d4e43a7c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardIdTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.index.Index; import org.elasticsearch.test.ESTestCase; public 
class ShardIdTests extends ESTestCase { @@ -51,4 +52,20 @@ public void testShardIdFromString() { ex = expectThrows(IllegalArgumentException.class, () -> ShardId.fromString(badId3)); } + + public void testEquals() { + Index index1 = new Index("a", "a"); + Index index2 = new Index("a", "b"); + ShardId shardId1 = new ShardId(index1, 0); + ShardId shardId2 = new ShardId(index1, 0); + ShardId shardId3 = new ShardId(index2, 0); + ShardId shardId4 = new ShardId(index1, 1); + String s = "Some random other object"; + assertEquals(shardId1, shardId1); + assertEquals(shardId1, shardId2); + assertNotEquals(shardId1, null); + assertNotEquals(shardId1, s); + assertNotEquals(shardId1, shardId3); + assertNotEquals(shardId1, shardId4); + } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java index 4077d033da9cd..7ab9fa6733011 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java @@ -83,26 +83,7 @@ public static Set corruptTranslogFiles(Logger logger, Random random, Colle int corruptions = RandomNumbers.randomIntBetween(random, 5, 20); for (int i = 0; i < corruptions; i++) { Path fileToCorrupt = RandomPicks.randomFrom(random, candidates); - try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) { - // read - raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1)); - long filePointer = raf.position(); - ByteBuffer bb = ByteBuffer.wrap(new byte[1]); - raf.read(bb); - bb.flip(); - - // corrupt - byte oldValue = bb.get(0); - byte newValue = (byte) (oldValue + 1); - bb.put(0, newValue); - - // rewrite - raf.position(filePointer); - raf.write(bb); - logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}", - fileToCorrupt, filePointer, Integer.toHexString(oldValue), - Integer.toHexString(newValue), fileToCorrupt); - } + corruptFile(logger, random, fileToCorrupt); corruptedFiles.add(fileToCorrupt); } } @@ -110,6 +91,29 @@ public static Set corruptTranslogFiles(Logger logger, Random random, Colle return corruptedFiles; } + static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException { + try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) { + // read + raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1)); + long filePointer = raf.position(); + ByteBuffer bb = ByteBuffer.wrap(new byte[1]); + raf.read(bb); + bb.flip(); + + // corrupt + byte oldValue = bb.get(0); + byte newValue = (byte) (oldValue + 1); + bb.put(0, newValue); + + // rewrite + raf.position(filePointer); + raf.write(bb); + logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}", + fileToCorrupt, filePointer, Integer.toHexString(oldValue), + Integer.toHexString(newValue), fileToCorrupt); + } + } + /** * Lists all existing commits in a given index path, then read the minimum translog generation that will be used in recoverFromTranslog. */ @@ -122,4 +126,11 @@ private static long minTranslogGenUsedInRecovery(Path translogPath) throws IOExc return Long.parseLong(recoveringCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)); } } + + /** + * Returns the primary term associated with the current translog writer of the given translog. 
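+ * (Used, for example, by IndexShardTests to assert that the current writer's term matches the shard's primary term.) 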
+ */ + public static long getCurrentTerm(Translog translog) { + return translog.getCurrent().getPrimaryTerm(); + } } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index 2f6f4ee3178f2..9ae502fecb580 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -171,7 +171,7 @@ private Tuple, TranslogWriter> createReadersAndWriter(final } writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, - () -> 1L); + () -> 1L, randomNonNegativeLong()); writer = Mockito.spy(writer); Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime(); diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java new file mode 100644 index 0000000000000..0dc404767de3c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.OutputStreamDataOutput; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.channels.Channels; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThan; + +public class TranslogHeaderTests extends ESTestCase { + + public void testCurrentHeaderVersion() throws Exception { + final String translogUUID = UUIDs.randomBase64UUID(); + final TranslogHeader outHeader = new TranslogHeader(translogUUID, randomNonNegativeLong()); + final long generation = randomNonNegativeLong(); + final Path translogFile = createTempDir().resolve(Translog.getFilename(generation)); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { + outHeader.write(channel); + assertThat(outHeader.sizeInBytes(), equalTo((int)channel.position())); + } + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel); + assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID)); + assertThat(inHeader.getPrimaryTerm(), equalTo(outHeader.getPrimaryTerm())); + assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position())); + } + final TranslogCorruptedException mismatchUUID = expectThrows(TranslogCorruptedException.class, () -> { + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel); + } + }); + assertThat(mismatchUUID.getMessage(), containsString("this translog file belongs to a different translog")); + int corruptions = between(1, 10); + for (int i = 0; i < corruptions; i++) { + TestTranslog.corruptFile(logger, random(), translogFile); + } + expectThrows(TranslogCorruptedException.class, () -> { + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader.read(outHeader.getTranslogUUID(), translogFile, channel); + } + }); + } + + public void testHeaderWithoutPrimaryTerm() throws Exception { + final String translogUUID = UUIDs.randomBase64UUID(); + final long generation = randomNonNegativeLong(); + final Path translogFile = createTempDir().resolve(Translog.getFilename(generation)); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { + writeHeaderWithoutTerm(channel, translogUUID); + assertThat((int)channel.position(), lessThan(TranslogHeader.headerSizeInBytes(translogUUID))); + } + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel); + assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID)); + assertThat(inHeader.getPrimaryTerm(), equalTo(TranslogHeader.UNKNOWN_PRIMARY_TERM)); + assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position())); + } + expectThrows(TranslogCorruptedException.class, () -> { + try 
(FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel); + } + }); + } + + static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException { + final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel)); + CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, TranslogHeader.VERSION_CHECKPOINTS); + final BytesRef uuid = new BytesRef(translogUUID); + out.writeInt(uuid.length); + out.writeBytes(uuid.bytes, uuid.offset, uuid.length); + channel.force(true); + assertThat(channel.position(), equalTo(43L)); + } + + public void testLegacyTranslogVersions() throws Exception { + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", IllegalStateException.class, "pre-1.4 translog"); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", IllegalStateException.class, "pre-2.0 translog"); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", IllegalStateException.class, "pre-2.0 translog"); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary", + TranslogCorruptedException.class, "translog looks like version 1 or later, but has corrupted header"); + checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary", + IllegalStateException.class, "pre-2.0 translog"); + } + + private void checkFailsToOpen(String file, Class expectedErrorType, String expectedMessage) { + final Path translogFile = getDataPath(file); + assertThat("test file [" + translogFile + "] should exist", Files.exists(translogFile), equalTo(true)); + final E error = expectThrows(expectedErrorType, () -> { + final Checkpoint checkpoint = new Checkpoint(Files.size(translogFile), 1, 1, + SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, 1); + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogReader.open(channel, translogFile, checkpoint, null); + } + }); + assertThat(error.getMessage(), containsString(expectedMessage)); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 61e5cdcfd953a..b3b9fca886e17 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; + import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; @@ -30,7 +31,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.ByteArrayDataOutput; import org.apache.lucene.store.MockDirectoryWrapper; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -53,15 +53,16 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.Operation.Origin; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.seqno.LocalCheckpointTracker; import org.elasticsearch.index.seqno.LocalCheckpointTrackerTests; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -108,6 +109,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import java.util.stream.LongStream; +import java.util.stream.Stream; import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; @@ -131,6 +133,8 @@ public class TranslogTests extends ESTestCase { protected Translog translog; private AtomicLong globalCheckpoint; protected Path translogDir; + // A default primary term is used by translog instances created in this test. + private final AtomicLong primaryTerm = new AtomicLong(); @Override protected void afterIfSuccessful() throws Exception { @@ -151,14 +155,14 @@ protected void afterIfSuccessful() throws Exception { protected Translog createTranslog(TranslogConfig config) throws IOException { String translogUUID = - Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), - () -> SequenceNumbers.NO_OPS_PERFORMED); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); } protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException { return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()), - () -> SequenceNumbers.NO_OPS_PERFORMED); + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); } @@ -187,6 +191,7 @@ private void commit(Translog translog, long genToRetain, long genToCommit) throw @Before public void setUp() throws Exception { super.setUp(); + primaryTerm.set(randomLongBetween(1, Integer.MAX_VALUE)); // if a previous test failed we clean up things here translogDir = createTempDir(); translog = create(translogDir); @@ -207,8 +212,8 @@ private Translog create(Path path) throws IOException { globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final TranslogConfig translogConfig = getTranslogConfig(path); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); - final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId); - return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get()); + final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get); } private TranslogConfig getTranslogConfig(final Path path) { @@ -302,22 +307,22 @@ public void testSimpleOperations() throws IOException { assertThat(snapshot, 
SnapshotMatchers.size(0)); } - addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); try (Translog.Snapshot snapshot = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.totalOperations(), equalTo(ops.size())); } - addToTranslogAndList(translog, ops, new Translog.Delete("test", "2", 1, newUid("2"))); + addToTranslogAndList(translog, ops, new Translog.Delete("test", "2", 1, primaryTerm.get(), newUid("2"))); try (Translog.Snapshot snapshot = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); assertThat(snapshot.totalOperations(), equalTo(ops.size())); } final long seqNo = randomNonNegativeLong(); - final long primaryTerm = randomNonNegativeLong(); final String reason = randomAlphaOfLength(16); - addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, primaryTerm, reason)); + final long noopTerm = randomLongBetween(1, primaryTerm.get()); + addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, noopTerm, reason)); try (Translog.Snapshot snapshot = translog.newSnapshot()) { @@ -332,7 +337,7 @@ public void testSimpleOperations() throws IOException { Translog.NoOp noOp = (Translog.NoOp) snapshot.next(); assertNotNull(noOp); assertThat(noOp.seqNo(), equalTo(seqNo)); - assertThat(noOp.primaryTerm(), equalTo(primaryTerm)); + assertThat(noOp.primaryTerm(), equalTo(noopTerm)); assertThat(noOp.reason(), equalTo(reason)); assertNull(snapshot.next()); @@ -400,35 +405,35 @@ public void testStats() throws IOException { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(0)); } - assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC))); - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogHeader.TRANSLOG_CODEC))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(1)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(140L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(163L)); assertThat(stats.getUncommittedOperations(), equalTo(1)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(140L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(163L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } - translog.add(new Translog.Delete("test", "2", 1, newUid("2"))); + translog.add(new Translog.Delete("test", "2", 1, primaryTerm.get(), newUid("2"))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(2)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(189L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(212L)); assertThat(stats.getUncommittedOperations(), equalTo(2)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(189L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(212L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } - translog.add(new Translog.Delete("test", "3", 2, newUid("3"))); + translog.add(new Translog.Delete("test", "3", 2, primaryTerm.get(), newUid("3"))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(3)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(238L)); + 
assertThat(stats.getTranslogSizeInBytes(), equalTo(261L)); assertThat(stats.getUncommittedOperations(), equalTo(3)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(238L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(261L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } @@ -436,13 +441,13 @@ public void testStats() throws IOException { { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(4)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(280L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(303L)); assertThat(stats.getUncommittedOperations(), equalTo(4)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(280L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(303L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } - final long expectedSizeInBytes = 323L; + final long expectedSizeInBytes = 358L; translog.rollGeneration(); { final TranslogStats stats = stats(); @@ -493,7 +498,7 @@ public void testUncommittedOperations() throws Exception { int uncommittedOps = 0; int operationsInLastGen = 0; for (int i = 0; i < operations; i++) { - translog.add(new Translog.Index("test", Integer.toString(i), i, new byte[]{1})); + translog.add(new Translog.Index("test", Integer.toString(i), i, primaryTerm.get(), new byte[]{1})); uncommittedOps++; operationsInLastGen++; if (rarely()) { @@ -562,7 +567,7 @@ public void testSnapshot() throws IOException { assertThat(snapshot, SnapshotMatchers.size(0)); } - addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); try (Translog.Snapshot snapshot = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); @@ -582,9 +587,9 @@ public void testSnapshot() throws IOException { public void testReadLocation() throws IOException { ArrayList ops = new ArrayList<>(); ArrayList locs = new ArrayList<>(); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}))); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{1}))); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{1}))); int i = 0; for (Translog.Operation op : ops) { assertEquals(op, translog.readOperation(locs.get(i++))); @@ -600,16 +605,16 @@ public void testSnapshotWithNewTranslog() throws IOException { toClose.add(snapshot); assertThat(snapshot, SnapshotMatchers.size(0)); - addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); Translog.Snapshot snapshot1 = translog.newSnapshot(); toClose.add(snapshot1); - addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{2})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2})); assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0))); translog.rollGeneration(); - addToTranslogAndList(translog, 
ops, new Translog.Index("test", "3", 2, new byte[]{3})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3})); Translog.Snapshot snapshot2 = translog.newSnapshot(); toClose.add(snapshot2); @@ -623,7 +628,7 @@ public void testSnapshotWithNewTranslog() throws IOException { public void testSnapshotOnClosedTranslog() throws IOException { assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1)))); - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); translog.close(); try { Translog.Snapshot snapshot = translog.newSnapshot(); @@ -746,7 +751,7 @@ public void testTranslogChecksums() throws Exception { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8")))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); } translog.sync(); @@ -773,7 +778,7 @@ public void testTruncatedTranslogs() throws Exception { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8")))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); } translog.sync(); @@ -828,16 +833,16 @@ private void corruptTranslogs(Path directory) throws Exception { } private Term newUid(ParsedDocument doc) { - return new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id())); + return new Term("_id", Uid.encodeId(doc.id())); } - private Term newUid(String uid) { - return new Term("_uid", uid); + private Term newUid(String id) { + return new Term("_id", Uid.encodeId(id)); } public void testVerifyTranslogIsNotDeleted() throws IOException { assertFileIsPresent(translog, 1); - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); try (Translog.Snapshot snapshot = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.size(1)); assertFileIsPresent(translog, 1); @@ -889,10 +894,10 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep switch (type) { case CREATE: case INDEX: - op = new Translog.Index("type", "" + id, id, new byte[]{(byte) id}); + op = new Translog.Index("type", "" + id, id, primaryTerm.get(), new byte[]{(byte) id}); break; case DELETE: - op = new Translog.Delete("test", Long.toString(id), id, newUid(Long.toString(id))); + op = new Translog.Delete("test", Long.toString(id), id, primaryTerm.get(), newUid(Long.toString(id))); break; case NO_OP: op = new Translog.NoOp(id, 1, Long.toString(id)); @@ -1051,13 +1056,13 @@ public void testSyncUpTo() throws IOException { for (int op = 0; op < translogOperations; op++) { int seqNo = ++count; final Translog.Location location = - translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { assertTrue("at least one operation pending", translog.syncNeeded()); assertTrue("this operation has 
not been synced", translog.ensureSynced(location)); assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced seqNo = ++count; - translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); assertTrue("one pending operation", translog.syncNeeded()); assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now assertTrue("we only synced a previous operation yet", translog.syncNeeded()); @@ -1086,7 +1091,7 @@ public void testSyncUpToStream() throws IOException { rollAndCommit(translog); // do this first so that there is at least one pending tlog entry } final Translog.Location location = - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); locations.add(location); } Collections.shuffle(locations, random()); @@ -1114,7 +1119,7 @@ public void testLocationComparison() throws IOException { int count = 0; for (int op = 0; op < translogOperations; op++) { locations.add( - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); if (rarely() && translogOperations > op + 1) { rollAndCommit(translog); } @@ -1151,7 +1156,7 @@ public void testBasicCheckpoint() throws IOException { int lastSynced = -1; long lastSyncedGlobalCheckpoint = globalCheckpoint.get(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16)); } @@ -1162,8 +1167,8 @@ public void testBasicCheckpoint() throws IOException { } } assertEquals(translogOperations, translog.totalOperations()); - translog.add(new Translog.Index( - "test", "" + translogOperations, translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + translogOperations, translogOperations, primaryTerm.get(), + Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)); try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) { @@ -1287,7 +1292,7 @@ public void testBasicRecovery() throws IOException { int minUncommittedOp = -1; final boolean commitOften = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); final boolean 
commit = commitOften ? frequently() : rarely(); if (commit && op < translogOperations - 1) { rollAndCommit(translog); @@ -1308,7 +1313,7 @@ public void testBasicRecovery() throws IOException { assertNull(snapshot.next()); } } else { - translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { @@ -1330,7 +1335,7 @@ public void testRecoveryUncommitted() throws IOException { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1347,7 +1352,7 @@ public void testRecoveryUncommitted() throws IOException { TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1361,7 +1366,7 @@ public void testRecoveryUncommitted() throws IOException { } } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1386,7 +1391,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1407,7 +1412,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { final String translogUUID = translog.getTranslogUUID(); final 
TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1422,7 +1427,7 @@ public void testRecoveryUncommittedFileExists() throws IOException { } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1446,7 +1451,7 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1465,15 +1470,15 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { fail("corrupted"); } catch (IllegalStateException ex) { - assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " + + assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3080, " + "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " + "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0}", ex.getMessage()); } Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); 
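// Why the assertion just below expects the captured generation + 2: the test rolled
// one generation at prepareOp, and each Translog construction (each recovery pass)
// opens one more on top of it, so a second recovery lands on + 3 (see the sibling
// "recover twice" branches). As a sketch, with one roll and N recovery passes (an
// inference from these assertions, not a documented formula):
//   currentFileGeneration() == translogGeneration.translogFileGeneration + 1 + N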
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1493,7 +1498,7 @@ public void testSnapshotFromStreamInput() throws IOException { List ops = new ArrayList<>(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { - Translog.Index test = new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))); + Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))); ops.add(test); } Translog.writeOperations(out, ops); @@ -1508,8 +1513,8 @@ public void testLocationHashCodeEquals() throws IOException { int translogOperations = randomIntBetween(10, 100); try (Translog translog2 = create(createTempDir())) { for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); - locations2.add(translog2.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); } int iters = randomIntBetween(10, 100); for (int i = 0; i < iters; i++) { @@ -1535,7 +1540,7 @@ public void testOpenForeignTranslog() throws IOException { int translogOperations = randomIntBetween(1, 10); int firstUncommitted = 0; for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { rollAndCommit(translog); firstUncommitted = op + 1; @@ -1550,12 +1555,12 @@ public void testOpenForeignTranslog() throws IOException { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { } - this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); @@ -1567,10 +1572,10 @@ public void testOpenForeignTranslog() throws IOException { } public void testFailOnClosedWrite() throws IOException { - translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); 
translog.close(); try { - translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); fail("closed"); } catch (AlreadyClosedException ex) { // all is well @@ -1608,7 +1613,7 @@ public void testCloseConcurrently() throws Throwable { } } - private static class TranslogThread extends Thread { + private class TranslogThread extends Thread { private final CountDownLatch downLatch; private final int opsPerThread; private final int threadId; @@ -1639,19 +1644,19 @@ public void run() { case CREATE: case INDEX: op = new Translog.Index("test", threadId + "_" + opCount, seqNoGenerator.getAndIncrement(), - randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")); + primaryTerm.get(), randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")); break; case DELETE: op = new Translog.Delete( "test", threadId + "_" + opCount, new Term("_uid", threadId + "_" + opCount), seqNoGenerator.getAndIncrement(), - 0, + primaryTerm.get(), 1 + randomInt(100000), randomFrom(VersionType.values())); break; case NO_OP: - op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), randomNonNegativeLong(), randomAlphaOfLength(16)); + op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), primaryTerm.get(), randomAlphaOfLength(16)); break; default: throw new AssertionError("unsupported operation type [" + type + "]"); @@ -1689,7 +1694,7 @@ public void testFailFlush() throws IOException { while (failed == false) { try { locations.add(translog.add( - new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); translog.sync(); opsSynced++; } catch (MockDirectoryWrapper.FakeIOException ex) { @@ -1710,7 +1715,7 @@ public void testFailFlush() throws IOException { if (randomBoolean()) { try { locations.add(translog.add( - new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); fail("we are already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); @@ -1744,7 +1749,7 @@ public void testFailFlush() throws IOException { translog.close(); // we are closed final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); @@ -1767,7 +1772,7 @@ public void testTranslogOpsCountIsCorrect() throws IOException { LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { locations.add(translog.add( - new Translog.Index("test", "" + opsAdded, opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); + new 
Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); try (Translog.Snapshot snapshot = this.translog.newSnapshot()) { assertEquals(opsAdded + 1, snapshot.totalOperations()); for (int i = 0; i < opsAdded; i++) { @@ -1786,11 +1791,11 @@ public void testTragicEventCanBeAnyException() throws IOException { TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy()); LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly - translog.add(new Translog.Index("test", "1", 0, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); fail.failAlways(); try { Translog.Location location = translog.add( - new Translog.Index("test", "2", 1, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + new Translog.Index("test", "2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { translog.ensureSynced(location); } else { @@ -1880,7 +1885,7 @@ protected void afterAdd() throws IOException { } } try (Translog tlog = - new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); Translog.Snapshot snapshot = tlog.newSnapshot()) { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -1907,7 +1912,7 @@ protected void afterAdd() throws IOException { public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1915,7 +1920,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { translog.rollGeneration(); long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1926,7 +1931,7 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); 
assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered for (long gen = 1; gen < translog.currentFileGeneration(); gen++) { @@ -1957,7 +1962,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { translogUUID = translog.getTranslogUUID(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1965,7 +1970,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { translog.rollGeneration(); comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1982,7 +1987,7 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); @@ -2046,9 +2051,9 @@ private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig }; if (translogUUID == null) { translogUUID = Translog.createEmptyTranslog( - config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory); + config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory, primaryTerm.get()); } - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED) { + return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { @Override ChannelFactory getChannelFactory() { return channelFactory; @@ -2156,10 +2161,10 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException { Path tempDir = createTempDir(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = createTranslog(config); - translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8")))); translog.close(); try { - new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED) { + new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { @Override protected TranslogWriter createWriter(long fileGeneration, long 
initialMinTranslogGen, long initialGlobalCheckpoint) throws IOException { @@ -2174,7 +2179,7 @@ protected TranslogWriter createWriter(long fileGeneration, long initialMinTransl } public void testRecoverWithUnbackedNextGen() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); @@ -2190,7 +2195,7 @@ public void testRecoverWithUnbackedNextGen() throws IOException { assertNotNull("operation 1 must be non-null", op); assertEquals("payload mismatch for operation 1", 1, Integer.parseInt(op.getSource().source.utf8ToString())); - tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(2).getBytes(Charset.forName("UTF-8")))); + tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(2).getBytes(Charset.forName("UTF-8")))); } try (Translog tlog = openTranslog(config, translog.getTranslogUUID()); @@ -2208,7 +2213,7 @@ public void testRecoverWithUnbackedNextGen() throws IOException { } public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); @@ -2217,7 +2222,7 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); try { - Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2227,7 +2232,7 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { } public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); @@ -2239,7 +2244,7 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertFalse(tlog.syncNeeded()); try (Translog.Snapshot snapshot = tlog.newSnapshot()) { for (int i = 0; i < 
1; i++) { @@ -2248,11 +2253,11 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString())); } } - tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); } try { - Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2288,7 +2293,7 @@ public void testWithRandomException() throws IOException { LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { String doc = lineFileDocs.nextDoc().toString(); - failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, doc.getBytes(Charset.forName("UTF-8")))); + failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), doc.getBytes(Charset.forName("UTF-8")))); unsynced.add(doc); if (randomBoolean()) { failableTLog.sync(); @@ -2360,9 +2365,9 @@ public void testWithRandomException() throws IOException { deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery); if (generationUUID == null) { // we never managed to successfully create a translog, make it - generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } - try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { @@ -2421,20 +2426,20 @@ public void testCheckpointOnDiskFull() throws IOException { * Tests that closing views after the translog is fine and we can reopen the translog */ public void testPendingDelete() throws IOException { - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); translog.rollGeneration(); TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); - translog.add(new Translog.Index("test", "2", 1, new byte[]{2})); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog.add(new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2})); translog.rollGeneration(); Closeable lock = translog.acquireRetentionLock(); - translog.add(new Translog.Index("test", "3", 2, new byte[]{3})); + translog.add(new Translog.Index("test", "3", 2, primaryTerm.get(), 
new byte[]{3})); translog.close(); IOUtils.close(lock); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); } public static Translog.Location randomTranslogLocation() { @@ -2449,11 +2454,11 @@ public void testTranslogOpSerialization() throws Exception { seqID.seqNo.setLongValue(randomSeqNum); seqID.seqNoDocValue.setLongValue(randomSeqNum); seqID.primaryTerm.setLongValue(randomPrimaryTerm); - Field uidField = new Field("_uid", Uid.createUid("test", "1"), UidFieldMapper.Defaults.FIELD_TYPE); + Field idField = new Field("_id", Uid.encodeId("1"), IdFieldMapper.Defaults.FIELD_TYPE); Field versionField = new NumericDocValuesField("_version", 1); Document document = new Document(); document.add(new TextField("value", "test", Field.Store.YES)); - document.add(uidField); + document.add(idField); document.add(versionField); document.add(seqID.seqNo); document.add(seqID.seqNoDocValue); @@ -2482,21 +2487,6 @@ public void testTranslogOpSerialization() throws Exception { in = out.bytes().streamInput(); Translog.Delete serializedDelete = (Translog.Delete) Translog.Operation.readOperation(in); assertEquals(delete, serializedDelete); - - // simulate legacy delete serialization - out = new BytesStreamOutput(); - out.writeByte(Translog.Operation.Type.DELETE.id()); - out.writeVInt(Translog.Delete.FORMAT_5_0); - out.writeString(UidFieldMapper.NAME); - out.writeString("my_type#my_id"); - out.writeLong(3); // version - out.writeByte(VersionType.INTERNAL.getValue()); - out.writeLong(2); // seq no - out.writeLong(0); // primary term - in = out.bytes().streamInput(); - serializedDelete = (Translog.Delete) Translog.Operation.readOperation(in); - assertEquals("my_type", serializedDelete.type()); - assertEquals("my_id", serializedDelete.id()); } public void testRollGeneration() throws Exception { @@ -2514,20 +2504,31 @@ public void testRollGeneration() throws Exception { final int rolls = randomIntBetween(1, 16); int totalOperations = 0; int seqNo = 0; + final List primaryTerms = new ArrayList<>(); + primaryTerms.add(primaryTerm.get()); // We always create an empty translog. 
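// Two seed terms are recorded before any explicit roll: createEmptyTranslog writes
// one generation under the current term and the Translog constructor opens a second,
// hence the second add immediately below. Both files must appear in the
// per-generation term list later rebuilt from translog.getReaders() plus
// translog.getCurrent().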
+ primaryTerms.add(primaryTerm.get()); for (int i = 0; i < rolls; i++) { final int operations = randomIntBetween(1, 128); for (int j = 0; j < operations; j++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test")); + translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test")); totalOperations++; } try (ReleasableLock ignored = translog.writeLock.acquire()) { + if (randomBoolean()){ + primaryTerm.incrementAndGet(); + } translog.rollGeneration(); + primaryTerms.add(primaryTerm.get()); } assertThat(translog.currentFileGeneration(), equalTo(generation + i + 1)); + assertThat(translog.getCurrent().getPrimaryTerm(), equalTo(primaryTerm.get())); assertThat(translog.totalOperations(), equalTo(totalOperations)); } for (int i = 0; i <= rolls; i++) { assertFileIsPresent(translog, generation + i); + final List storedPrimaryTerms = Stream.concat(translog.getReaders().stream(), Stream.of(translog.getCurrent())) + .map(t -> t.getPrimaryTerm()).collect(Collectors.toList()); + assertThat(storedPrimaryTerms, equalTo(primaryTerms)); } long minGenForRecovery = randomLongBetween(generation, generation + rolls); commit(translog, minGenForRecovery, generation + rolls); @@ -2630,8 +2631,11 @@ public void testSimpleCommit() throws IOException { final int operations = randomIntBetween(1, 4096); long seqNo = 0; for (int i = 0; i < operations; i++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test'")); + translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test'")); if (rarely()) { + if (rarely()) { + primaryTerm.incrementAndGet(); + } translog.rollGeneration(); } } @@ -2688,7 +2692,7 @@ public void testSnapshotReadOperationInReverse() throws Exception { for (int gen = 0; gen < generations; gen++) { final int operations = randomIntBetween(1, 100); for (int i = 0; i < operations; i++) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), new byte[]{1}); + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1}); translog.add(op); views.peek().add(op); } @@ -2713,7 +2717,7 @@ public void testSnapshotDedupOperations() throws Exception { List batch = LongStream.rangeClosed(0, between(0, 500)).boxed().collect(Collectors.toList()); Randomness.shuffle(batch); for (Long seqNo : batch) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, new byte[]{1}); + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); translog.add(op); latestOperations.put(op.seqNo(), op); } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java deleted file mode 100644 index d57373ebfe349..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.translog; - -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -/** - * Tests for reading old and new translog files - */ -public class TranslogVersionTests extends ESTestCase { - - private void checkFailsToOpen(String file, String expectedMessage) throws IOException { - Path translogFile = getDataPath(file); - assertThat("test file should exist", Files.exists(translogFile), equalTo(true)); - try { - openReader(translogFile, 0); - fail("should be able to open an old translog"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), containsString(expectedMessage)); - } - - } - - public void testV0LegacyTranslogVersion() throws Exception { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", "pre-1.4 translog"); - } - - public void testV1ChecksummedTranslogVersion() throws Exception { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", "pre-2.0 translog"); - } - - public void testCorruptedTranslogs() throws Exception { - try { - Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary"); - assertThat("test file should exist", Files.exists(translogFile), equalTo(true)); - openReader(translogFile, 0); - fail("should have thrown an exception about the header being corrupt"); - } catch (TranslogCorruptedException e) { - assertThat("translog corruption from header: " + e.getMessage(), - e.getMessage().contains("translog looks like version 1 or later, but has corrupted header"), equalTo(true)); - } - - try { - Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-invalid-first-byte.binary"); - assertThat("test file should exist", Files.exists(translogFile), equalTo(true)); - openReader(translogFile, 0); - fail("should have thrown an exception about the header being corrupt"); - } catch (TranslogCorruptedException e) { - assertThat("translog corruption from header: " + e.getMessage(), - e.getMessage().contains("Invalid first byte in translog file, got: 1, expected 0x00 or 0x3f"), equalTo(true)); - } - - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary", "pre-2.0 translog"); - } - - public void testTruncatedTranslog() throws Exception { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", "pre-2.0 translog"); - } - - public TranslogReader openReader(final Path path, final long id) throws IOException { - try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) { - final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final Checkpoint checkpoint = - new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO, id); - return 
TranslogReader.open(channel, path, checkpoint, null); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 6c4cda7fc52b5..3ebb268a7b8bc 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -26,13 +26,11 @@ import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.MapperPlugin; @@ -84,8 +82,8 @@ public Map getMetadataMappers() { } }); - private static String[] EXPECTED_METADATA_FIELDS = new String[]{UidFieldMapper.NAME, IdFieldMapper.NAME, RoutingFieldMapper.NAME, - IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME, VersionFieldMapper.NAME, ParentFieldMapper.NAME, + private static String[] EXPECTED_METADATA_FIELDS = new String[]{IdFieldMapper.NAME, RoutingFieldMapper.NAME, + IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME}; public void testBuiltinMappers() { @@ -108,7 +106,7 @@ public void testBuiltinWithPlugins() { greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size())); Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); Iterator iterator = metadataMapperParsers.keySet().iterator(); - assertEquals(UidFieldMapper.NAME, iterator.next()); + assertEquals(IdFieldMapper.NAME, iterator.next()); String last = null; while(iterator.hasNext()) { last = iterator.next(); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 977719b5398dd..e2808554cdcb9 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -593,40 +593,6 @@ public void testPutMapping() throws Exception { assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type"), notNullValue()); } - public void testPutMappingMultiType() throws Exception { - assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); - verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=text"), true); - verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=text"), true); - - for (String index : Arrays.asList("foo", "foobar", "bar", "barbaz")) { - assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))); - // allows for multiple types - } - - verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", 
"type=text"), false); - assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type1"), notNullValue()); - verify(client().admin().indices().preparePutMapping("b*").setType("type1").setSource("field", "type=text"), false); - assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type1"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type1"), notNullValue()); - verify(client().admin().indices().preparePutMapping("_all").setType("type2").setSource("field", "type=text"), false); - assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type2"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type2"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type2"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type2"), notNullValue()); - verify(client().admin().indices().preparePutMapping().setType("type3").setSource("field", "type=text"), false); - assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type3"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type3"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type3"), notNullValue()); - assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type3"), notNullValue()); - - - verify(client().admin().indices().preparePutMapping("c*").setType("type1").setSource("field", "type=text"), true); - - assertAcked(client().admin().indices().prepareClose("barbaz").get()); - verify(client().admin().indices().preparePutMapping("barbaz").setType("type4").setSource("field", "type=text"), false); - assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type4"), notNullValue()); - } - public static final class TestPlugin extends Plugin { private static final Setting INDEX_A = diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java index 9a2a5d1eacd75..8059c8a103927 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheTests.java @@ -29,6 +29,8 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -44,6 +46,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -421,6 +424,79 @@ public void testInvalidate() throws Exception { assertEquals(0, cache.numRegisteredCloseListeners()); } + public void testEqualsKey() { + AtomicBoolean trueBoolean = new AtomicBoolean(true); + AtomicBoolean falseBoolean = new AtomicBoolean(false); 
+ IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), 1L, new TestBytesReference(1)); + IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), 1L, new TestBytesReference(1)); + IndicesRequestCache.Key key3 = new IndicesRequestCache.Key(new TestEntity(null, falseBoolean), 1L, new TestBytesReference(1)); + IndicesRequestCache.Key key4 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), 2L, new TestBytesReference(1)); + IndicesRequestCache.Key key5 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), 1L, new TestBytesReference(2)); + String s = "Some other random object"; + assertEquals(key1, key1); + assertEquals(key1, key2); + assertNotEquals(key1, null); + assertNotEquals(key1, s); + assertNotEquals(key1, key3); + assertNotEquals(key1, key4); + assertNotEquals(key1, key5); + } + + private class TestBytesReference extends BytesReference { + + int dummyValue; + TestBytesReference(int dummyValue) { + this.dummyValue = dummyValue; + } + + @Override + public boolean equals(Object other) { + return other instanceof TestBytesReference && this.dummyValue == ((TestBytesReference) other).dummyValue; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + dummyValue; + return result; + } + + @Override + public byte get(int index) { + return 0; + } + + @Override + public int length() { + return 0; + } + + @Override + public BytesReference slice(int from, int length) { + return null; + } + + @Override + public BytesRef toBytesRef() { + return null; + } + + @Override + public long ramBytesUsed() { + return 0; + } + + @Override + public Collection getChildResources() { + return null; + } + + @Override + public boolean isFragment() { + return false; + } + } + private class TestEntity extends AbstractIndexShardCacheEntity { private final AtomicBoolean standInForIndexShard; private final ShardRequestCache shardRequestCache; diff --git a/server/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java b/server/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java index ae3ec36759eee..21a1d39e31f21 100644 --- a/server/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsIT.java @@ -52,12 +52,10 @@ protected Collection> nodePlugins() { public void testSimple() throws Exception { Client client = client(); CreateIndexResponse response1 = client.admin().indices().prepareCreate("test1") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject()) - .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject()) .execute().actionGet(); CreateIndexResponse response2 = client.admin().indices().prepareCreate("test2") - .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject()) + .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject()) .execute().actionGet(); client.admin().indices().prepareAliases().addAlias("test1", "alias1").execute().actionGet(); assertAcked(response1); @@ -66,8 +64,6 @@ public void testSimple() throws Exception { TypesExistsResponse response = client.admin().indices().prepareTypesExists("test1").setTypes("type1").execute().actionGet(); assertThat(response.isExists(), equalTo(true)); response = 
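The new testEqualsKey above probes the equals contract of IndicesRequestCache.Key one component at a time: identical components compare equal, while null, a foreign type, a different cache entity, a different reader version, or different request bytes do not. A minimal, self-contained sketch of the same pattern, using a hypothetical CacheKey class so it runs without the Elasticsearch test classpath:

import java.util.Objects;

// Hypothetical stand-in for IndicesRequestCache.Key: equality depends on the cache
// entity, the reader version, and the request bytes, and on nothing else.
final class CacheKey {
    final Object entity;
    final long readerVersion;
    final String value;

    CacheKey(Object entity, long readerVersion, String value) {
        this.entity = entity;
        this.readerVersion = readerVersion;
        this.value = value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if ((o instanceof CacheKey) == false) return false;
        CacheKey that = (CacheKey) o;
        return readerVersion == that.readerVersion
                && Objects.equals(entity, that.entity)
                && Objects.equals(value, that.value);
    }

    @Override
    public int hashCode() {
        return Objects.hash(entity, readerVersion, value);
    }

    public static void main(String[] args) {
        CacheKey key1 = new CacheKey("entity", 1L, "bytes");
        assert key1.equals(key1);                                         // reflexive
        assert key1.equals(new CacheKey("entity", 1L, "bytes"));          // same components
        assert key1.equals(null) == false;                                // never equal to null
        assert key1.equals("some other random object") == false;         // foreign type
        assert key1.equals(new CacheKey("other", 1L, "bytes")) == false;  // entity differs
        assert key1.equals(new CacheKey("entity", 2L, "bytes")) == false; // reader version differs
        assert key1.equals(new CacheKey("entity", 1L, "other")) == false; // request bytes differ
    }
}

Run with java -ea to enable the assertions.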
client.admin().indices().prepareTypesExists("test1").setTypes("type2").execute().actionGet(); - assertThat(response.isExists(), equalTo(true)); - response = client.admin().indices().prepareTypesExists("test1").setTypes("type3").execute().actionGet(); assertThat(response.isExists(), equalTo(false)); try { client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet(); @@ -80,9 +76,9 @@ public void testSimple() throws Exception { response = client.admin().indices().prepareTypesExists("alias1").setTypes("type1").execute().actionGet(); assertThat(response.isExists(), equalTo(true)); response = client.admin().indices().prepareTypesExists("*").setTypes("type1").execute().actionGet(); - assertThat(response.isExists(), equalTo(true)); + assertThat(response.isExists(), equalTo(false)); response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type1").execute().actionGet(); - assertThat(response.isExists(), equalTo(true)); + assertThat(response.isExists(), equalTo(false)); response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type2").execute().actionGet(); assertThat(response.isExists(), equalTo(false)); } diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index a914eb435bb7d..a2149b9d28a0b 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -241,7 +241,7 @@ public void testUnallocatedShardsDoesNotHang() throws InterruptedException { private void indexDoc(Engine engine, String id) throws IOException { final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); - final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc)); + final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), 1L, doc)); assertThat(indexResult.getFailure(), nullValue()); } diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 5970e1121bdee..446bc88f9f098 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -127,77 +127,6 @@ public void testGetFieldMappings() throws Exception { assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); } - public void testGetFieldMappingsMultiType() throws Exception { - assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); - assertAcked(prepareCreate("indexa") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) - .addMapping("typeA", getMappingForType("typeA")) - .addMapping("typeB", getMappingForType("typeB"))); - assertAcked(client().admin().indices().prepareCreate("indexb") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) - .addMapping("typeA", getMappingForType("typeA")) - .addMapping("typeB", getMappingForType("typeB"))); - - - // Get mappings by full name - GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get(); - assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), 
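The FlushIT hunk above tracks a signature change in the Engine.Index test constructor: an explicit primary term (here 1) now sits between the uid term and the parsed document. A short sketch of the updated helper, assuming the Elasticsearch server test classpath:

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;

final class IndexHelper {
    // Indexes a parsed document under an explicit primary term and fails fast if the
    // engine reports a per-document failure instead of throwing.
    static void indexDoc(Engine engine, ParsedDocument doc, long primaryTerm) throws IOException {
        final Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), primaryTerm, doc);
        final Engine.IndexResult result = engine.index(index);
        if (result.getFailure() != null) {
            throw new AssertionError("indexing " + doc.id() + " failed", result.getFailure());
        }
    }
}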
equalTo("field1")); - assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1")); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.mappings().get("indexa"), not(hasKey("typeB"))); - assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue()); - assertThat(response.mappings(), not(hasKey("indexb"))); - assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); - - // Get mappings by name - response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get(); - assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1")); - assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1")); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); - - // get mappings by name across multiple indices - response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("obj.subfield").get(); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue()); - - // get mappings by name across multiple types - response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("obj.subfield").get(); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue()); - assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeA", "field1"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); - - // get mappings by name across multiple types & indices - response = client().admin().indices().prepareGetFieldMappings().setFields("obj.subfield").get(); - assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeA", 
"obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue()); - assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); - assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield")); - assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield")); - assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue()); - - } - @SuppressWarnings("unchecked") public void testSimpleGetFieldMappingsWithDefaults() throws Exception { assertAcked(prepareCreate("test").addMapping("type", getMappingForType("type"))); diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java index da6691660d8f7..40c9384fd3cfd 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsIT.java @@ -67,18 +67,10 @@ private XContentBuilder getMappingForType(String type) throws IOException { public void testSimpleGetMappings() throws Exception { client().admin().indices().prepareCreate("indexa") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) .addMapping("typeA", getMappingForType("typeA")) - .addMapping("typeB", getMappingForType("typeB")) - .addMapping("Atype", getMappingForType("Atype")) - .addMapping("Btype", getMappingForType("Btype")) .execute().actionGet(); client().admin().indices().prepareCreate("indexb") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) .addMapping("typeA", getMappingForType("typeA")) - .addMapping("typeB", getMappingForType("typeB")) - .addMapping("Atype", getMappingForType("Atype")) - .addMapping("Btype", getMappingForType("Btype")) .execute().actionGet(); ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); @@ -87,30 +79,18 @@ public void testSimpleGetMappings() throws Exception { // Get all mappings GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet(); assertThat(response.mappings().size(), equalTo(2)); - assertThat(response.mappings().get("indexa").size(), equalTo(4)); + assertThat(response.mappings().get("indexa").size(), equalTo(1)); assertThat(response.mappings().get("indexa").get("typeA"), notNullValue()); - assertThat(response.mappings().get("indexa").get("typeB"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Btype"), notNullValue()); - assertThat(response.mappings().get("indexb").size(), equalTo(4)); + assertThat(response.mappings().get("indexb").size(), equalTo(1)); assertThat(response.mappings().get("indexb").get("typeA"), notNullValue()); - 
assertThat(response.mappings().get("indexb").get("typeB"), notNullValue()); - assertThat(response.mappings().get("indexb").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexb").get("Btype"), notNullValue()); // Get all mappings, via wildcard support response = client().admin().indices().prepareGetMappings("*").setTypes("*").execute().actionGet(); assertThat(response.mappings().size(), equalTo(2)); - assertThat(response.mappings().get("indexa").size(), equalTo(4)); + assertThat(response.mappings().get("indexa").size(), equalTo(1)); assertThat(response.mappings().get("indexa").get("typeA"), notNullValue()); - assertThat(response.mappings().get("indexa").get("typeB"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Btype"), notNullValue()); - assertThat(response.mappings().get("indexb").size(), equalTo(4)); + assertThat(response.mappings().get("indexb").size(), equalTo(1)); assertThat(response.mappings().get("indexb").get("typeA"), notNullValue()); - assertThat(response.mappings().get("indexb").get("typeB"), notNullValue()); - assertThat(response.mappings().get("indexb").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexb").get("Btype"), notNullValue()); // Get all typeA mappings in all indices response = client().admin().indices().prepareGetMappings("*").setTypes("typeA").execute().actionGet(); @@ -123,35 +103,26 @@ public void testSimpleGetMappings() throws Exception { // Get all mappings in indexa response = client().admin().indices().prepareGetMappings("indexa").execute().actionGet(); assertThat(response.mappings().size(), equalTo(1)); - assertThat(response.mappings().get("indexa").size(), equalTo(4)); + assertThat(response.mappings().get("indexa").size(), equalTo(1)); assertThat(response.mappings().get("indexa").get("typeA"), notNullValue()); - assertThat(response.mappings().get("indexa").get("typeB"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Btype"), notNullValue()); // Get all mappings beginning with A* in indexa - response = client().admin().indices().prepareGetMappings("indexa").setTypes("A*").execute().actionGet(); + response = client().admin().indices().prepareGetMappings("indexa").setTypes("*A").execute().actionGet(); assertThat(response.mappings().size(), equalTo(1)); assertThat(response.mappings().get("indexa").size(), equalTo(1)); - assertThat(response.mappings().get("indexa").get("Atype"), notNullValue()); + assertThat(response.mappings().get("indexa").get("typeA"), notNullValue()); // Get all mappings beginning with B* in all indices response = client().admin().indices().prepareGetMappings().setTypes("B*").execute().actionGet(); - assertThat(response.mappings().size(), equalTo(2)); - assertThat(response.mappings().get("indexa").size(), equalTo(1)); - assertThat(response.mappings().get("indexa").get("Btype"), notNullValue()); - assertThat(response.mappings().get("indexb").size(), equalTo(1)); - assertThat(response.mappings().get("indexb").get("Btype"), notNullValue()); + assertThat(response.mappings().size(), equalTo(0)); // Get all mappings beginning with B* and A* in all indices - response = client().admin().indices().prepareGetMappings().setTypes("B*", "A*").execute().actionGet(); + response = client().admin().indices().prepareGetMappings().setTypes("B*", "*A").execute().actionGet(); assertThat(response.mappings().size(), 
equalTo(2)); - assertThat(response.mappings().get("indexa").size(), equalTo(2)); - assertThat(response.mappings().get("indexa").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexa").get("Btype"), notNullValue()); - assertThat(response.mappings().get("indexb").size(), equalTo(2)); - assertThat(response.mappings().get("indexb").get("Atype"), notNullValue()); - assertThat(response.mappings().get("indexb").get("Btype"), notNullValue()); + assertThat(response.mappings().get("indexa").size(), equalTo(1)); + assertThat(response.mappings().get("indexa").get("typeA"), notNullValue()); + assertThat(response.mappings().get("indexb").size(), equalTo(1)); + assertThat(response.mappings().get("indexb").get("typeA"), notNullValue()); } public void testGetMappingsWithBlocks() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index d830889c5a615..094cf83186ccf 100644 --- a/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -77,7 +77,6 @@ public void testDynamicUpdates() throws Exception { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE) - .put("index.version.created", Version.V_5_6_0) // for multiple types ).execute().actionGet(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); @@ -85,7 +84,7 @@ public void testDynamicUpdates() throws Exception { int numberOfTypes = randomIntBetween(1, 5); List indexRequests = new ArrayList<>(); for (int rec = 0; rec < recCount; rec++) { - String type = "type" + (rec % numberOfTypes); + String type = "type"; String fieldName = "field_" + type + "_" + rec; indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value")); } @@ -100,7 +99,7 @@ public void testDynamicUpdates() throws Exception { logger.info("checking all the fields are in the mappings"); for (int rec = 0; rec < recCount; rec++) { - String type = "type" + (rec % numberOfTypes); + String type = "type"; String fieldName = "field_" + type + "_" + rec; assertConcreteMappingsOnAll("test", type, fieldName); } @@ -347,22 +346,4 @@ public void testPutMappingsWithBlocks() { } } - @SuppressWarnings("unchecked") - public void testUpdateMappingOnAllTypes() { - assertTrue("remove this multi type test", Version.CURRENT.before(Version.fromString("7.0.0"))); - assertAcked(prepareCreate("index") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) - .addMapping("type1", "f", "type=keyword").addMapping("type2", "f", "type=keyword")); - - assertAcked(client().admin().indices().preparePutMapping("index") - .setType("type1") - .setSource("f", "type=keyword,null_value=n/a") - .get()); - - GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type2").get(); - MappingMetaData type2Mapping = mappings.getMappings().get("index").get("type2").get(); - Map properties = (Map) type2Mapping.sourceAsMap().get("properties"); - Map f = (Map) properties.get("f"); - assertEquals("n/a", f.get("null_value")); - } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java 
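The UpdateMappingIntegrationIT change above collapses the randomized multi-type loop to a single constant type; each document still introduces a never-before-seen field, so the test keeps stressing dynamic mapping updates. A sketch of the single-type loop as a fragment inside an ESIntegTestCase test method (the record count here is illustrative):

// Every document carries a fresh field name, forcing one dynamic mapping update per
// request; indexRandom bulk-indexes the builders and refreshes when done.
int recCount = randomIntBetween(20, 200);
List<IndexRequestBuilder> indexRequests = new ArrayList<>();
for (int rec = 0; rec < recCount; rec++) {
    String type = "type";
    String fieldName = "field_" + type + "_" + rec;
    indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value"));
}
indexRandom(true, false, indexRequests);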
b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 4287b675f353c..babf8518d4492 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -51,11 +51,11 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.SegmentsStats; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; @@ -257,10 +257,10 @@ private Engine.Index getIndex(final String id) { final String type = "test"; final ParseContext.Document document = new ParseContext.Document(); document.add(new TextField("test", "test", Field.Store.YES)); - final Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); + final Field idField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY); final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); - document.add(uidField); + document.add(idField); document.add(versionField); document.add(seqID.seqNo); document.add(seqID.seqNoDocValue); @@ -268,7 +268,7 @@ private Engine.Index getIndex(final String id) { final BytesReference source = new BytesArray(new byte[] { 1 }); final ParsedDocument doc = new ParsedDocument(versionField, seqID, id, type, null, Arrays.asList(document), source, XContentType.JSON, null); - return new Engine.Index(new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id())), doc); + return new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc); } public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 49e557c3dde78..f46ab7ebbd603 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -200,7 +200,8 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { final String historyUUIDtoUse = UUIDs.randomBase64UUID(random()); if (randomBoolean()) { // create a new translog - translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, replica.shardId()); + translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, + replica.shardId(), replica.getPrimaryTerm()); translogGenToUse = 1; } else { translogUUIDtoUse = translogGeneration.translogUUID; diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index f25a9234698b2..ad2095a6dd073 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -390,14 +390,11 
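Both recovery-test hunks above follow from the removal of the _uid meta field: test documents are keyed by _id via IdFieldMapper, and engine operations and empty translogs are now created under an explicit primary term. The updated getIndex helper, reassembled from the hunk above and assuming the Elasticsearch server test classpath:

private Engine.Index getIndex(final String id) {
    final String type = "test";
    final ParseContext.Document document = new ParseContext.Document();
    document.add(new TextField("test", "test", Field.Store.YES));
    // _id replaces the removed _uid field as the document's unique key
    final Field idField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE);
    final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
    final SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
    document.add(idField);
    document.add(versionField);
    document.add(seqID.seqNo);
    document.add(seqID.seqNoDocValue);
    final BytesReference source = new BytesArray(new byte[] { 1 });
    final ParsedDocument doc = new ParsedDocument(versionField, seqID, id, type, null,
            Arrays.asList(document), source, XContentType.JSON, null);
    // operations are now attributed to a primary term, hence the extra argument
    return new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
}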
@@ public void testThrottleStats() throws Exception { } public void testSimpleStats() throws Exception { - // this test has some type stats tests that can be removed in 7.0 - assertAcked(prepareCreate("test1") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))); // allows for multiple types - createIndex("test2"); + createIndex("test1", "test2"); ensureGreen(); - client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet(); - client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test1", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet(); + client().prepareIndex("test1", "type", Integer.toString(2)).setSource("field", "value").execute().actionGet(); client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet(); refresh(); @@ -435,6 +432,10 @@ public void testSimpleStats() throws Exception { assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getDeleteCurrent(), equalTo(0L)); assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getFetchCurrent(), equalTo(0L)); assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0L)); + assertThat(stats.getIndex("test2").getTotal().getIndexing().getTotal().getIndexCurrent(), equalTo(0L)); + assertThat(stats.getIndex("test2").getTotal().getIndexing().getTotal().getDeleteCurrent(), equalTo(0L)); + assertThat(stats.getIndex("test2").getTotal().getSearch().getTotal().getFetchCurrent(), equalTo(0L)); + assertThat(stats.getIndex("test2").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0L)); // check flags stats = client().admin().indices().prepareStats().clear() @@ -450,19 +451,8 @@ public void testSimpleStats() throws Exception { assertThat(stats.getTotal().getFlush(), notNullValue()); assertThat(stats.getTotal().getRefresh(), notNullValue()); - // check types - stats = client().admin().indices().prepareStats().setTypes("type1", "type").execute().actionGet(); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCount(), equalTo(1L)); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type").getIndexCount(), equalTo(1L)); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexFailedCount(), equalTo(0L)); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2"), nullValue()); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCurrent(), equalTo(0L)); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getDeleteCurrent(), equalTo(0L)); - - - assertThat(stats.getTotal().getGet().getCount(), equalTo(0L)); // check get - GetResponse getResponse = client().prepareGet("test1", "type1", "1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test2", "type", "1").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(true)); stats = client().admin().indices().prepareStats().execute().actionGet(); @@ -471,7 +461,7 @@ public void testSimpleStats() throws Exception { assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0L)); // missing get - getResponse = client().prepareGet("test1", "type1", "2").execute().actionGet(); + getResponse = client().prepareGet("test2", "type", "2").execute().actionGet(); assertThat(getResponse.isExists(), equalTo(false)); stats 
= client().admin().indices().prepareStats().execute().actionGet(); @@ -498,12 +488,12 @@ public void testSimpleStats() throws Exception { // index failed try { - client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").setVersion(1) + client().prepareIndex("test1", "type", Integer.toString(1)).setSource("field", "value").setVersion(1) .setVersionType(VersionType.EXTERNAL).execute().actionGet(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} try { - client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").setVersion(1) + client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").setVersion(1) .setVersionType(VersionType.EXTERNAL).execute().actionGet(); fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} @@ -513,11 +503,9 @@ public void testSimpleStats() throws Exception { fail("Expected a version conflict"); } catch (VersionConflictEngineException e) {} - stats = client().admin().indices().prepareStats().setTypes("type1", "type2").execute().actionGet(); - assertThat(stats.getIndex("test1").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); - assertThat(stats.getIndex("test2").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(1L)); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexFailedCount(), equalTo(1L)); - assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2").getIndexFailedCount(), equalTo(1L)); + + stats = client().admin().indices().prepareStats().execute().actionGet(); + assertThat(stats.getIndex("test2").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(2L)); assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(3L)); } diff --git a/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 434b981681fca..2c6f7675673b4 100644 --- a/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/server/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -383,24 +383,22 @@ public void testIndexTemplateWithAliases() throws Exception { client().admin().indices().preparePutTemplate("template_with_aliases") .setPatterns(Collections.singletonList("te*")) - .addMapping("type1", "{\"type1\" : {\"properties\" : {\"value\" : {\"type\" : \"text\"}}}}", XContentType.JSON) + .addMapping("_doc", "type", "type=keyword", "field", "type=text") .addAlias(new Alias("simple_alias")) .addAlias(new Alias("templated_alias-{index}")) - .addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}")) + .addAlias(new Alias("filtered_alias").filter("{\"term\":{\"type\":\"type2\"}}")) .addAlias(new Alias("complex_filtered_alias") - .filter(QueryBuilders.termsQuery("_type", "typeX", "typeY", "typeZ"))) + .filter(QueryBuilders.termsQuery("type", "typeX", "typeY", "typeZ"))) .get(); - assertAcked(prepareCreate("test_index") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) // allow for multiple version - .addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ")); + assertAcked(prepareCreate("test_index")); ensureGreen(); - client().prepareIndex("test_index", "type1", "1").setSource("field", "A value").get(); - client().prepareIndex("test_index", 
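With per-type indexing stats removed, the rewritten testSimpleStats attributes version-conflict failures to indices only. A condensed fragment (inside an ESIntegTestCase test method, on a fresh index where this is the only failure) showing how a rejected write surfaces in the stats:

// Re-indexing id "1" with an external version no greater than the stored version is
// rejected with a version conflict, and the rejection is counted in indexFailedCount.
try {
    client().prepareIndex("test1", "type", "1").setSource("field", "value")
            .setVersion(1).setVersionType(VersionType.EXTERNAL).execute().actionGet();
    fail("Expected a version conflict");
} catch (VersionConflictEngineException e) {
    // expected
}

IndicesStatsResponse stats = client().admin().indices().prepareStats().execute().actionGet();
assertThat(stats.getIndex("test1").getPrimaries().getIndexing().getTotal().getIndexFailedCount(), equalTo(1L));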
"type2", "2").setSource("field", "B value").get(); - client().prepareIndex("test_index", "typeX", "3").setSource("field", "C value").get(); - client().prepareIndex("test_index", "typeY", "4").setSource("field", "D value").get(); - client().prepareIndex("test_index", "typeZ", "5").setSource("field", "E value").get(); + client().prepareIndex("test_index", "_doc", "1").setSource("type", "type1", "field", "A value").get(); + client().prepareIndex("test_index", "_doc", "2").setSource("type", "type2", "field", "B value").get(); + client().prepareIndex("test_index", "_doc", "3").setSource("type", "typeX", "field", "C value").get(); + client().prepareIndex("test_index", "_doc", "4").setSource("type", "typeY", "field", "D value").get(); + client().prepareIndex("test_index", "_doc", "5").setSource("type", "typeZ", "field", "E value").get(); GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get(); assertThat(getAliasesResponse.getAliases().size(), equalTo(1)); @@ -419,7 +417,7 @@ public void testIndexTemplateWithAliases() throws Exception { searchResponse = client().prepareSearch("filtered_alias").get(); assertHitCount(searchResponse, 1L); - assertThat(searchResponse.getHits().getAt(0).getType(), equalTo("type2")); + assertThat(searchResponse.getHits().getAt(0).getSourceAsMap().get("type"), equalTo("type2")); // Search the complex filter alias searchResponse = client().prepareSearch("complex_filtered_alias").get(); @@ -427,7 +425,7 @@ public void testIndexTemplateWithAliases() throws Exception { Set types = new HashSet<>(); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - types.add(searchHit.getType()); + types.add(searchHit.getSourceAsMap().get("type").toString()); } assertThat(types.size(), equalTo(3)); assertThat(types, containsInAnyOrder("typeX", "typeY", "typeZ")); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java index 9e97e9bbfd449..0481385909fe2 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java @@ -121,7 +121,7 @@ public void testSimulate() throws Exception { source.put("foo", "bar"); source.put("fail", false); source.put("processed", true); - IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, source); + IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, source); assertThat(simulateDocumentBaseResult.getIngestDocument().getSourceAndMetadata(), equalTo(ingestDocument.getSourceAndMetadata())); assertThat(simulateDocumentBaseResult.getFailure(), nullValue()); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java index 04285b3432e12..4f9423ddff752 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java @@ -76,7 +76,7 @@ public void setTestIngestDocument() { list.add(null); document.put("list", list); - ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, document); + ingestDocument = new IngestDocument("index", "type", "id", null, null, null, document); } public void testSimpleGetFieldValue() { diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java 
b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java index 5a3b57a6d7e0b..15a23421da26a 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java @@ -40,6 +40,7 @@ import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutorService; @@ -48,9 +49,10 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; @@ -78,19 +80,23 @@ public void setup() { } public void testExecuteIndexPipelineDoesNotExist() { - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); - @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - try { - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - fail("IllegalArgumentException expected"); - } catch (IllegalArgumentException e) { + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + + final SetOnce failure = new SetOnce<>(); + final BiConsumer failureHandler = (request, e) -> { + failure.set(true); + assertThat(request, sameInstance(indexRequest)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("pipeline with id [_id] does not exist")); - } - verify(failureHandler, never()).accept(any(Exception.class)); - verify(completionHandler, never()).accept(anyBoolean()); + }; + + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); } public void testExecuteIndexPipelineExistsButFailedParsing() { @@ -106,17 +112,23 @@ public String getType() { return null; } }))); - SetOnce failed = new SetOnce<>(); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - Consumer failureHandler = (e) -> { - assertThat(e.getCause().getClass(), equalTo(IllegalArgumentException.class)); - assertThat(e.getCause().getCause().getClass(), equalTo(IllegalStateException.class)); + + final SetOnce failure = new SetOnce<>(); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + final BiConsumer failureHandler = (request, e) -> { + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getCause().getCause(), instanceOf(IllegalStateException.class)); assertThat(e.getCause().getCause().getMessage(), equalTo("error")); - failed.set(true); + failure.set(true); }; - Consumer completionHandler = (e) -> failed.set(false); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - 
assertTrue(failed.get()); + + @SuppressWarnings("unchecked") + final Consumer completionHandler = mock(Consumer.class); + + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + + assertTrue(failure.get()); + verify(completionHandler, times(1)).accept(null); } public void testExecuteBulkPipelineDoesNotExist() { @@ -152,41 +164,40 @@ protected boolean matchesSafely(IllegalArgumentException iae) { verify(completionHandler, times(1)).accept(null); } - public void testExecuteSuccess() throws Exception { - CompoundProcessor processor = mock(CompoundProcessor.class); + public void testExecuteSuccess() { + final CompoundProcessor processor = mock(CompoundProcessor.class); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - verify(failureHandler, never()).accept(any()); - verify(completionHandler, times(1)).accept(true); + final Consumer completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); } public void testExecuteEmptyPipeline() throws Exception { - CompoundProcessor processor = mock(CompoundProcessor.class); + final CompoundProcessor processor = mock(CompoundProcessor.class); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); when(processor.getProcessors()).thenReturn(Collections.emptyList()); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + final Consumer completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); verify(processor, never()).execute(any()); - verify(failureHandler, never()).accept(any()); - verify(completionHandler, times(1)).accept(true); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); } public void testExecutePropagateAllMetaDataUpdates() throws Exception { - CompoundProcessor processor = mock(CompoundProcessor.class); + final CompoundProcessor processor = mock(CompoundProcessor.class); when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); - long newVersion = randomLong(); - String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); + 
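The PipelineExecutionServiceTests hunks above all track one API move: the per-request executeIndexRequest entry point is gone, replaced by executeBulkRequest, whose failure handler is a BiConsumer receiving the failing request alongside the exception, and whose completion handler now fires exactly once (with null on success) even when a request failed. A condensed sketch of the new calling convention, with the generic parameters the flattened hunks elide restored, assuming the Elasticsearch test classpath and Mockito:

final IndexRequest indexRequest =
        new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");

final SetOnce<Boolean> failure = new SetOnce<>();
final BiConsumer<IndexRequest, Exception> failureHandler = (request, e) -> {
    failure.set(true);
    assertThat(request, sameInstance(indexRequest)); // the failing request is handed back
    assertThat(e, instanceOf(IllegalArgumentException.class));
};

@SuppressWarnings("unchecked")
final Consumer<Exception> completionHandler = mock(Consumer.class);

executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler);

assertTrue(failure.get());
verify(completionHandler, times(1)).accept(null); // completion fires even on failure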
final long newVersion = randomLong(); + final String versionType = randomFrom("internal", "external", "external_gt", "external_gte"); doAnswer((InvocationOnMock invocationOnMock) -> { IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0]; for (IngestDocument.MetaData metaData : IngestDocument.MetaData.values()) { @@ -202,108 +213,112 @@ public void testExecutePropagateAllMetaDataUpdates() throws Exception { }).when(processor).execute(any()); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + final Consumer completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); verify(processor).execute(any()); - verify(failureHandler, never()).accept(any()); - verify(completionHandler, times(1)).accept(true); + verify(failureHandler, never()).accept(any(), any()); + verify(completionHandler, times(1)).accept(null); assertThat(indexRequest.index(), equalTo("update_index")); assertThat(indexRequest.type(), equalTo("update_type")); assertThat(indexRequest.id(), equalTo("update_id")); assertThat(indexRequest.routing(), equalTo("update_routing")); - assertThat(indexRequest.parent(), equalTo("update_parent")); assertThat(indexRequest.version(), equalTo(newVersion)); assertThat(indexRequest.versionType(), equalTo(VersionType.fromString(versionType))); } public void testExecuteFailure() throws Exception { - CompoundProcessor processor = mock(CompoundProcessor.class); + final CompoundProcessor processor = mock(CompoundProcessor.class); when(processor.getProcessors()).thenReturn(Collections.singletonList(mock(Processor.class))); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - verify(processor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(any(RuntimeException.class)); - verify(completionHandler, never()).accept(anyBoolean()); + final Consumer 
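testExecutePropagateAllMetaDataUpdates (above) checks that whatever a processor writes into the IngestDocument metadata is copied back onto the IndexRequest after the pipeline runs. The doAnswer body is elided in the hunk; the following reconstruction is a plausible sketch under the same setup as the test, with the "update_" values pinned by the visible assertions and the setFieldValue calls an assumption:

doAnswer((InvocationOnMock invocationOnMock) -> {
    IngestDocument ingestDocument = (IngestDocument) invocationOnMock.getArguments()[0];
    // assumed stub body: rewrite each metadata field the assertions below check
    ingestDocument.setFieldValue(IngestDocument.MetaData.INDEX.getFieldName(), "update_index");
    ingestDocument.setFieldValue(IngestDocument.MetaData.TYPE.getFieldName(), "update_type");
    ingestDocument.setFieldValue(IngestDocument.MetaData.ID.getFieldName(), "update_id");
    ingestDocument.setFieldValue(IngestDocument.MetaData.ROUTING.getFieldName(), "update_routing");
    return null;
}).when(processor).execute(any());

executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler);
// the service copies the metadata mutations back onto the original request
assertThat(indexRequest.index(), equalTo("update_index"));
assertThat(indexRequest.routing(), equalTo("update_routing"));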
completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); } public void testExecuteSuccessWithOnFailure() throws Exception { - Processor processor = mock(Processor.class); + final Processor processor = mock(Processor.class); when(processor.getType()).thenReturn("mock_processor_type"); when(processor.getTag()).thenReturn("mock_processor_tag"); - Processor onFailureProcessor = mock(Processor.class); - CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor), - Collections.singletonList(new CompoundProcessor(onFailureProcessor))); + final Processor onFailureProcessor = mock(Processor.class); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id") - .source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", Collections.emptyMap())); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()).when(processor).execute(eqIndexTypeId(Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - verify(failureHandler, never()).accept(any(ElasticsearchException.class)); - verify(completionHandler, times(1)).accept(true); + final Consumer completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(failureHandler, never()).accept(eq(indexRequest), any(ElasticsearchException.class)); + verify(completionHandler, times(1)).accept(null); } public void testExecuteFailureWithOnFailure() throws Exception { - Processor processor = mock(Processor.class); - Processor onFailureProcessor = mock(Processor.class); - CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor), - Collections.singletonList(new CompoundProcessor(onFailureProcessor))); + final Processor processor = mock(Processor.class); + final Processor onFailureProcessor = mock(Processor.class); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, Collections.singletonList(processor), Collections.singletonList(new CompoundProcessor(onFailureProcessor))); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), 
Collections.emptyMap())); - doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", indexRequest.version(), - indexRequest.versionType(), Collections.emptyMap())); + final IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + doThrow(new RuntimeException()) + .when(onFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - verify(processor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(any(RuntimeException.class)); - verify(completionHandler, never()).accept(anyBoolean()); + final Consumer completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); } public void testExecuteFailureWithNestedOnFailure() throws Exception { - Processor processor = mock(Processor.class); - Processor onFailureProcessor = mock(Processor.class); - Processor onFailureOnFailureProcessor = mock(Processor.class); - CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor), - Collections.singletonList(new CompoundProcessor(false, Collections.singletonList(onFailureProcessor), - Collections.singletonList(onFailureOnFailureProcessor)))); + final Processor processor = mock(Processor.class); + final Processor onFailureProcessor = mock(Processor.class); + final Processor onFailureOnFailureProcessor = mock(Processor.class); + final List processors = Collections.singletonList(onFailureProcessor); + final List onFailureProcessors = Collections.singletonList(onFailureOnFailureProcessor); + final CompoundProcessor compoundProcessor = new CompoundProcessor( + false, + Collections.singletonList(processor), + Collections.singletonList(new CompoundProcessor(false, processors, onFailureProcessors))); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, compoundProcessor)); - IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); - doThrow(new RuntimeException()).when(onFailureOnFailureProcessor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()).when(onFailureProcessor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - doThrow(new RuntimeException()).when(processor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + final IndexRequest indexRequest = new 
IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id"); + doThrow(new RuntimeException()) + .when(onFailureOnFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + doThrow(new RuntimeException()) + .when(onFailureProcessor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + doThrow(new RuntimeException()) + .when(processor) + .execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - verify(processor).execute(eqID("_index", "_type", "_id", - indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); - verify(failureHandler, times(1)).accept(any(RuntimeException.class)); - verify(completionHandler, never()).accept(anyBoolean()); + final Consumer completionHandler = mock(Consumer.class); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + verify(processor).execute(eqIndexTypeId(indexRequest.version(), indexRequest.versionType(), Collections.emptyMap())); + verify(failureHandler, times(1)).accept(eq(indexRequest), any(RuntimeException.class)); + verify(completionHandler, times(1)).accept(null); } public void testBulkRequestExecutionWithFailures() throws Exception { @@ -345,7 +360,7 @@ public void testBulkRequestExecutionWithFailures() throws Exception { verify(completionHandler, times(1)).accept(null); } - public void testBulkRequestExecution() throws Exception { + public void testBulkRequestExecution() { BulkRequest bulkRequest = new BulkRequest(); String pipelineId = "_id"; @@ -368,47 +383,47 @@ public void testBulkRequestExecution() throws Exception { verify(completionHandler, times(1)).accept(null); } - public void testStats() throws Exception { - IngestStats ingestStats = executionService.stats(); - assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(0)); - assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(0L)); - assertThat(ingestStats.getTotalStats().getIngestCurrent(), equalTo(0L)); - assertThat(ingestStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); - assertThat(ingestStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); + public void testStats() { + final IngestStats initialStats = executionService.stats(); + assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0)); + assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L)); + assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L)); when(store.get("_id1")).thenReturn(new Pipeline("_id1", null, version, new CompoundProcessor(mock(Processor.class)))); when(store.get("_id2")).thenReturn(new Pipeline("_id2", null, null, new CompoundProcessor(mock(Processor.class)))); - Map configurationMap = new HashMap<>(); + final Map configurationMap = new HashMap<>(); configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); configurationMap.put("_id2", new 
PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); executionService.updatePipelineStats(new IngestMetadata(configurationMap)); @SuppressWarnings("unchecked") - Consumer failureHandler = mock(Consumer.class); + final BiConsumer failureHandler = mock(BiConsumer.class); @SuppressWarnings("unchecked") - Consumer completionHandler = mock(Consumer.class); + final Consumer completionHandler = mock(Consumer.class); - IndexRequest indexRequest = new IndexRequest("_index"); + final IndexRequest indexRequest = new IndexRequest("_index"); indexRequest.setPipeline("_id1"); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - ingestStats = executionService.stats(); - assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(ingestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(ingestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); - assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(1L)); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + final IngestStats afterFirstRequestStats = executionService.stats(); + assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L)); + assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L)); indexRequest.setPipeline("_id2"); - executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); - ingestStats = executionService.stats(); - assertThat(ingestStats.getStatsPerPipeline().size(), equalTo(2)); - assertThat(ingestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); - assertThat(ingestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); - assertThat(ingestStats.getTotalStats().getIngestCount(), equalTo(2L)); + executionService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler); + final IngestStats afterSecondRequestStats = executionService.stats(); + assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L)); + assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L)); } // issue: https://github.com/elastic/elasticsearch/issues/18126 - public void testUpdatingStatsWhenRemovingPipelineWorks() throws Exception { + public void testUpdatingStatsWhenRemovingPipelineWorks() { Map configurationMap = new HashMap<>(); configurationMap.put("_id1", new PipelineConfiguration("_id1", new BytesArray("{}"), XContentType.JSON)); configurationMap.put("_id2", new PipelineConfiguration("_id2", new BytesArray("{}"), XContentType.JSON)); @@ -423,12 +438,12 @@ public void testUpdatingStatsWhenRemovingPipelineWorks() throws Exception { assertThat(executionService.stats().getStatsPerPipeline(), not(hasKey("_id2"))); } - private IngestDocument eqID(String index, String type, String id, Map source) { - return argThat(new IngestDocumentMatcher(index, type, id, source)); + private IngestDocument eqIndexTypeId(final Map source) { + return argThat(new IngestDocumentMatcher("_index", 
"_type", "_id", source)); } - private IngestDocument eqID(String index, String type, String id, Long version, VersionType versionType, Map source) { - return argThat(new IngestDocumentMatcher(index, type, id, version, versionType, source)); + private IngestDocument eqIndexTypeId(final Long version, final VersionType versionType, final Map source) { + return argThat(new IngestDocumentMatcher("_index", "_type", "_id", version, versionType, source)); } private class IngestDocumentMatcher extends ArgumentMatcher { @@ -436,11 +451,11 @@ private class IngestDocumentMatcher extends ArgumentMatcher { private final IngestDocument ingestDocument; IngestDocumentMatcher(String index, String type, String id, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, null, null, source); + this.ingestDocument = new IngestDocument(index, type, id, null, null, null, source); } IngestDocumentMatcher(String index, String type, String id, Long version, VersionType versionType, Map source) { - this.ingestDocument = new IngestDocument(index, type, id, null, null, version, versionType, source); + this.ingestDocument = new IngestDocument(index, type, id, null, version, versionType, source); } @Override diff --git a/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java b/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java index 07dcabf396b59..670f9cdfa08be 100644 --- a/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java +++ b/server/src/test/java/org/elasticsearch/mget/SimpleMgetIT.java @@ -123,35 +123,6 @@ public void testThatMgetShouldWorkWithAliasRouting() throws IOException { assertFalse(mgetResponse.getResponses()[0].isFailed()); } - public void testThatParentPerDocumentIsSupported() throws Exception { - assertAcked(prepareCreate("test").addAlias(new Alias("alias")) - .addMapping("test", jsonBuilder() - .startObject() - .startObject("test") - .startObject("_parent") - .field("type", "foo") - .endObject() - .endObject() - .endObject())); - - client().prepareIndex("test", "test", "1").setParent("4").setRefreshPolicy(IMMEDIATE) - .setSource(jsonBuilder().startObject().field("foo", "bar").endObject()) - .get(); - - MultiGetResponse mgetResponse = client().prepareMultiGet() - .add(new MultiGetRequest.Item(indexOrAlias(), "test", "1").parent("4")) - .add(new MultiGetRequest.Item(indexOrAlias(), "test", "1")) - .get(); - - assertThat(mgetResponse.getResponses().length, is(2)); - assertThat(mgetResponse.getResponses()[0].isFailed(), is(false)); - assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true)); - - assertThat(mgetResponse.getResponses()[1].isFailed(), is(true)); - assertThat(mgetResponse.getResponses()[1].getResponse(), nullValue()); - assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[test]/[1]")); - } - @SuppressWarnings("unchecked") public void testThatSourceFilteringIsSupported() throws Exception { assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 4c079b545c28f..4830d48df79f6 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -97,7 +97,8 @@ public void 
testRestoreSnapshotWithExistingFiles() throws IOException { // build a new shard using the same store directory as the closed shard ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE); - shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, null, () -> {}); + shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, null, () -> {}, + EMPTY_EVENT_LISTENER); // restore the shard recoverShardFromSnapshot(shard, snapshot, repository); diff --git a/server/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java b/server/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java index dfebb3f754e99..e89ab1e715d03 100644 --- a/server/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java +++ b/server/src/test/java/org/elasticsearch/routing/AliasResolveRoutingIT.java @@ -75,27 +75,25 @@ public void testResolveIndexRouting() { .addAliasAction(AliasActions.add().index("test1").alias("alias0").routing("0")) .addAliasAction(AliasActions.add().index("test2").alias("alias0").routing("0")).get(); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "test1"), nullValue()); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "alias"), nullValue()); - - assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "test1"), nullValue()); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "alias10"), equalTo("0")); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "alias20"), equalTo("0")); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, null, "alias21"), equalTo("1")); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, "3", "test1"), equalTo("3")); - assertThat(clusterService().state().metaData().resolveIndexRouting(null, "0", "alias10"), equalTo("0")); - - // Force the alias routing and ignore the parent. 
- assertThat(clusterService().state().metaData().resolveIndexRouting("1", null, "alias10"), equalTo("0")); + assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue()); + assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias"), nullValue()); + + assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue()); + assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias10"), equalTo("0")); + assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias20"), equalTo("0")); + assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias21"), equalTo("1")); + assertThat(clusterService().state().metaData().resolveIndexRouting("3", "test1"), equalTo("3")); + assertThat(clusterService().state().metaData().resolveIndexRouting("0", "alias10"), equalTo("0")); + try { - clusterService().state().metaData().resolveIndexRouting(null, "1", "alias10"); + clusterService().state().metaData().resolveIndexRouting("1", "alias10"); fail("should fail"); } catch (IllegalArgumentException e) { // all is well, we can't have two mappings, one provided, and one in the alias } try { - clusterService().state().metaData().resolveIndexRouting(null, null, "alias0"); + clusterService().state().metaData().resolveIndexRouting(null, "alias0"); fail("should fail"); } catch (IllegalArgumentException ex) { // Expected diff --git a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index fb140462086b2..21b5fa89a30d6 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -116,8 +116,8 @@ public void testCompilationCircuitBreaking() throws Exception { public void testMaxCompilationRateSetting() throws Exception { assertThat(MAX_COMPILATION_RATE_FUNCTION.apply("10/1m"), is(Tuple.tuple(10, TimeValue.timeValueMinutes(1)))); assertThat(MAX_COMPILATION_RATE_FUNCTION.apply("10/60s"), is(Tuple.tuple(10, TimeValue.timeValueMinutes(1)))); - assertException("10/m", ElasticsearchParseException.class, "failed to parse [m]"); - assertException("6/1.6m", ElasticsearchParseException.class, "failed to parse [1.6m], fractional time values are not supported"); + assertException("10/m", IllegalArgumentException.class, "failed to parse [m]"); + assertException("6/1.6m", IllegalArgumentException.class, "failed to parse [1.6m], fractional time values are not supported"); assertException("foo/bar", IllegalArgumentException.class, "could not parse [foo] as integer in value [foo/bar]"); assertException("6.0/1m", IllegalArgumentException.class, "could not parse [6.0] as integer in value [6.0/1m]"); assertException("6/-1m", IllegalArgumentException.class, "time value [-1m] must be positive"); diff --git a/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java index bc1e106696315..0294f9f67f88c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/test/java/org/elasticsearch/search/SearchCancellationIT.java @@ -266,7 +266,7 @@ public void enableBlock() { public Map<String, Function<Map<String, Object>, Object>> pluginScripts() { return Collections.singletonMap(SCRIPT_NAME, params -> { LeafFieldsLookup fieldsLookup = (LeafFieldsLookup) params.get("_fields"); - 
Loggers.getLogger(SearchCancellationIT.class).info("Blocking on the document {}", fieldsLookup.get("_uid")); + Loggers.getLogger(SearchCancellationIT.class).info("Blocking on the document {}", fieldsLookup.get("_id")); hits.incrementAndGet(); try { awaitBusy(() -> shouldBlock.get() == false); diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index ca5efe0236720..78040f5bfb254 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -324,6 +324,7 @@ public List> getRescorers() { "simple_query_string", "span_containing", "span_first", + "span_gap", "span_multi", "span_near", "span_not", diff --git a/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java b/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java index 15bed6979fc85..762a22c823fba 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java +++ b/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java @@ -21,15 +21,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.query.QueryPhaseExecutionException; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -57,7 +54,10 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testSimpleTimeout() throws Exception { - client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + for (int i = 0; i < 32; i++) { + client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").get(); + } + refresh("test"); SearchResponse searchResponse = client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS)) .setQuery(scriptQuery( @@ -66,19 +66,19 @@ public void testSimpleTimeout() throws Exception { .execute().actionGet(); assertThat(searchResponse.isTimedOut(), equalTo(true)); } - + public void testPartialResultsIntolerantTimeout() throws Exception { client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> + + ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS)) .setQuery(scriptQuery( new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap()))) .setAllowPartialSearchResults(false) // this line causes timeouts to report failures - .execute().actionGet() + .execute().actionGet() ); assertTrue(ex.toString().contains("Time exceeded")); - } + } public static class ScriptedTimeoutPlugin extends MockScriptPlugin { static final String SCRIPT_NAME = "search_timeout"; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java index acb6b0f0992e0..08ae503102e86 100644 
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java @@ -19,7 +19,8 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.common.ParsingException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; @@ -78,8 +79,9 @@ public void testParsingRangeStrict() throws IOException { "]\n" + "}"; XContentParser parser = createParser(JsonXContent.jsonXContent, rangeAggregation); - ParsingException ex = expectThrows(ParsingException.class, () -> DateRangeAggregationBuilder.parse("aggregationName", parser)); - assertThat(ex.getDetailedMessage(), containsString("badField")); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> DateRangeAggregationBuilder.parse("aggregationName", parser)); + assertThat(ExceptionsHelper.detailedMessage(ex), containsString("badField")); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java index 9549c22019b63..fcdbc81c0c63e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceRangeTests.java @@ -19,10 +19,11 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.common.ParsingException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; @@ -78,8 +79,9 @@ public void testParsingRangeStrict() throws IOException { "]\n" + "}"; XContentParser parser = createParser(JsonXContent.jsonXContent, rangeAggregation); - ParsingException ex = expectThrows(ParsingException.class, () -> GeoDistanceAggregationBuilder.parse("aggregationName", parser)); - assertThat(ex.getDetailedMessage(), containsString("badField")); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> GeoDistanceAggregationBuilder.parse("aggregationName", parser)); + assertThat(ExceptionsHelper.detailedMessage(ex), containsString("badField")); } /** diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggIT.java deleted file mode 100644 index 4ee01301b8ac3..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggIT.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.search.aggregations.bucket; - -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.test.ESIntegTestCase; - -import java.io.IOException; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; - -public class ParentIdAggIT extends ESIntegTestCase { - public void testParentIdAggregation() throws IOException { - XContentBuilder mapping = jsonBuilder().startObject() - .startObject("childtype") - .startObject("_parent") - .field("type", "parenttype") - .endObject() - .endObject() - .endObject(); - assertAcked(prepareCreate("testidx").addMapping("childtype", mapping)); - client().prepareIndex("testidx", "childtype").setSource(jsonBuilder().startObject().field("num", 1).endObject()).setParent("p1").get(); - client().prepareIndex("testidx", "childtype").setSource(jsonBuilder().startObject().field("num", 2).endObject()).setParent("p1").get(); - - refresh(); - ensureGreen("testidx"); - SearchResponse searchResponse = client().prepareSearch("testidx").setTypes("childtype").setQuery(matchAllQuery()).addAggregation(AggregationBuilders.terms("children").field("_parent#parenttype")).get(); - assertThat(searchResponse.getHits().getTotalHits(), equalTo(2L)); - assertSearchResponse(searchResponse); - assertThat(searchResponse.getAggregations().getAsMap().get("children"), instanceOf(Terms.class)); - Terms terms = (Terms) searchResponse.getAggregations().getAsMap().get("children"); - assertThat(terms.getBuckets().iterator().next().getDocCount(), equalTo(2L)); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java index b502645a24c81..2cf03b9609328 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java @@ -19,7 +19,8 @@ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.common.ParsingException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; @@ -74,8 +75,9 @@ public void testParsingRangeStrict() throws IOException { "]\n" + "}"; XContentParser parser = createParser(JsonXContent.jsonXContent, rangeAggregation); - ParsingException ex = 
expectThrows(ParsingException.class, () -> RangeAggregationBuilder.parse("aggregationName", parser)); - assertThat(ex.getDetailedMessage(), containsString("badField")); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> RangeAggregationBuilder.parse("aggregationName", parser)); + assertThat(ExceptionsHelper.detailedMessage(ex), containsString("badField")); } /** diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index e52a4b7bbbc9e..900b0b627b6f2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; @@ -41,6 +42,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; @@ -62,6 +64,7 @@ import org.junit.Before; import java.io.IOException; +import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -87,7 +90,7 @@ public class CompositeAggregatorTests extends AggregatorTestCase { @Before public void setUp() throws Exception { super.setUp(); - FIELD_TYPES = new MappedFieldType[6]; + FIELD_TYPES = new MappedFieldType[7]; FIELD_TYPES[0] = new KeywordFieldMapper.KeywordFieldType(); FIELD_TYPES[0].setName("keyword"); FIELD_TYPES[0].setHasDocValues(true); @@ -113,6 +116,10 @@ public void setUp() throws Exception { FIELD_TYPES[5] = new KeywordFieldMapper.KeywordFieldType(); FIELD_TYPES[5].setName("terms"); FIELD_TYPES[5].setHasDocValues(true); + + FIELD_TYPES[6] = new IpFieldMapper.IpFieldType(); + FIELD_TYPES[6].setName("ip"); + FIELD_TYPES[6].setHasDocValues(true); } @Override @@ -812,6 +819,47 @@ public void testWithDateHistogram() throws IOException { ); } + public void testWithDateTerms() throws IOException { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("date", asLong("2017-10-20T03:08:45")), + createDocument("date", asLong("2016-09-20T09:00:34")), + createDocument("date", asLong("2016-09-20T11:34:00")), + createDocument("date", asLong("2017-10-20T06:09:24")), + createDocument("date", asLong("2017-10-19T06:09:24")), + createDocument("long", 4L) + ) + ); + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("date"), + LongPoint.newRangeQuery( + "date", + asLong("2016-09-20T09:00:34"), + asLong("2017-10-20T06:09:24") + )), dataset, + () -> { + TermsValuesSourceBuilder histo = new TermsValuesSourceBuilder("date") + .field("date"); + return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); + }, + (result) -> { + assertEquals(5, result.getBuckets().size()); + assertEquals("{date=1508479764000}",
result.afterKey().toString()); + assertThat(result.getBuckets().get(0).getKey().get("date"), instanceOf(Long.class)); + assertEquals("{date=1474362034000}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{date=1474371240000}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{date=1508393364000}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(2).getDocCount()); + assertEquals("{date=1508468925000}", result.getBuckets().get(3).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertEquals("{date=1508479764000}", result.getBuckets().get(4).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(4).getDocCount()); + } + ); + } + public void testWithDateHistogramAndFormat() throws IOException { final List<Map<String, List<Object>>> dataset = new ArrayList<>(); dataset.addAll( @@ -1437,6 +1485,51 @@ private <T extends Comparable<T>, V extends Comparable<V>> void testRandomTerms( assertEquals(expected, seen); } + public void testWithIP() throws Exception { + final List<Map<String, List<Object>>> dataset = new ArrayList<>(); + dataset.addAll( + Arrays.asList( + createDocument("ip", InetAddress.getByName("127.0.0.1")), + createDocument("ip", InetAddress.getByName("192.168.0.1")), + createDocument("ip", InetAddress.getByName("::1")), + createDocument("ip", InetAddress.getByName("::1")), + createDocument("ip", InetAddress.getByName("192.168.0.1")) + ) + ); + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("ip")), dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("ip") + .field("ip"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); + }, (result) -> { + assertEquals(3, result.getBuckets().size()); + assertEquals("{ip=192.168.0.1}", result.afterKey().toString()); + assertEquals("{ip=::1}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(0).getDocCount()); + assertEquals("{ip=127.0.0.1}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertEquals("{ip=192.168.0.1}", result.getBuckets().get(2).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(2).getDocCount()); + } + ); + + testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("ip")), dataset, + () -> { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("ip") + .field("ip"); + return new CompositeAggregationBuilder("name", Collections.singletonList(terms)) + .aggregateAfter(Collections.singletonMap("ip", "::1")); + }, (result) -> { + assertEquals(2, result.getBuckets().size()); + assertEquals("{ip=192.168.0.1}", result.afterKey().toString()); + assertEquals("{ip=127.0.0.1}", result.getBuckets().get(0).getKeyAsString()); + assertEquals(1L, result.getBuckets().get(0).getDocCount()); + assertEquals("{ip=192.168.0.1}", result.getBuckets().get(1).getKeyAsString()); + assertEquals(2L, result.getBuckets().get(1).getDocCount()); + } + ); + } + private void testSearchCase(List<Query> queries, List<Map<String, List<Object>>> dataset, Supplier<CompositeAggregationBuilder> create, @@ -1491,6 +1584,9 @@ private void addToDocument(Document doc, Map<String, List<Object>> keys) { } else if (value instanceof String) { doc.add(new SortedSetDocValuesField(name, new BytesRef((String) value))); doc.add(new StringField(name, new BytesRef((String) value), Field.Store.NO)); + } else if (value instanceof InetAddress) { + doc.add(new
SortedSetDocValuesField(name, new BytesRef(InetAddressPoint.encode((InetAddress) value)))); + doc.add(new InetAddressPoint(name, (InetAddress) value)); } else { throw new AssertionError("invalid object: " + value.getClass().getSimpleName()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java index edf732ce24a41..a6cf15c4105d7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueueTests.java @@ -212,20 +212,28 @@ private void testRandomCase(boolean forceMerge, ClassAndName... types) throws IO if (types[i].clazz == Long.class) { sources[i] = new LongValuesSource(bigArrays, fieldType, context -> DocValues.getSortedNumeric(context.reader(), fieldType.name()), value -> value, - DocValueFormat.RAW, size, 1); + DocValueFormat.RAW, null, size, 1); } else if (types[i].clazz == Double.class) { - sources[i] = new DoubleValuesSource(bigArrays, fieldType, + sources[i] = new DoubleValuesSource( + bigArrays, fieldType, context -> FieldData.sortableLongBitsToDoubles(DocValues.getSortedNumeric(context.reader(), fieldType.name())), - size, 1); + DocValueFormat.RAW, null, size, 1 + ); } else if (types[i].clazz == BytesRef.class) { if (forceMerge) { // we don't create global ordinals but we test this mode when the reader has a single segment // since ordinals are global in this case. - sources[i] = new GlobalOrdinalValuesSource(bigArrays, fieldType, - context -> DocValues.getSortedSet(context.reader(), fieldType.name()), size, 1); + sources[i] = new GlobalOrdinalValuesSource( + bigArrays, fieldType, + context -> DocValues.getSortedSet(context.reader(), fieldType.name()), + DocValueFormat.RAW, null, size, 1 + ); } else { - sources[i] = new BinaryValuesSource(fieldType, - context -> FieldData.toString(DocValues.getSortedSet(context.reader(), fieldType.name())), size, 1); + sources[i] = new BinaryValuesSource( + fieldType, + context -> FieldData.toString(DocValues.getSortedSet(context.reader(), fieldType.name())), + DocValueFormat.RAW, null, size, 1 + ); } } else { assert(false); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java index 2fd14fe6b697d..fa653e5ed4195 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSourceTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -38,28 +39,100 @@ public class SingleDimensionValuesSourceTests extends ESTestCase { public void testBinarySorted() { MappedFieldType keyword = new KeywordFieldMapper.KeywordFieldType(); keyword.setName("keyword"); - BinaryValuesSource source = new 
BinaryValuesSource(keyword, context -> null, 1, 1); + BinaryValuesSource source = new BinaryValuesSource( + keyword, + context -> null, + DocValueFormat.RAW, + null, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); IndexReader reader = mockIndexReader(1, 1); assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); - source = new BinaryValuesSource(keyword, context -> null, 0, -1); + + source = new BinaryValuesSource( + keyword, + context -> null, + DocValueFormat.RAW, + "missing_value", + 1, + 1 + ); + assertNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); + + source = new BinaryValuesSource( + keyword, + context -> null, + DocValueFormat.RAW, + null, + 0, + -1 + ); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); + + MappedFieldType ip = new IpFieldMapper.IpFieldType(); + ip.setName("ip"); + source = new BinaryValuesSource(ip, context -> null, DocValueFormat.RAW,null, 1, 1); assertNull(source.createSortedDocsProducerOrNull(reader, null)); } public void testGlobalOrdinalsSorted() { - MappedFieldType keyword = new KeywordFieldMapper.KeywordFieldType(); + final MappedFieldType keyword = new KeywordFieldMapper.KeywordFieldType(); keyword.setName("keyword"); - BinaryValuesSource source = new BinaryValuesSource(keyword, context -> null, 1, 1); + GlobalOrdinalValuesSource source = new GlobalOrdinalValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + keyword, context -> null, + DocValueFormat.RAW, + null, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); IndexReader reader = mockIndexReader(1, 1); assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); - source = new BinaryValuesSource(keyword, context -> null, 1, -1); + + source = new GlobalOrdinalValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + keyword, + context -> null, + DocValueFormat.RAW, + "missing_value", + 1, + 1 + ); + assertNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); + + source = new GlobalOrdinalValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + keyword, + context -> null, + DocValueFormat.RAW, + null, + 1, + -1 + ); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); + + final MappedFieldType ip = new IpFieldMapper.IpFieldType(); + ip.setName("ip"); + source = new GlobalOrdinalValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + ip, + context -> null, + DocValueFormat.RAW, + null, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(reader, null)); } @@ -72,23 +145,62 @@ public void testNumericSorted() { numberType == NumberFieldMapper.NumberType.SHORT || numberType == NumberFieldMapper.NumberType.INTEGER || numberType == NumberFieldMapper.NumberType.LONG) { - source = new LongValuesSource(BigArrays.NON_RECYCLING_INSTANCE, - number, context -> null, value -> value, DocValueFormat.RAW, 1, 1); + + source = new LongValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + number, + context -> null, + value -> 
value, + DocValueFormat.RAW, + null, + 1, + 1 + ); assertNull(source.createSortedDocsProducerOrNull(mockIndexReader(100, 49), null)); IndexReader reader = mockIndexReader(1, 1); assertNotNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); assertNotNull(source.createSortedDocsProducerOrNull(reader, null)); assertNotNull(source.createSortedDocsProducerOrNull(reader, LongPoint.newRangeQuery("number", 0, 1))); assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); - LongValuesSource sourceRev = - new LongValuesSource(BigArrays.NON_RECYCLING_INSTANCE, - number, context -> null, value -> value, DocValueFormat.RAW, 1, -1); + + LongValuesSource sourceWithMissing = new LongValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + number, + context -> null, + value -> value, + DocValueFormat.RAW, + 0d, + 1, + 1); + assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery())); + assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, null)); + assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)")))); + + LongValuesSource sourceRev = new LongValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + number, + context -> null, + value -> value, + DocValueFormat.RAW, + null, + 1, + -1 + ); assertNull(sourceRev.createSortedDocsProducerOrNull(reader, null)); } else if (numberType == NumberFieldMapper.NumberType.HALF_FLOAT || - numberType == NumberFieldMapper.NumberType.FLOAT || - numberType == NumberFieldMapper.NumberType.DOUBLE) { - source = new DoubleValuesSource(BigArrays.NON_RECYCLING_INSTANCE, - number, context -> null, 1, 1); + numberType == NumberFieldMapper.NumberType.FLOAT || + numberType == NumberFieldMapper.NumberType.DOUBLE) { + source = new DoubleValuesSource( + BigArrays.NON_RECYCLING_INSTANCE, + number, + context -> null, + DocValueFormat.RAW, + null, + 1, + 1 + ); + IndexReader reader = mockIndexReader(1, 1); + assertNull(source.createSortedDocsProducerOrNull(reader, null)); } else{ throw new AssertionError ("missing type:" + numberType.typeName()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java index 7f46cb9e551a8..e431bf19ff3de 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParserTests.java @@ -18,8 +18,9 @@ */ package org.elasticsearch.search.aggregations.bucket.geogrid; -import org.elasticsearch.common.ParsingException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.unit.DistanceUnit; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; @@ -73,8 +74,9 @@ public void testParseInvalidUnitPrecision() throws Exception { "{\"field\":\"my_loc\", \"precision\": \"10kg\", \"size\": \"500\", \"shard_size\": \"550\"}"); XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); - ParsingException ex = expectThrows(ParsingException.class, () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); - assertEquals("[geohash_grid] failed to parse field 
[precision]", ex.getMessage()); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + assertThat(ex.getMessage(), containsString("[geohash_grid] failed to parse field [precision]")); assertThat(ex.getCause(), instanceOf(NumberFormatException.class)); assertEquals("For input string: \"10kg\"", ex.getCause().getMessage()); } @@ -84,8 +86,9 @@ public void testParseDistanceUnitPrecisionTooSmall() throws Exception { "{\"field\":\"my_loc\", \"precision\": \"1cm\", \"size\": \"500\", \"shard_size\": \"550\"}"); XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); - ParsingException ex = expectThrows(ParsingException.class, () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); - assertEquals("[geohash_grid] failed to parse field [precision]", ex.getMessage()); + XContentParseException ex = expectThrows(XContentParseException.class, + () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + assertThat(ex.getMessage(), containsString("[geohash_grid] failed to parse field [precision]")); assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); assertEquals("precision too high [1cm]", ex.getCause().getMessage()); } @@ -94,8 +97,10 @@ public void testParseErrorOnBooleanPrecision() throws Exception { XContentParser stParser = createParser(JsonXContent.jsonXContent, "{\"field\":\"my_loc\", \"precision\":false}"); XContentParser.Token token = stParser.nextToken(); assertSame(XContentParser.Token.START_OBJECT, token); - Exception e = expectThrows(ParsingException.class, () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); - assertThat(e.getMessage(), containsString("[geohash_grid] precision doesn't support values of type: VALUE_BOOLEAN")); + XContentParseException e = expectThrows(XContentParseException.class, + () -> GeoGridAggregationBuilder.parse("geohash_grid", stParser)); + assertThat(ExceptionsHelper.detailedMessage(e), + containsString("[geohash_grid] precision doesn't support values of type: VALUE_BOOLEAN")); } public void testParseErrorOnPrecisionOutOfRange() throws Exception { @@ -105,9 +110,9 @@ public void testParseErrorOnPrecisionOutOfRange() throws Exception { try { GeoGridAggregationBuilder.parse("geohash_grid", stParser); fail(); - } catch (ParsingException ex) { + } catch (XContentParseException ex) { assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); assertEquals("Invalid geohash aggregation precision of 13. 
Must be between 1 and 12.", ex.getCause().getMessage()); } } -} \ No newline at end of file +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index 35d30ebd6b57f..11a44a1d89bad 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -40,12 +40,13 @@ import org.elasticsearch.Version; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.BucketOrder; @@ -127,8 +128,7 @@ public void testSingleNestingMax() throws IOException { expectedNestedDocs += numNestedDocs; Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#" + i, - UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); @@ -176,8 +176,7 @@ public void testDoubleNestingMax() throws IOException { expectedNestedDocs += numNestedDocs; Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#" + i, - UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); @@ -225,8 +224,7 @@ public void testOrphanedDocs() throws IOException { expectedNestedDocs += numNestedDocs; Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#" + i, - UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); @@ -275,19 +273,19 @@ public void testResetRootDocId() throws Exception { // 1 segment with, 1 root document, with 3 nested sub docs Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), 
IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); documents.add(document); @@ -298,11 +296,11 @@ public void testResetRootDocId() throws Exception { // 1 segment with: // 1 document, with 1 nested subdoc document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); documents.add(document); @@ -310,11 +308,11 @@ documents.clear(); // and 1 document, with 1 nested subdoc document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); documents.add(document); @@ -333,7 +331,7 @@ public void testResetRootDocId() throws Exception { BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST); - bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), BooleanClause.Occur.MUST_NOT); + bq.add(new TermQuery(new Term(IdFieldMapper.NAME, Uid.encodeId("2"))), BooleanClause.Occur.MUST_NOT); Nested nested = search(newSearcher(indexReader, false, true), new ConstantScoreQuery(bq.build()), nestedBuilder, fieldType); @@ -533,19 +531,19 @@ public void testPreGetChildLeafCollectors() throws IOException { try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { List<Document> documents = new ArrayList<>(); Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#1", 
UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a1"))); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b1"))); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#1", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("1"), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "_doc", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); documents.add(document); @@ -554,19 +552,19 @@ public void testPreGetChildLeafCollectors() throws IOException { documents.clear(); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a2"))); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b2"))); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#2", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("2"), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "_doc", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); documents.add(document); @@ -575,19 +573,19 @@ public void testPreGetChildLeafCollectors() throws IOException { documents.clear(); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#3", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key1"))); document.add(new SortedDocValuesField("value", new BytesRef("a3"))); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#3", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new 
Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedDocValuesField("key", new BytesRef("key2"))); document.add(new SortedDocValuesField("value", new BytesRef("b3"))); documents.add(document); document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "_doc#1", UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId("3"), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "_doc", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); documents.add(document); @@ -654,8 +652,8 @@ private double[] generateDocuments(List<Document> documents, int numNestedDocs, double[] values = new double[numNestedDocs]; for (int nested = 0; nested < numNestedDocs; nested++) { Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "type#" + id, - UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(id)), + IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__" + path, TypeFieldMapper.Defaults.FIELD_TYPE)); long value = randomNonNegativeLong() % 10000; @@ -671,14 +669,14 @@ private List<Document> generateBook(String id, String[] authors, int[] numPages) for (int numPage : numPages) { Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "book#" + id, UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "__nested_chapters", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(new SortedNumericDocValuesField("num_pages", numPage)); documents.add(document); } Document document = new Document(); - document.add(new Field(UidFieldMapper.NAME, "book#" + id, UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE)); document.add(new Field(TypeFieldMapper.NAME, "book", TypeFieldMapper.Defaults.FIELD_TYPE)); document.add(sequenceIDFields.primaryTerm); for (String author : authors) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java index 7281228f3c388..36d6c6bd6e45b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java @@ -27,11 +27,12 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; -import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.max.InternalMax; 
@@ -95,16 +96,16 @@ public void testMaxFromParentDocs() throws IOException {
int numNestedDocs = randomIntBetween(0, 20);
for (int nested = 0; nested < numNestedDocs; nested++) {
Document document = new Document();
- document.add(new Field(UidFieldMapper.NAME, "type#" + i,
- UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)),
+ IdFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT,
TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
expectedNestedDocs++;
}
Document document = new Document();
- document.add(new Field(UidFieldMapper.NAME, "type#" + i,
- UidFieldMapper.Defaults.FIELD_TYPE));
+ document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(i)),
+ IdFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test",
TypeFieldMapper.Defaults.FIELD_TYPE));
long value = randomNonNegativeLong() % 10000;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
index b8c9825d9b5a5..02909d673beb0 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
@@ -31,6 +31,7 @@
import org.elasticsearch.common.xcontent.ParseFieldRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.Index;
@@ -270,7 +271,7 @@ protected void checkParseException(ParseFieldRegistry generateDocsWithNested(String id, int value, int[] nested
for (int nestedValue : nestedValues) {
Document document = new Document();
- document.add(new Field(UidFieldMapper.NAME, "docs#" + id, UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__nested_object", TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(new SortedNumericDocValuesField("nested_value", nestedValue));
documents.add(document);
}
Document document = new Document();
- document.add(new Field(UidFieldMapper.NAME, "docs#" + id, UidFieldMapper.Defaults.FIELD_TYPE));
+ document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "docs", TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(new SortedNumericDocValuesField("value", value));
document.add(sequenceIDFields.primaryTerm);
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java
index a492e19496f3c..7410ce0c3e372 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java
@@ -19,7 +19,9 @@
package org.elasticsearch.search.aggregations.metrics;
+import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
@@ -27,6 +29,8 @@
import java.io.IOException;
+import static org.hamcrest.Matchers.containsString;
+
public class PercentilesTests extends BaseAggregationTestCase<PercentilesAggregationBuilder> {
@Override
@@ -85,12 +89,8 @@ public void testExceptionMultipleMethods() throws IOException {
XContentParser parser = createParser(JsonXContent.jsonXContent, illegalAgg);
assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
- ParsingException e = expectThrows(ParsingException.class,
+ XContentParseException e = expectThrows(XContentParseException.class,
() -> PercentilesAggregationBuilder.parse("myPercentiles", parser));
- assertEquals(
- "ParsingException[[percentiles] failed to parse field [hdr]]; "
- + "nested: IllegalStateException[Only one percentiles method should be declared.];; "
- + "java.lang.IllegalStateException: Only one percentiles method should be declared.",
- e.getDetailedMessage());
+ assertThat(ExceptionsHelper.detailedMessage(e), containsString("[percentiles] failed to parse field [hdr]"));
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java
index c797fcb91db43..9dc7896638c4e 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java
@@ -68,8 +68,10 @@ protected void assertReduced(InternalGeoCentroid reduced, List<InternalGeoCentroid> inputs) {
- assertEquals(latSum/totalCount, reduced.centroid().getLat(), 1E-5D);
- assertEquals(lonSum/totalCount, reduced.centroid().getLon(), 1E-5D);
+ if (totalCount > 0) {
+ assertEquals(latSum/totalCount, reduced.centroid().getLat(), 1E-5D);
+ assertEquals(lonSum/totalCount, reduced.centroid().getLon(), 1E-5D);
+ }
assertEquals(totalCount, reduced.count());
}
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java
index 5555e987ec402..3fe75b77e7f12 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregatorTests.java
@@ -39,10 +39,10 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Uid;
-import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilder;
@@ -149,7 +149,7 @@ private Aggregation testCase(Query query, AggregationBuilder builder) throws IOE
private Document document(String id, String...
stringValues) {
Document document = new Document();
- document.add(new Field(UidFieldMapper.NAME, Uid.createUid("type", id), UidFieldMapper.Defaults.FIELD_TYPE));
+ document.add(new Field(IdFieldMapper.NAME, Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE));
for (String stringValue : stringValues) {
document.add(new Field("string", stringValue, STRING_FIELD_TYPE));
document.add(new SortedSetDocValuesField("string", new BytesRef(stringValue)));
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
index bbe6ecc3a4e68..159b7e28b1269 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
@@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations.pipeline.moving.avg;
+import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
@@ -67,6 +68,7 @@
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
@ESIntegTestCase.SuiteScopeTestCase
public class MovAvgIT extends ESIntegTestCase {
private static final String INTERVAL_FIELD = "l_value";
@@ -600,6 +602,7 @@ public void testHoltSingleValuedField() {
}
}
+
public void testHoltWintersValuedField() {
SearchResponse response = client()
.prepareSearch("idx").setTypes("type")
@@ -1292,8 +1295,8 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount,
assertThat("[_count] movavg should be NaN, but is ["+countMovAvg.value()+"] instead", countMovAvg.value(), equalTo(Double.NaN));
} else {
assertThat("[_count] movavg is null", countMovAvg, notNullValue());
- assertTrue("[_count] movavg does not match expected [" + countMovAvg.value() + " vs " + expectedCount + "]",
- nearlyEqual(countMovAvg.value(), expectedCount, 0.1));
+ assertEquals("[_count] movavg does not match expected [" + countMovAvg.value() + " vs " + expectedCount + "]",
+ countMovAvg.value(), expectedCount, 0.1);
}
// This is a gap bucket
@@ -1304,29 +1307,8 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount,
assertThat("[value] movavg should be NaN, but is ["+valuesMovAvg.value()+"] instead", valuesMovAvg.value(), equalTo(Double.NaN));
} else {
assertThat("[value] movavg is null", valuesMovAvg, notNullValue());
- assertTrue("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]",
- nearlyEqual(valuesMovAvg.value(), expectedValue, 0.1));
- }
- }
-
- /**
- * Better floating point comparisons courtesy of https://github.com/brazzy/floating-point-gui.de
- *
- * Snippet adapted to use doubles instead of floats
- */
- private static boolean nearlyEqual(double a, double b, double epsilon) {
- final double absA = Math.abs(a);
- final double absB = Math.abs(b);
- final double diff = Math.abs(a - b);
-
- if (a == b) { // shortcut, handles infinities
- return true;
- } else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) {
- // a or b is zero or both are extremely close to it
- // relative error is less meaningful here
- return diff < (epsilon * Double.MIN_NORMAL);
- } else { // use relative error
- return diff / Math.min((absA + absB), Double.MAX_VALUE) < epsilon;
+ assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]",
+ valuesMovAvg.value(), expectedValue, 0.1);
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
index 66d6f68b8a4aa..541a3b04254d8 100644
--- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.search.builder;
+import com.fasterxml.jackson.core.JsonParseException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference;
@@ -67,6 +68,18 @@ public void testFromXContent() throws IOException {
assertParseSearchSource(testSearchSourceBuilder, createParser(builder));
}
+ public void testFromXContentInvalid() throws IOException {
+ try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{}}")) {
+ JsonParseException exc = expectThrows(JsonParseException.class, () -> SearchSourceBuilder.fromXContent(parser));
+ assertThat(exc.getMessage(), containsString("Unexpected close marker"));
+ }
+
+ try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{}{}")) {
+ ParsingException exc = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(parser));
+ assertThat(exc.getDetailedMessage(), containsString("found after the main object"));
+ }
+ }
+
private static void assertParseSearchSource(SearchSourceBuilder testBuilder, XContentParser parser) throws IOException {
if (randomBoolean()) {
parser.nextToken(); // sometimes we move it on the START_OBJECT to
@@ -332,7 +345,7 @@ public void testTimeoutWithoutUnits() throws IOException {
final int timeout = randomIntBetween(1, 1024);
final String query = "{ \"query\": { \"match_all\": {}}, \"timeout\": \"" + timeout + "\"}";
try (XContentParser parser = createParser(JsonXContent.jsonXContent, query)) {
- final ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> SearchSourceBuilder.fromXContent(
+ final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> SearchSourceBuilder.fromXContent(
parser));
assertThat(e, hasToString(containsString("unit is missing or unrecognized")));
}
diff --git a/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java b/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java
deleted file mode 100644
index 36d672e40f278..0000000000000
--- a/server/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingIT.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.search.child;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
-import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.MergePolicyConfig;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.InternalSettingsPlugin;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-
-import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThan;
-
-public class ParentFieldLoadingIT extends ESIntegTestCase {
-
- @Override
- protected Collection<Class<? extends Plugin>> nodePlugins() {
- return Arrays.asList(InternalSettingsPlugin.class); // uses index.merge.enabled
- }
-
- private final Settings indexSettings = Settings.builder()
- .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
- .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)
- // We never want merges in this test to ensure we have two segments for the last validation
- .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
- .put("index.version.created", Version.V_5_6_0)
- .build();
-
- public void testEagerParentFieldLoading() throws Exception {
- logger.info("testing lazy loading...");
- assertAcked(prepareCreate("test")
- .setSettings(indexSettings)
- .addMapping("parent")
- .addMapping("child", childMapping(false)));
- ensureGreen();
-
- client().prepareIndex("test", "parent", "1").setSource("{}", XContentType.JSON).get();
- client().prepareIndex("test", "child", "1").setParent("1").setSource("{}", XContentType.JSON).get();
- refresh();
-
- ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
- assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0L));
-
- logger.info("testing default loading...");
- assertAcked(client().admin().indices().prepareDelete("test").get());
- assertAcked(prepareCreate("test")
- .setSettings(indexSettings)
- .addMapping("parent")
- .addMapping("child", "_parent", "type=parent"));
- ensureGreen();
-
- client().prepareIndex("test", "parent", "1").setSource("{}", XContentType.JSON).get();
- client().prepareIndex("test", "child", "1").setParent("1").setSource("{}", XContentType.JSON).get();
- refresh();
-
- response = client().admin().cluster().prepareClusterStats().get();
- assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0L));
-
- logger.info("testing eager global ordinals loading...");
- assertAcked(client().admin().indices().prepareDelete("test").get());
- assertAcked(prepareCreate("test")
- .setSettings(indexSettings)
- .addMapping("parent")
- .addMapping("child", childMapping(true)));
- ensureGreen();
-
- // Need to do 2 separate refreshes, otherwise we have 1 segment and then we can't measure if global ordinals
- // is loaded by the size of the field data cache, because global ordinals on 1 segment shards takes no extra memory.
- client().prepareIndex("test", "parent", "1").setSource("{}", XContentType.JSON).get();
- refresh();
- client().prepareIndex("test", "child", "1").setParent("1").setSource("{}", XContentType.JSON).get();
- refresh();
-
- response = client().admin().cluster().prepareClusterStats().get();
- assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
- }
-
- public void testChangingEagerParentFieldLoadingAtRuntime() throws Exception {
- assertAcked(prepareCreate("test")
- .setSettings(indexSettings)
- .addMapping("parent")
- .addMapping("child", "_parent", "type=parent"));
- ensureGreen();
-
- client().prepareIndex("test", "parent", "1").setSource("{}", XContentType.JSON).get();
- client().prepareIndex("test", "child", "1").setParent("1").setSource("{}", XContentType.JSON).get();
- refresh();
-
- ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
- assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0L));
-
- PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child")
- .setSource(childMapping(true))
- .get();
- assertAcked(putMappingResponse);
- Index test = resolveIndex("test");
- assertBusy(() -> {
- ClusterState clusterState = internalCluster().clusterService().state();
- ShardRouting shardRouting = clusterState.routingTable().index("test").shard(0).getShards().get(0);
- String nodeName = clusterState.getNodes().get(shardRouting.currentNodeId()).getName();
-
- boolean verified = false;
- IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
- IndexService indexService = indicesService.indexService(test);
- if (indexService != null) {
- MapperService mapperService = indexService.mapperService();
- DocumentMapper documentMapper = mapperService.documentMapper("child");
- if (documentMapper != null) {
- verified = documentMapper.parentFieldMapper().fieldType().eagerGlobalOrdinals();
- }
- }
- assertTrue(verified);
- });
-
- // Need to add a new doc otherwise the refresh doesn't trigger a new searcher
- // Because it ends up in its own segment, but isn't of type parent or child, this doc doesn't contribute to the size of the fielddata cache
- client().prepareIndex("test", "dummy", "dummy").setSource("{}", XContentType.JSON).get();
- refresh();
- response = client().admin().cluster().prepareClusterStats().get();
- assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
- }
-
- private XContentBuilder childMapping(boolean eagerGlobalOrds) throws IOException {
- return jsonBuilder().startObject().startObject("child").startObject("_parent")
- .field("type", "parent")
- .startObject("fielddata").field("eager_global_ordinals", eagerGlobalOrds).endObject()
- .endObject().endObject().endObject();
- }
-
-}
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
index f0cad04215823..e01cbc470d531 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
@@ -650,13 +650,10 @@ public void testNestedSource() throws Exception {
public void testInnerHitsWithIgnoreUnmapped() throws Exception {
assertAcked(prepareCreate("index1")
- .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))
- .addMapping("parent_type", "nested_type", "type=nested")
- .addMapping("child_type", "_parent", "type=parent_type")
+ .addMapping("_doc", "nested_type", "type=nested")
);
createIndex("index2");
- client().prepareIndex("index1", "parent_type", "1").setSource("nested_type", Collections.singletonMap("key", "value")).get();
- client().prepareIndex("index1", "child_type", "2").setParent("1").setSource("{}", XContentType.JSON).get();
+ client().prepareIndex("index1", "_doc", "1").setSource("nested_type", Collections.singletonMap("key", "value")).get();
client().prepareIndex("index2", "type", "3").setSource("key", "value").get();
refresh();
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhaseTests.java
deleted file mode 100644
index 968424eee735c..0000000000000
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/ParentFieldSubFetchPhaseTests.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.search.fetch.subphase;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.SortedDocValuesField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.mapper.ContentPath;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.ParentFieldMapper;
-import org.elasticsearch.test.ESTestCase;
-
-public class ParentFieldSubFetchPhaseTests extends ESTestCase {
-
- public void testGetParentId() throws Exception {
- ParentFieldMapper fieldMapper = createParentFieldMapper();
- Directory directory = newDirectory();
- IndexWriter indexWriter = new IndexWriter(directory, newIndexWriterConfig());
- Document document = new Document();
- document.add(new SortedDocValuesField(fieldMapper.fieldType().name(), new BytesRef("1")));
- indexWriter.addDocument(document);
- indexWriter.close();
-
- IndexReader indexReader = DirectoryReader.open(directory);
- String id = ParentFieldSubFetchPhase.getParentId(fieldMapper, indexReader.leaves().get(0).reader(), 0);
- assertEquals("1", id);
-
- indexReader.close();
- directory.close();
- }
-
- public void testGetParentIdNoParentField() throws Exception {
- ParentFieldMapper fieldMapper = createParentFieldMapper();
- Directory directory = newDirectory();
- IndexWriter indexWriter = new IndexWriter(directory, newIndexWriterConfig());
- Document document = new Document();
- document.add(new SortedDocValuesField("different_field", new BytesRef("1")));
- indexWriter.addDocument(document);
- indexWriter.close();
-
- IndexReader indexReader = DirectoryReader.open(directory);
- String id = ParentFieldSubFetchPhase.getParentId(fieldMapper, indexReader.leaves().get(0).reader(), 0);
- assertNull(id);
-
- indexReader.close();
- directory.close();
- }
-
- private ParentFieldMapper createParentFieldMapper() {
- Settings settings = Settings.builder()
- .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
- .build();
- return new ParentFieldMapper.Builder("type")
- .type("parent_type")
- .build(new Mapper.BuilderContext(settings, new ContentPath(0)));
- }
-
-
-}
diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
index c002d08e6f7a4..5d06fd4cd400b 100644
--- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java
@@ -31,6 +31,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -69,6 +70,7 @@
import static java.util.Collections.emptyList;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class HighlightBuilderTests extends ESTestCase {
@@ -163,15 +165,15 @@ public void testUnknownArrayNameExpection() throws IOException {
}
{
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"fields\" : {\n" +
" \"body\" : {\n" +
" \"bad_fieldname\" : [ \"field1\" , \"field2\" ]\n" +
" }\n" +
" }\n" +
"}\n");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals("[fields] failed to parse field [body]", e.getCause().getMessage());
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
assertEquals("[highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
}
}
@@ -193,15 +195,15 @@ public void testUnknownFieldnameExpection() throws IOException {
}
{
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"fields\" : {\n" +
" \"body\" : {\n" +
" \"bad_fieldname\" : \"value\"\n" +
" }\n" +
" }\n" +
"}\n");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals("[fields] failed to parse field [body]", e.getCause().getMessage());
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
assertEquals("[highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
}
}
@@ -218,49 +220,50 @@ public void testUnknownObjectFieldnameExpection() throws IOException {
}
{
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"fields\" : {\n" +
" \"body\" : {\n" +
" \"bad_fieldname\" : { \"field\" : \"value\" }\n" +
" }\n" +
" }\n" +
"}\n");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals("[fields] failed to parse field [body]", e.getCause().getMessage());
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
assertEquals("[highlight_field] unknown field [bad_fieldname], parser not found", e.getCause().getCause().getMessage());
}
}
public void testStringInFieldsArray() throws IOException {
- ParsingException e = expectParseThrows(ParsingException.class, "{\"fields\" : [ \"junk\" ]}");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals(
- "[fields] can be a single object with any number of fields or an array where each entry is an object with a single field",
- e.getCause().getMessage());
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\"fields\" : [ \"junk\" ]}");
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(),
+ containsString("[fields] can be a single object with any number of fields " +
+ "or an array where each entry is an object with a single field"));
}
public void testNoFieldsInObjectInFieldsArray() throws IOException {
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"fields\" : [ {\n" +
" }] \n" +
"}\n");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals(
- "[fields] can be a single object with any number of fields or an array where each entry is an object with a single field",
- e.getCause().getMessage());
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(),
+ containsString("[fields] can be a single object with any number of fields " +
+ "or an array where each entry is an object with a single field"));
}
public void testTwoFieldsInObjectInFieldsArray() throws IOException {
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"fields\" : [ {\n" +
" \"body\" : {},\n" +
" \"nope\" : {}\n" +
" }] \n" +
"}\n");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals(
- "[fields] can be a single object with any number of fields or an array where each entry is an object with a single field",
- e.getCause().getMessage()); }
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(),
+ containsString("[fields] can be a single object with any number of fields " +
+ "or an array where each entry is an object with a single field"));
+ }
/**
* test that build() outputs a {@link SearchContextHighlight} that is has similar parameters
@@ -405,10 +408,10 @@ public void testParsingTagsSchema() throws IOException {
assertArrayEquals("setting tags_schema 'default' should alter post_tags", HighlightBuilder.DEFAULT_POST_TAGS,
highlightBuilder.postTags());
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"tags_schema\" : \"somthing_else\"\n" +
"}\n");
- assertEquals("[highlight] failed to parse field [tags_schema]", e.getMessage());
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [tags_schema]"));
assertEquals("Unknown tag schema [somthing_else]", e.getCause().getMessage());
}
@@ -436,20 +439,20 @@ public void testParsingEmptyStructure() throws IOException {
}
public void testPreTagsWithoutPostTags() throws IOException {
- ParsingException e = expectParseThrows(ParsingException.class, "{\n" +
+ ParsingException err = expectParseThrows(ParsingException.class, "{\n" +
" \"pre_tags\" : [\"\"]\n" +
"}\n");
- assertEquals("pre_tags are set but post_tags are not set", e.getMessage());
+ assertEquals("pre_tags are set but post_tags are not set", err.getMessage());
- e = expectParseThrows(ParsingException.class, "{\n" +
+ XContentParseException e = expectParseThrows(XContentParseException.class, "{\n" +
" \"fields\" : {\n" +
" \"body\" : {\n" +
" \"pre_tags\" : [\"\"]\n" +
" }\n" +
" }\n" +
"}\n");
- assertEquals("[highlight] failed to parse field [fields]", e.getMessage());
- assertEquals("[fields] failed to parse field [body]", e.getCause().getMessage());
+ assertThat(e.getMessage(), containsString("[highlight] failed to parse field [fields]"));
+ assertThat(e.getCause().getMessage(), containsString("[fields] failed to parse field [body]"));
assertEquals("pre_tags are set but post_tags are not set", e.getCause().getCause().getMessage());
}
diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java
index d7480c2b6fb2e..36062860202a5 100644
--- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java
+++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java
@@ -342,7 +342,7 @@ public void testScriptDocAndFields() throws Exception {
assertThat(response.getHits().getAt(2).getFields().get("sNum1").getValues().get(0), equalTo(6.0));
}
- public void testUidBasedScriptFields() throws Exception {
+ public void testIdBasedScriptFields() throws Exception {
prepareCreate("test").addMapping("type1", "num1", "type=long").execute().actionGet();
int numDocs = randomIntBetween(1, 30);
@@ -354,23 +354,6 @@
indexRandom(true, indexRequestBuilders);
SearchResponse response = client().prepareSearch()
- .setQuery(matchAllQuery())
- .addSort("num1", SortOrder.ASC)
- .setSize(numDocs)
- .addScriptField("uid", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._uid.value", Collections.emptyMap()))
- .get();
-
- assertNoFailures(response);
-
- assertThat(response.getHits().getTotalHits(), equalTo((long)numDocs));
- for (int i = 0; i < numDocs; i++) {
- assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
- Set<String> fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet());
- assertThat(fields, equalTo(singleton("uid")));
- assertThat(response.getHits().getAt(i).getFields().get("uid").getValue(), equalTo("type1#" + Integer.toString(i)));
- }
-
- response = client().prepareSearch()
.setQuery(matchAllQuery())
.addSort("num1", SortOrder.ASC)
.setSize(numDocs)
@@ -410,7 +393,6 @@
.addSort("num1", SortOrder.ASC)
.setSize(numDocs)
.addScriptField("id", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._id.value", Collections.emptyMap()))
- .addScriptField("uid", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._uid.value", Collections.emptyMap()))
.addScriptField("type", new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_fields._type.value", Collections.emptyMap()))
.get();
@@ -421,8 +403,7 @@
for (int i = 0; i < numDocs; i++) {
assertThat(response.getHits().getAt(i).getId(), equalTo(Integer.toString(i)));
Set<String> fields = new HashSet<>(response.getHits().getAt(i).getFields().keySet());
- assertThat(fields, equalTo(newHashSet("uid", "type", "id")));
- assertThat(response.getHits().getAt(i).getFields().get("uid").getValue(), equalTo("type1#" + Integer.toString(i)));
+ assertThat(fields, equalTo(newHashSet("type", "id")));
assertThat(response.getHits().getAt(i).getFields().get("type").getValue(), equalTo("type1"));
assertThat(response.getHits().getAt(i).getFields().get("id").getValue(), equalTo(Integer.toString(i)));
}
diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
index 3b1002a6f68c4..fa0531262bb1d 100644
--- a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
+++ b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java
@@ -42,12 +42,12 @@
import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
import org.elasticsearch.common.geo.builders.PointBuilder;
import org.elasticsearch.common.geo.builders.PolygonBuilder;
-import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.core.internal.io.Streams;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
diff --git a/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
index 185ec53f3b4e3..3487de255e164 100644
--- a/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
+++ b/server/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
@@ -367,39 +367,6 @@ public void testSimpleMoreLikeThisIds() throws Exception {
assertHitCount(mltResponse, 3L);
}
- public void testSimpleMoreLikeThisIdsMultipleTypes() throws Exception {
- logger.info("Creating index test");
- int numOfTypes = randomIntBetween(2, 10);
- CreateIndexRequestBuilder createRequestBuilder = prepareCreate("test")
- .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id));
- for (int i = 0; i < numOfTypes; i++) {
- createRequestBuilder.addMapping("type" + i, jsonBuilder().startObject().startObject("type" + i).startObject("properties")
- .startObject("text").field("type", "text").endObject()
- .endObject().endObject().endObject());
- }
- assertAcked(createRequestBuilder);
-
- logger.info("Running Cluster Health");
- assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
-
- logger.info("Indexing...");
- List<IndexRequestBuilder> builders = new ArrayList<>(numOfTypes);
- for (int i = 0; i < numOfTypes; i++) {
- builders.add(client().prepareIndex("test", "type" + i).setSource("text", "lucene" + " " + i).setId(String.valueOf(i)));
- }
- indexRandom(true, builders);
-
- logger.info("Running MoreLikeThis");
- MoreLikeThisQueryBuilder queryBuilder = QueryBuilders.moreLikeThisQuery(new String[] {"text"}, null, new Item[] {new Item("test", "type0", "0")}).include(true).minTermFreq(1).minDocFreq(1);
-
- String[] types = new String[numOfTypes];
- for (int i = 0; i < numOfTypes; i++) {
- types[i] = "type"+i;
- }
- SearchResponse mltResponse = client().prepareSearch().setTypes(types).setQuery(queryBuilder).execute().actionGet();
- assertHitCount(mltResponse, numOfTypes);
- }
-
public void testMoreLikeThisMultiValueFields() throws Exception {
logger.info("Creating the index ...");
assertAcked(prepareCreate("test")
diff --git a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
index 14378fdb1c8a9..6de15146faee5 100644
--- a/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
@@ -39,7 +39,6 @@
import java.util.Map;
import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
-import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.isEmptyOrNullString;
@@ -156,8 +155,8 @@ public void testProfileMatchesRegular() throws Exception {
assertTrue("Vanilla maxScore is NaN but Profile is not [" +
profileMaxScore + "]", Float.isNaN(profileMaxScore));
} else {
- assertTrue("Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]",
- nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
+ assertEquals("Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]",
+ vanillaMaxScore, profileMaxScore, 0.001);
}
assertThat(
diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
index b2a7c045ddce9..2cab6e995b25c 100644
--- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
+++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
@@ -556,20 +556,17 @@ public void testDateRangeInQueryStringWithTimeZone_10477() {
}
public void testTypeFilter() throws Exception {
- assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)));
+ assertAcked(prepareCreate("test"));
indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
- client().prepareIndex("test", "type2", "1").setSource("field1", "value1"),
- client().prepareIndex("test", "type1", "2").setSource("field1", "value1"),
- client().prepareIndex("test", "type2", "2").setSource("field1", "value1"),
- client().prepareIndex("test", "type2", "3").setSource("field1", "value1"));
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1"));
assertHitCount(client().prepareSearch().setQuery(typeQuery("type1")).get(), 2L);
- assertHitCount(client().prepareSearch().setQuery(typeQuery("type2")).get(), 3L);
+ assertHitCount(client().prepareSearch().setQuery(typeQuery("type2")).get(), 0L);
assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2L);
- assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 3L);
+ assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 0L);
- assertHitCount(client().prepareSearch().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 5L);
+ assertHitCount(client().prepareSearch().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 2L);
}
public void testIdsQueryTestsIdIndexed() throws Exception {
@@ -1220,38 +1217,6 @@ public void testBasicQueryById() throws Exception {
assertThat(searchResponse.getHits().getHits().length, equalTo(3));
}
- public void testBasicQueryByIdMultiType() throws Exception {
- assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)));
-
- client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
- client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
- refresh();
-
- SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2").addIds("1", "2")).get();
- assertHitCount(searchResponse, 2L);
- assertThat(searchResponse.getHits().getHits().length, equalTo(2));
-
- searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1")).get();
- assertHitCount(searchResponse, 1L);
- assertThat(searchResponse.getHits().getHits().length, equalTo(1));
-
- searchResponse = client().prepareSearch().setQuery(idsQuery().addIds("1", "2")).get();
- assertHitCount(searchResponse, 2L);
- assertThat(searchResponse.getHits().getHits().length, equalTo(2));
-
- searchResponse = client().prepareSearch().setQuery(idsQuery("type1").addIds("1", "2")).get();
- assertHitCount(searchResponse, 1L);
- assertThat(searchResponse.getHits().getHits().length, equalTo(1));
-
- searchResponse = client().prepareSearch().setQuery(idsQuery(Strings.EMPTY_ARRAY).addIds("1")).get();
- assertHitCount(searchResponse, 1L);
- assertThat(searchResponse.getHits().getHits().length, equalTo(1));
-
- searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "type3").addIds("1", "2", "3", "4")).get();
- assertHitCount(searchResponse, 2L);
- assertThat(searchResponse.getHits().getHits().length, equalTo(2));
- }
-
public void testNumericTermsAndRanges() throws Exception {
assertAcked(prepareCreate("test")
diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
index 9a9797734b65f..75ac542d9853a 100644
--- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java
@@ -31,6 +31,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -55,6 +56,7 @@
import static java.util.Collections.emptyList;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
+import static org.hamcrest.Matchers.containsString;
public class QueryRescorerBuilderTests extends ESTestCase {
@@ -262,8 +264,8 @@ public void testUnknownFieldsExpection() throws IOException {
"}\n";
{
XContentParser parser = createParser(rescoreElement);
- Exception e = expectThrows(ParsingException.class, () -> RescorerBuilder.parseFromXContent(parser));
- assertEquals("[query] failed to parse field [rescore_query]", e.getMessage());
+ Exception e = expectThrows(XContentParseException.class, () -> RescorerBuilder.parseFromXContent(parser));
+ assertThat(e.getMessage(), containsString("[query] failed to parse field [rescore_query]"));
}
rescoreElement = "{\n" +
diff --git a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java
index 96582025e1af7..f2005905c1e0c 100644
--- a/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java
+++ b/server/src/test/java/org/elasticsearch/search/scroll/SearchScrollIT.java
@@ -534,7 +534,7 @@ public void testScrollInvalidDefaultKeepAlive() throws IOException {
client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put("search.max_keep_alive", "1m").put("search.default_keep_alive", "2m")).get ());
- assertThat(exc.getMessage(), containsString("was (2 minutes > 1 minute)"));
+ assertThat(exc.getMessage(), containsString("was (2m > 1m)"));
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put("search.default_keep_alive", "5m").put("search.max_keep_alive", "5m")).get());
@@ -548,14 +548,14 @@
exc = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")).get());
- assertThat(exc.getMessage(), containsString("was (3 minutes > 2 minutes)"));
+ assertThat(exc.getMessage(), containsString("was (3m > 2m)"));
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put("search.default_keep_alive", "1m")).get());
exc = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")).get());
- assertThat(exc.getMessage(), containsString("was (1 minute > 30 seconds)"));
+ assertThat(exc.getMessage(), containsString("was (1m > 30s)"));
}
public void testInvalidScrollKeepAlive() throws IOException {
@@ -577,7 +577,7 @@
IllegalArgumentException illegalArgumentException =
(IllegalArgumentException) ExceptionsHelper.unwrap(exc, IllegalArgumentException.class);
assertNotNull(illegalArgumentException);
- assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (2 hours) is too large"));
+ assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (2h) is too large"));
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
@@ -594,7 +594,7 @@
illegalArgumentException =
(IllegalArgumentException) ExceptionsHelper.unwrap(exc, IllegalArgumentException.class);
assertNotNull(illegalArgumentException);
- assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (3 hours) is too large"));
+ assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (3h) is too large"));
}
private void assertToXContentResponse(ClearScrollResponse response, boolean succeed, int numFreed) throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java
index b9f73869ba7ab..2227cbb806b3f 100644
--- a/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java
+++ b/server/src/test/java/org/elasticsearch/search/slice/SearchSliceIT.java
@@ -104,7 +104,7 @@ public void testDocIdSort() throws Exception {
int numDocs = (int) sr.getHits().getTotalHits();
assertThat(numDocs, equalTo(NUM_DOCS));
int max = randomIntBetween(2, numShards*3);
- for (String field : new String[]{"_uid", "random_int", "static_int"}) {
+ for (String field : new String[]{"_id", "random_int", "static_int"}) {
int fetchSize = randomIntBetween(10, 100);
SearchRequestBuilder request = client().prepareSearch("test")
.setQuery(matchAllQuery())
@@ -125,7 +125,7 @@ public void testNumericSort() throws Exception {
assertThat(numDocs, equalTo(NUM_DOCS));
int max = randomIntBetween(2, numShards*3);
- for (String field : new String[]{"_uid", "random_int", "static_int"}) {
+ for (String field : new String[]{"_id", "random_int", "static_int"}) {
int fetchSize = randomIntBetween(10, 100);
SearchRequestBuilder request = client().prepareSearch("test")
.setQuery(matchAllQuery())
diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java
index f6f147fc334de..75802e92ee176 100644
--- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java
@@ -40,8 +40,8 @@
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
+import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.test.ESTestCase;
@@ -158,9 +158,9 @@ public Query existsQuery(QueryShardContext context) {
return null;
}
};
- fieldType.setName(UidFieldMapper.NAME);
+ fieldType.setName(IdFieldMapper.NAME);
fieldType.setHasDocValues(false);
- when(context.fieldMapper(UidFieldMapper.NAME)).thenReturn(fieldType);
+ when(context.fieldMapper(IdFieldMapper.NAME)).thenReturn(fieldType);
when(context.getIndexReader()).thenReturn(reader);
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
@@ -225,7 +225,7 @@ public Query existsQuery(QueryShardContext context) {
Map<Integer, AtomicInteger> numSliceMap = new HashMap<>();
for (int i = 0; i < numSlices; i++) {
for (int j = 0; j < numShards; j++) {
- SliceBuilder slice = new SliceBuilder("_uid", i, numSlices);
+ SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
Query q = slice.toFilter(context, j, numShards);
if (q instanceof TermsSliceQuery || q instanceof MatchAllDocsQuery) {
AtomicInteger count = numSliceMap.get(j);
@@ -254,7 +254,7 @@ public Query existsQuery(QueryShardContext context) {
List targetShards = new ArrayList<>();
for (int i = 0; i < numSlices; i++) {
for (int j = 0; j < numShards; j++) {
- SliceBuilder slice = new SliceBuilder("_uid", i, numSlices);
+ SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
Query q = slice.toFilter(context, j, numShards);
if (q instanceof MatchNoDocsQuery == false) {
assertThat(q, instanceOf(MatchAllDocsQuery.class));
@@ -270,7 +270,7 @@ public Query existsQuery(QueryShardContext context) {
numSlices = numShards;
for (int i = 0; i < numSlices; i++) {
for (int j = 0; j < numShards; j++) {
- SliceBuilder slice = new SliceBuilder("_uid", i, numSlices);
+ SliceBuilder slice = new SliceBuilder("_id", i, numSlices);
Query q = slice.toFilter(context, j, numShards);
if (i == j) {
assertThat(q, instanceOf(MatchAllDocsQuery.class));
@@ -311,4 +311,68 @@ public Query existsQuery(QueryShardContext context) {
assertThat(exc.getMessage(), containsString("cannot load numeric doc values"));
}
}
+
+
+ public void testToFilterDeprecationMessage() throws IOException {
+ Directory dir = new RAMDirectory();
+ try (IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())))) {
+ writer.commit();
+ }
+ QueryShardContext context = mock(QueryShardContext.class);
+ try (IndexReader reader = DirectoryReader.open(dir)) {
+ MappedFieldType fieldType = new MappedFieldType() {
+ @Override
+ public MappedFieldType clone() {
+ return null;
+ }
+
+ @Override
+ public String typeName() {
+ return null;
+ }
+
+ @Override
+ public Query termQuery(Object value, @Nullable QueryShardContext context) {
+ return null;
+ }
+
+ public Query existsQuery(QueryShardContext context) {
+ return null;
+ }
+ };
+ fieldType.setName("_uid");
+ fieldType.setHasDocValues(false);
+ when(context.fieldMapper("_uid")).thenReturn(fieldType);
+ when(context.getIndexReader()).thenReturn(reader);
+ Settings settings = Settings.builder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+ IndexMetaData indexState = IndexMetaData.builder("index").settings(settings).build();
+ IndexSettings indexSettings = new IndexSettings(indexState, Settings.EMPTY);
+ when(context.getIndexSettings()).thenReturn(indexSettings);
+ SliceBuilder builder = new SliceBuilder("_uid", 5, 10);
+ Query query = builder.toFilter(context, 0, 1);
+ assertThat(query, instanceOf(TermsSliceQuery.class));
+ assertThat(builder.toFilter(context, 0, 1), equalTo(query));
+ assertWarnings("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead");
+ }
+
+ }
+
+ public void testSerializationBackcompat() throws IOException {
+ SliceBuilder sliceBuilder = new SliceBuilder(1, 5);
+ assertEquals(IdFieldMapper.NAME, sliceBuilder.getField());
+
+ SliceBuilder copy62 = copyWriteable(sliceBuilder,
+ new NamedWriteableRegistry(Collections.emptyList()),
+ SliceBuilder::new, Version.V_6_2_0);
+ assertEquals(sliceBuilder, copy62);
+
+ SliceBuilder copy63 = copyWriteable(copy62,
+ new NamedWriteableRegistry(Collections.emptyList()),
+ SliceBuilder::new, Version.V_6_3_0);
+ assertEquals(sliceBuilder, copy63);
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
index 89c1537b8f169..9a24d8a50550a 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/FieldSortIT.java
@@ -1382,17 +1382,17 @@ public void testSortMetaField() throws Exception {
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchAllQuery())
.setSize(randomIntBetween(1, numDocs + 5))
- .addSort("_uid", order)
+ .addSort("_id", order)
.execute().actionGet();
assertNoFailures(searchResponse);
SearchHit[] hits = searchResponse.getHits().getHits();
BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
for (int i = 0; i < hits.length; ++i) {
- String uidString = Uid.createUid(hits[i].getType(), hits[i].getId());
- final BytesRef uid = new BytesRef(uidString);
- assertEquals(uidString, hits[i].getSortValues()[0]);
- assertThat(previous, order == SortOrder.ASC ? lessThan(uid) : greaterThan(uid));
- previous = uid;
+ String idString = hits[i].getId();
+ final BytesRef id = new BytesRef(idString);
+ assertEquals(idString, hits[i].getSortValues()[0]);
+ assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id));
+ previous = id;
}
}
diff --git a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
index 9a28740d7271f..ed83011c26609 100644
--- a/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/search/sort/ScriptSortBuilderTests.java
@@ -25,6 +25,7 @@
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
@@ -50,6 +51,7 @@
import java.util.Set;
import static org.elasticsearch.search.sort.NestedSortBuilderTests.createRandomNestedSort;
+import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;
public class ScriptSortBuilderTests extends AbstractSortTestCase<ScriptSortBuilder> {
@@ -246,8 +248,8 @@ public void testParseUnexpectedToken() throws IOException {
parser.nextToken();
parser.nextToken();
- Exception e = expectThrows(ParsingException.class, () -> ScriptSortBuilder.fromXContent(parser, null));
- assertEquals("[_script] script doesn't support values of type: START_ARRAY", e.getMessage());
+ Exception e = expectThrows(XContentParseException.class, () -> ScriptSortBuilder.fromXContent(parser, null));
+ assertThat(e.getMessage(), containsString("[_script] script doesn't support values of type: START_ARRAY"));
}
/**
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java
index 9c62bb28483c1..b9da305e132f5 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java
@@ -28,10 +28,12 @@
import org.apache.lucene.search.suggest.document.ContextSuggestField;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -403,8 +405,8 @@ public void testNULLQueryContextParsingBasic() throws Exception {
XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
CategoryContextMapping mapping = ContextBuilder.category("cat").build();
- Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(parser));
- assertEquals("category context must be an object, string, number or boolean", e.getMessage());
+ XContentParseException e = expectThrows(XContentParseException.class, () -> mapping.parseQueryContext(parser));
+ assertThat(ExceptionsHelper.detailedMessage(e), containsString("category context must be an object, string, number or boolean"));
}
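// [Editor's sketch, not part of the patch] The assertion pattern these parsing
// tests converge on: ObjectParser-based parsing now surfaces failures as
// XContentParseException, whose message carries a location prefix (e.g. "[3:17]"),
// so exact-match assertEquals is replaced by containsString, unwrapping nested
// causes through ExceptionsHelper.detailedMessage where needed.
//
//   XContentParseException e = expectThrows(XContentParseException.class,
//       () -> mapping.parseQueryContext(parser));
//   assertThat(ExceptionsHelper.detailedMessage(e),
//       containsString("category context must be an object, string, number or boolean"));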
public void testQueryContextParsingArray() throws Exception { @@ -460,8 +462,8 @@ public void testQueryContextParsingMixedTypeValuesArrayHavingNULL() throws Excep XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(parser)); - assertEquals("category context must be an object, string, number or boolean", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> mapping.parseQueryContext(parser)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("category context must be an object, string, number or boolean")); } public void testQueryContextParsingObject() throws Exception { @@ -518,8 +520,8 @@ public void testQueryContextParsingObjectHavingNULL() throws Exception { XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(parser)); - assertEquals("category context must be a string, number or boolean", e.getMessage()); + Exception e = expectThrows(XContentParseException.class, () -> mapping.parseQueryContext(parser)); + assertThat(e.getMessage(), containsString("category context must be a string, number or boolean")); } public void testQueryContextParsingObjectArray() throws Exception { @@ -619,8 +621,8 @@ public void testQueryContextParsingMixedTypeObjectArrayHavingNULL() throws Excep XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(parser)); - assertEquals("category context must be a string, number or boolean", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> mapping.parseQueryContext(parser)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("category context must be a string, number or boolean")); } @@ -676,8 +678,8 @@ public void testQueryContextParsingMixedHavingNULL() throws Exception { XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); CategoryContextMapping mapping = ContextBuilder.category("cat").build(); - Exception e = expectThrows(ElasticsearchParseException.class, () -> mapping.parseQueryContext(parser)); - assertEquals("category context must be an object, string, number or boolean", e.getMessage()); + XContentParseException e = expectThrows(XContentParseException.class, () -> mapping.parseQueryContext(parser)); + assertThat(ExceptionsHelper.detailedMessage(e), containsString("category context must be an object, string, number or boolean")); } public void testUnknownQueryContextParsing() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java index 1d058350a98a5..7764f269a03b3 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java @@ -19,15 +19,20 @@ package 
org.elasticsearch.search.suggest.completion; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.suggest.completion.context.GeoQueryContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; public class GeoQueryContextTests extends QueryContextTestCase<GeoQueryContext> { @@ -105,4 +110,36 @@ public void testIllegalArguments() { assertEquals(e.getMessage(), "neighbour value must be between 1 and 12"); } } + + public void testStringPrecision() throws IOException { + XContentBuilder builder = jsonBuilder().startObject(); + { + builder.startObject("context").field("lat", 23.654242).field("lon", 90.047153).endObject(); + builder.field("boost", 10); + builder.field("precision", 12); + builder.array("neighbours", 1, 2); + } + builder.endObject(); + XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + GeoQueryContext queryContext = fromXContent(parser); + assertEquals(10, queryContext.getBoost()); + assertEquals(12, queryContext.getPrecision()); + assertEquals(Arrays.asList(1, 2), queryContext.getNeighbours()); + + builder = jsonBuilder().startObject(); + { + builder.startObject("context").field("lat", 23.654242).field("lon", 90.047153).endObject(); + builder.field("boost", 10); + builder.field("precision", "12m"); + builder.array("neighbours", "4km", "10km"); + } + builder.endObject(); + parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); + parser.nextToken(); + queryContext = fromXContent(parser); + assertEquals(10, queryContext.getBoost()); + assertEquals(9, queryContext.getPrecision()); + assertEquals(Arrays.asList(6, 5), queryContext.getNeighbours()); + } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java index 33b638286b4ae..ebfac5f58ef77 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGeneratorTests.java @@ -24,12 +24,12 @@ import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.search.spell.LuceneLevenshteinDistance; import org.apache.lucene.search.spell.NGramDistance; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -43,6 +43,7 @@ import java.util.function.Supplier; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.containsString; import 
static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -175,12 +176,12 @@ public void testIllegalXContent() throws IOException { // test bad value for field (e.g. size expects an int) directGenerator = "{ \"size\" : \"xxl\" }"; - assertIllegalXContent(directGenerator, ParsingException.class, + assertIllegalXContent(directGenerator, XContentParseException.class, "[direct_generator] failed to parse field [size]"); // test unexpected token directGenerator = "{ \"size\" : [ \"xxl\" ] }"; - assertIllegalXContent(directGenerator, ParsingException.class, + assertIllegalXContent(directGenerator, XContentParseException.class, "[direct_generator] size doesn't support values of type: START_ARRAY"); } @@ -188,7 +189,7 @@ private void assertIllegalXContent(String directGenerator, Class<? extends Exception> exceptionClass, String exceptionMsg) throws IOException { XContentParser parser = createParser(JsonXContent.jsonXContent, directGenerator); Exception e = expectThrows(exceptionClass, () -> DirectCandidateGeneratorBuilder.PARSER.apply(parser, null)); - assertEquals(exceptionMsg, e.getMessage()); + assertThat(e.getMessage(), containsString(exceptionMsg)); } /** diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index 8431c8fa69f54..777918a7d5eba 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -96,7 +96,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { .values().stream().map(status -> status.asCopy().getStage()).collect(Collectors.toList()); assertThat(stages, hasSize(shards)); assertThat(stages, everyItem(equalTo(IndexShardSnapshotStatus.Stage.DONE))); - }); + }, 30L, TimeUnit.SECONDS); logger.info("--> stop disrupting cluster"); networkDisruption.stopDisrupting(); @@ -110,6 +110,6 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { logger.info("Snapshot status [{}], successfulShards [{}]", snapshotInfo.state(), snapshotInfo.successfulShards()); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.successfulShards(), equalTo(shards)); - }, 10, TimeUnit.SECONDS); + }, 30L, TimeUnit.SECONDS); } } diff --git a/server/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java b/server/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java deleted file mode 100644 index de275eaffca7b..0000000000000 --- a/server/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.test.hamcrest; - - -public class DoubleMatcher { - - /** - * Better floating point comparisons courtesy of https://github.com/brazzy/floating-point-gui.de - * - * Snippet adapted to use doubles instead of floats - */ - public static boolean nearlyEqual(double a, double b, double epsilon) { - final double absA = Math.abs(a); - final double absB = Math.abs(b); - final double diff = Math.abs(a - b); - - if (a == b) { // shortcut, handles infinities - return true; - } else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) { - // a or b is zero or both are extremely close to it - // relative error is less meaningful here - return diff < (epsilon * Double.MIN_NORMAL); - } else { // use relative error - return diff / Math.min((absA + absB), Double.MAX_VALUE) < epsilon; - } - } -} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java new file mode 100644 index 0000000000000..a497e509c1577 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.transport.RemoteClusterConnectionTests.startTransport; + +public class RemoteClusterClientTests extends ESTestCase { + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + public void testConnectAndExecuteRequest() throws Exception { + Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build(); + try (MockTransportService remoteTransport = startTransport("remote_node", Collections.emptyList(), Version.CURRENT, threadPool, + remoteSettings)) { + DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + + Settings localSettings = Settings.builder() + .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true) + .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); + try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + assertTrue(remoteClusterService.isRemoteNodeConnected("test", remoteNode)); + Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().execute().get(); + assertNotNull(clusterStateResponse); + assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value()); + // also test a failure, there is no handler for search registered + ActionNotFoundTransportException ex = expectThrows(ActionNotFoundTransportException.class, + () -> client.prepareSearch().get()); + assertEquals("No handler for action [indices:data/read/search]", ex.getMessage()); + } + } + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29547") + public void testEnsureWeReconnect() throws Exception { + Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build(); + try (MockTransportService remoteTransport = startTransport("remote_node", Collections.emptyList(), Version.CURRENT, threadPool, + remoteSettings)) { + DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + Settings localSettings = Settings.builder() + .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true) + .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); + try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + service.disconnectFromNode(remoteNode); + 
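// The explicit disconnectFromNode(remoteNode) above, confirmed by the assertBusy
// check that follows, sets up the case this test exists for: the subsequent
// getRemoteClusterClient(...) call is expected to reconnect on demand before
// serving the cluster-state request, which is the behaviour tracked by the
// @AwaitsFix issue on this test.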
RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + assertBusy(() -> assertFalse(remoteClusterService.isRemoteNodeConnected("test", remoteNode))); + Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().execute().get(); + assertNotNull(clusterStateResponse); + assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value()); + } + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/update/UpdateIT.java b/server/src/test/java/org/elasticsearch/update/UpdateIT.java index 0f7e242a4cb80..df5bdd8322f37 100644 --- a/server/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -225,7 +225,7 @@ public void testUpsertDoc() throws Exception { UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setDocAsUpsert(true) - .setFields("_source") + .setFetchSource(true) .execute().actionGet(); assertThat(updateResponse.getIndex(), equalTo("test")); assertThat(updateResponse.getGetResult(), notNullValue()); @@ -241,7 +241,7 @@ public void testNotUpsertDoc() throws Exception { assertThrows(client().prepareUpdate(indexOrAlias(), "type1", "1") .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setDocAsUpsert(false) - .setFields("_source") + .setFetchSource(true) .execute(), DocumentMissingException.class); } @@ -264,7 +264,7 @@ public void testUpsertFields() throws Exception { updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject()) .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, PUT_VALUES_SCRIPT, Collections.singletonMap("extra", "foo"))) - .setFields("_source") + .setFetchSource(true) .execute().actionGet(); assertThat(updateResponse.getIndex(), equalTo("test")); @@ -293,12 +293,9 @@ public void testUpdate() throws Exception { ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); - try { - client().prepareUpdate(indexOrAlias(), "type1", "1").setScript(fieldIncScript).execute().actionGet(); - fail(); - } catch (DocumentMissingException e) { - // all is well - } + DocumentMissingException ex = expectThrows(DocumentMissingException.class, + () -> client().prepareUpdate(indexOrAlias(), "type1", "1").setScript(fieldIncScript).execute().actionGet()); + assertEquals("[type1][1]: document missing", ex.getMessage()); client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); @@ -353,19 +350,6 @@ public void testUpdate() throws Exception { assertThat(getResponse.isExists(), equalTo(false)); } - // check fields parameter - client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); - updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") - .setScript(fieldIncScript) - .setFields("field") - .setFetchSource(true) - .execute().actionGet(); - assertThat(updateResponse.getIndex(), equalTo("test")); - assertThat(updateResponse.getGetResult(), notNullValue()); - assertThat(updateResponse.getGetResult().getIndex(), equalTo("test")); - assertThat(updateResponse.getGetResult().sourceRef(), notNullValue()); - 
assertThat(updateResponse.getGetResult().field("field").getValue(), notNullValue()); - // check _source parameter client().prepareIndex("test", "type1", "1").setSource("field1", 1, "field2", 2).execute().actionGet(); updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") @@ -383,7 +367,7 @@ public void testUpdate() throws Exception { // check updates without script // add new field client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet(); - updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()).execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()).execute().actionGet(); for (int i = 0; i < 5; i++) { GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1")); @@ -391,7 +375,7 @@ public void testUpdate() throws Exception { } // change existing field - updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).execute().actionGet(); for (int i = 0; i < 5; i++) { GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3")); @@ -409,7 +393,7 @@ public void testUpdate() throws Exception { testMap.put("map1", 8); client().prepareIndex("test", "type1", "1").setSource("map", testMap).execute().actionGet(); - updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet(); + client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet(); for (int i = 0; i < 5; i++) { GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet(); Map map1 = (Map) getResponse.getSourceAsMap().get("map"); @@ -462,19 +446,8 @@ public void testUpdateRequestWithScriptAndShouldUpsertDoc() throws Exception { public void testContextVariables() throws Exception { assertAcked(prepareCreate("test") - .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id)) .addAlias(new Alias("alias")) - .addMapping("type1", XContentFactory.jsonBuilder() - .startObject() - .startObject("type1") - .endObject() - .endObject()) - .addMapping("subtype1", XContentFactory.jsonBuilder() - .startObject() - .startObject("subtype1") - .startObject("_parent").field("type", "type1").endObject() - .endObject() - .endObject()) + .addMapping("type1") ); ensureGreen(); @@ -482,50 +455,47 @@ public void testContextVariables() throws Exception { client().prepareIndex() .setIndex("test") .setType("type1") - .setId("parentId1") - .setSource("field1", 0, "content", "bar") + .setId("id1") + .setRouting("routing1") + .setSource("field1", 1, "content", "foo") .execute().actionGet(); client().prepareIndex() .setIndex("test") - .setType("subtype1") - .setId("id1") - .setParent("parentId1") - .setRouting("routing1") - .setSource("field1", 1, "content", "foo") + .setType("type1") + .setId("id2") + 
.setSource("field1", 0, "content", "bar") .execute().actionGet(); // Update the first object and note context variables values - UpdateResponse updateResponse = client().prepareUpdate("test", "subtype1", "id1") + UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "id1") .setRouting("routing1") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, EXTRACT_CTX_SCRIPT, Collections.emptyMap())) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); - GetResponse getResponse = client().prepareGet("test", "subtype1", "id1").setRouting("routing1").execute().actionGet(); + GetResponse getResponse = client().prepareGet("test", "type1", "id1").setRouting("routing1").execute().actionGet(); Map updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); assertEquals("test", updateContext.get("_index")); - assertEquals("subtype1", updateContext.get("_type")); + assertEquals("type1", updateContext.get("_type")); assertEquals("id1", updateContext.get("_id")); assertEquals(1, updateContext.get("_version")); - assertEquals("parentId1", updateContext.get("_parent")); assertEquals("routing1", updateContext.get("_routing")); // Idem with the second object - updateResponse = client().prepareUpdate("test", "type1", "parentId1") + updateResponse = client().prepareUpdate("test", "type1", "id2") .setScript(new Script(ScriptType.INLINE, UPDATE_SCRIPTS, EXTRACT_CTX_SCRIPT, Collections.emptyMap())) .execute().actionGet(); assertEquals(2, updateResponse.getVersion()); - getResponse = client().prepareGet("test", "type1", "parentId1").execute().actionGet(); + getResponse = client().prepareGet("test", "type1", "id2").execute().actionGet(); updateContext = (Map) getResponse.getSourceAsMap().get("update_context"); assertEquals("test", updateContext.get("_index")); assertEquals("type1", updateContext.get("_type")); - assertEquals("parentId1", updateContext.get("_id")); + assertEquals("id2", updateContext.get("_id")); assertEquals(1, updateContext.get("_version")); - assertNull(updateContext.get("_parent")); assertNull(updateContext.get("_routing")); assertNull(updateContext.get("_ttl")); } @@ -595,7 +565,7 @@ public void run() { assertThat(response.getId(), equalTo(Integer.toString(i))); assertThat(response.isExists(), equalTo(true)); assertThat(response.getVersion(), equalTo((long) numberOfThreads)); - assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads)); + assertThat(response.getSource().get("field"), equalTo(numberOfThreads)); } } diff --git a/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java b/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java index 17fb21441e21d..2cb71d9bcbe0a 100644 --- a/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java +++ b/server/src/test/java/org/elasticsearch/update/UpdateNoopIT.java @@ -248,7 +248,7 @@ private UpdateResponse update(Boolean detectNoop, long expectedVersion, XContent UpdateRequestBuilder updateRequest = client().prepareUpdate("test", "type1", "1") .setDoc(xContentBuilder) .setDocAsUpsert(true) - .setFields("_source"); + .setFetchSource(true); if (detectNoop != null) { updateRequest.setDetectNoop(detectNoop); } diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 6082151721501..c4bf2518a9f8f 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ 
b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -140,6 +140,7 @@ public class BootstrapForTesting { addClassCodebase(codebases,"plugin-classloader", "org.elasticsearch.plugins.ExtendedPluginsClassLoader"); addClassCodebase(codebases,"elasticsearch-nio", "org.elasticsearch.nio.ChannelFactory"); addClassCodebase(codebases, "elasticsearch-secure-sm", "org.elasticsearch.secure_sm.SecureSM"); + addClassCodebase(codebases, "elasticsearch-rest-client", "org.elasticsearch.client.RestClient"); } final Policy testFramework = Security.readPolicy(Bootstrap.class.getResource("test-framework.policy"), codebases); final Policy esPolicy = new ESPolicy(codebases, perms, getPluginPermissions(), true); diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 667adf9d990cc..dea92c2927d86 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -84,6 +84,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.function.LongSupplier; import java.util.function.ToLongBiFunction; @@ -110,6 +111,8 @@ public abstract class EngineTestCase extends ESTestCase { protected String codecName; protected Path primaryTranslogDir; protected Path replicaTranslogDir; + // A default primary term is used by engine instances created in this test. + protected AtomicLong primaryTerm = new AtomicLong(); protected static void assertVisibleCount(Engine engine, int numDocs) throws IOException { assertVisibleCount(engine, numDocs, true); @@ -130,7 +133,7 @@ protected static void assertVisibleCount(Engine engine, int numDocs, boolean ref @Before public void setUp() throws Exception { super.setUp(); - + primaryTerm.set(randomLongBetween(1, Long.MAX_VALUE)); CodecService codecService = new CodecService(null, logger); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { @@ -178,7 +181,7 @@ public EngineConfig copy(EngineConfig config, LongSupplier globalCheckpointSuppl new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), globalCheckpointSupplier); + config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier()); } public EngineConfig copy(EngineConfig config, Analyzer analyzer) { @@ -187,7 +190,7 @@ public EngineConfig copy(EngineConfig config, Analyzer analyzer) { new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier()); + config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier()); } @Override @@ -260,15 +263,16 @@ public Directory newDirectory() throws IOException { return new Store(shardId, indexSettings, 
directoryService, new DummyShardLock(shardId)); } - protected Translog createTranslog() throws IOException { - return createTranslog(primaryTranslogDir); + protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOException { + return createTranslog(primaryTranslogDir, primaryTermSupplier); } - protected Translog createTranslog(Path translogPath) throws IOException { + protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId); - return new Translog( - translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED); + String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId, + primaryTermSupplier.getAsLong()); + return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { @@ -366,8 +370,8 @@ private InternalEngine createEngine(@Nullable IndexWriterFactory indexWriterFact final Directory directory = store.directory(); if (Lucene.indexExists(directory) == false) { store.createEmpty(); - final String translogUuid = - Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUuid); } @@ -449,7 +453,7 @@ public void onFailedEngine(String reason, @Nullable Exception e) { new NoneCircuitBreakerService(), globalCheckpointSupplier == null ? new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED) : - globalCheckpointSupplier); + globalCheckpointSupplier, primaryTerm::get); return config; } @@ -475,12 +479,12 @@ protected Engine.Get newGet(boolean realtime, ParsedDocument doc) { } protected Engine.Index indexForDoc(ParsedDocument doc) { - return new Engine.Index(newUid(doc), doc); + return new Engine.Index(newUid(doc), primaryTerm.get(), doc); } protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo, boolean isRetry) { - return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL, + return new Engine.Index(newUid(doc), doc, seqNo, primaryTerm.get(), version, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, System.nanoTime(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index 983ced2a6edc8..4fe18fa973828 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -123,7 +123,7 @@ private Engine.Operation convertToEngineOp(Translog.Operation operation, Engine. 
mapperService.getIndexSettings().getIndexVersionCreated(), source(indexName, index.type(), index.id(), index.source(), XContentHelper.xContentType(index.source())) - .routing(index.routing()).parent(index.parent()), index.seqNo(), index.primaryTerm(), + .routing(index.routing()), index.seqNo(), index.primaryTerm(), index.version(), index.versionType().versionTypeForReplicationAndRecovery(), origin, index.getAutoGeneratedIdTimestamp(), true); return engineIndex; diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 2656855b9fd15..0d535d9af3851 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -105,6 +105,8 @@ */ public abstract class IndexShardTestCase extends ESTestCase { + public static final IndexEventListener EMPTY_EVENT_LISTENER = new IndexEventListener() {}; + protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state) { @@ -260,24 +262,25 @@ protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, final ShardId shardId = routing.shardId(); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); - return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, engineFactory, globalCheckpointSyncer, listeners); + return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, engineFactory, globalCheckpointSyncer, + EMPTY_EVENT_LISTENER, listeners); } /** * creates a new initializing shard. - * - * @param routing shard routing to use + * @param routing shard routing to use * @param shardPath path to use for shard data * @param indexMetaData indexMetaData for the shard, including any mapping * @param indexSearcherWrapper an optional wrapper to be used during searchers * @param globalCheckpointSyncer callback for syncing global checkpoints + * @param indexEventListener * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, - IndexingOperationListener... listeners) throws IOException { + IndexEventListener indexEventListener, IndexingOperationListener... 
listeners) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); final IndexShard indexShard; @@ -289,8 +292,6 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe indexSettings.getSettings(), "index"); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); - final IndexEventListener indexEventListener = new IndexEventListener() { - }; final Engine.Warmer warmer = searcher -> { }; ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -335,7 +336,7 @@ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Index null, current.engineFactory, current.getGlobalCheckpointSyncer(), - listeners); + EMPTY_EVENT_LISTENER, listeners); } /** @@ -548,15 +549,14 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { - return indexDoc(shard, type, id, source, XContentType.JSON, null, null); + return indexDoc(shard, type, id, source, XContentType.JSON, null); } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType, - String routing, String parentId) + String routing) throws IOException { SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType); sourceToParse.routing(routing); - sourceToParse.parent(parentId); if (shard.routingEntry().primary()) { final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type)); diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java index cad7b388430bb..58eb1df129291 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/RandomDocumentPicks.java @@ -145,11 +145,7 @@ public static IngestDocument randomIngestDocument(Random random, Map randomSource(Random random) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 04ac1d6cda026..1c02f960143ce 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -20,6 +20,7 @@ package org.elasticsearch.test; import com.fasterxml.jackson.core.io.JsonStringEncoder; + import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -55,11 +56,11 @@ import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import 
org.elasticsearch.common.xcontent.XContentGenerator; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -338,7 +339,7 @@ public final void testUnknownObjectException() throws IOException { if (expectedException) { fail("some parsing exception expected for query: " + testQuery); } - } catch (ParsingException | ElasticsearchParseException e) { + } catch (ParsingException | ElasticsearchParseException | XContentParseException e) { // different kinds of exception wordings depending on location // of mutation, so no simple asserts possible here if (expectedException == false) { @@ -742,10 +743,14 @@ public void testEqualsAndHashcode() { for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) { // TODO we only change name and boost, we should extend by any sub-test supplying a "mutate" method that randomly changes one // aspect of the object under test - checkEqualsAndHashCode(createTestQueryBuilder(), this::copyQuery, this::changeNameOrBoost); + checkEqualsAndHashCode(createTestQueryBuilder(), this::copyQuery, this::mutateInstance); } } + public QB mutateInstance(QB instance) throws IOException { + return changeNameOrBoost(instance); + } + /** * Generic test that checks that the Strings.toString() method * renders the XContent correctly. @@ -761,7 +766,7 @@ public void testValidOutput() throws IOException { } } - private QB changeNameOrBoost(QB original) throws IOException { + protected QB changeNameOrBoost(QB original) throws IOException { QB secondQuery = copyQuery(original); if (randomBoolean()) { secondQuery.queryName(secondQuery.queryName() == null ? 
randomAlphaOfLengthBetween(1, 30) : secondQuery.queryName() diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index a65b8b430e681..32c660cd5d24b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -135,6 +135,7 @@ import java.util.Objects; import java.util.Random; import java.util.Set; +import java.util.TimeZone; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -173,6 +174,9 @@ @LuceneTestCase.SuppressReproduceLine public abstract class ESTestCase extends LuceneTestCase { + private static final List<String> JODA_TIMEZONE_IDS; + private static final List<String> JAVA_TIMEZONE_IDS; + private static final AtomicInteger portGenerator = new AtomicInteger(); @AfterClass @@ -191,6 +195,14 @@ public static void resetPortCounter() { })); BootstrapForTesting.ensureInitialized(); + + List<String> jodaTZIds = new ArrayList<>(DateTimeZone.getAvailableIDs()); + Collections.sort(jodaTZIds); + JODA_TIMEZONE_IDS = Collections.unmodifiableList(jodaTZIds); + + List<String> javaTZIds = Arrays.asList(TimeZone.getAvailableIDs()); + Collections.sort(javaTZIds); + JAVA_TIMEZONE_IDS = Collections.unmodifiableList(javaTZIds); } protected final Logger logger = Loggers.getLogger(getClass()); @@ -669,9 +681,14 @@ public static String randomPositiveTimeValue() { * generate a random DateTimeZone from the ones available in joda library */ public static DateTimeZone randomDateTimeZone() { - List<String> ids = new ArrayList<>(DateTimeZone.getAvailableIDs()); - Collections.sort(ids); - return DateTimeZone.forID(randomFrom(ids)); + return DateTimeZone.forID(randomFrom(JODA_TIMEZONE_IDS)); + } + + /** + * generate a random TimeZone from the ones available in java.time + */ + public static TimeZone randomTimeZone() { + return TimeZone.getTimeZone(randomFrom(JAVA_TIMEZONE_IDS)); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java b/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java index 1d0eaa7ce5154..767b74e447230 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java @@ -20,8 +20,8 @@ package org.elasticsearch.test; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.core.internal.io.Streams; import java.io.FileNotFoundException; import java.io.IOException; @@ -36,7 +36,7 @@ public static String copyToStringFromClasspath(ClassLoader classLoader, String p if (is == null) { throw new FileNotFoundException("Resource [" + path + "] not found in classpath with class loader [" + classLoader + "]"); } - return Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8)); + return org.elasticsearch.common.io.Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8)); } public static String copyToStringFromClasspath(String path) throws IOException { @@ -44,7 +44,7 @@ public static String copyToStringFromClasspath(String path) throws IOException { if (is == null) { throw new FileNotFoundException("Resource [" + path + "] not found in classpath"); } - return Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8)); + return 
org.elasticsearch.common.io.Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8)); } public static byte[] copyToBytesFromClasspath(String path) throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index befc21eb1f697..90a1d2c7f1df2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -32,6 +32,7 @@ import org.apache.http.ssl.SSLContexts; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -491,6 +492,16 @@ private static void updateIndexSettings(String index, Settings settings) throws new StringEntity(Strings.toString(settings), ContentType.APPLICATION_JSON))); } + protected static Map<String, Object> getIndexSettings(String index) throws IOException { + Map<String, String> params = new HashMap<>(); + params.put("flat_settings", "true"); + Response response = client().performRequest(HttpGet.METHOD_NAME, index + "/_settings", params); + assertOK(response); + try (InputStream is = response.getEntity().getContent()) { + return XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + } + protected static boolean indexExists(String index) throws IOException { Response response = client().performRequest(HttpHead.METHOD_NAME, index); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); @@ -501,6 +512,11 @@ protected static void closeIndex(String index) throws IOException { assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } + protected static void openIndex(String index) throws IOException { + Response response = client().performRequest(HttpPost.METHOD_NAME, index + "/_open"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + } + protected static boolean aliasExists(String alias) throws IOException { Response response = client().performRequest(HttpHead.METHOD_NAME, "/_alias/" + alias); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode();
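The two helpers added above round out the index-management utilities on ESRestTestCase. A short usage sketch, assuming a hypothetical subclass and an index named "test-index" that the test has already created (both names are illustrative, not taken from the patch):

    import java.io.IOException;
    import java.util.Map;

    import org.elasticsearch.test.rest.ESRestTestCase;

    public class ReopenIndexIT extends ESRestTestCase {
        public void testSettingsVisibleAfterCloseAndReopen() throws IOException {
            closeIndex("test-index");
            openIndex("test-index");
            // getIndexSettings passes flat_settings=true, so the response maps the index
            // name to a settings object with fully qualified keys such as "index.number_of_shards"
            Map<String, Object> settings = getIndexSettings("test-index");
            assertNotNull(settings.get("test-index"));
        }
    }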