diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index f1e444dbde6e3..f7725bd17d78d 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -270,14 +270,14 @@ The REST tests are run automatically when executing the "./gradlew check" command.
 
 REST tests use the following command:
 
 ---------------------------------------------------------------------------
-./gradlew :distribution:integ-test-zip:integTest \
+./gradlew :distribution:archives:integ-test-zip:integTest \
   -Dtests.class="org.elasticsearch.test.rest.*Yaml*IT"
 ---------------------------------------------------------------------------
 
 A specific test case can be run with
 
 ---------------------------------------------------------------------------
-./gradlew :distribution:integ-test-zip:integTest \
+./gradlew :distribution:archives:integ-test-zip:integTest \
   -Dtests.class="org.elasticsearch.test.rest.*Yaml*IT" \
   -Dtests.method="test {p0=cat.shards/10_basic/Help}"
 ---------------------------------------------------------------------------
@@ -407,7 +407,7 @@ destructive. When working with a single package it's generally faster to run
 its tests in a tighter loop than Gradle provides. In one window:
 
 --------------------------------
-./gradlew :distribution:rpm:assemble
+./gradlew :distribution:packages:rpm:assemble
 --------------------------------
 
 and in another window:
diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy
index 3b97cfb5f4881..7de0ac62d429a 100644
--- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy
+++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy
@@ -100,12 +100,15 @@ class TestProgressLogger implements AggregatedEventListener {
 
     @Subscribe
     void onQuit(AggregatedQuitEvent e) throws IOException {
-        suiteLogger.completed()
-        testLogger.completed()
-        for (ProgressLogger slaveLogger : slaveLoggers) {
-            slaveLogger.completed()
+        // if onStart was never called (e.g. no matching tests), suiteLogger and all the other loggers will be null
+        if (suiteLogger != null) {
+            suiteLogger.completed()
+            testLogger.completed()
+            for (ProgressLogger slaveLogger : slaveLoggers) {
+                slaveLogger.completed()
+            }
+            parentProgressLogger.completed()
         }
-        parentProgressLogger.completed()
     }
 
     @Subscribe
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index 72018368a0fe6..b72d5696af720 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -558,7 +558,7 @@ class BuildPlugin implements Plugin<Project> {
         return {
             jvm "${project.runtimeJavaHome}/bin/java"
             parallelism System.getProperty('tests.jvms', 'auto')
-            ifNoTests 'fail'
+            ifNoTests System.getProperty('tests.ifNoTests', 'fail')
             onNonEmptyWorkDirectory 'wipe'
             leaveTemporary true
@@ -582,8 +582,6 @@ class BuildPlugin implements Plugin<Project> {
             systemProperty 'tests.task', path
             systemProperty 'tests.security.manager', 'true'
             systemProperty 'jna.nosys', 'true'
-            // default test sysprop values
-            systemProperty 'tests.ifNoTests', 'fail'
             // TODO: remove setting logging level via system property
             systemProperty 'tests.logger.level', 'WARN'
             for (Map.Entry property : System.properties.entrySet()) {
diff --git a/buildSrc/src/main/resources/forbidden/es-server-signatures.txt b/buildSrc/src/main/resources/forbidden/es-server-signatures.txt
index 89179350174a6..9db17aaac0e93 100644
--- a/buildSrc/src/main/resources/forbidden/es-server-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/es-server-signatures.txt
@@ -29,8 +29,6 @@ java.util.concurrent.Executors#privilegedThreadFactory()
 java.lang.Character#codePointBefore(char[],int) @ Implicit start offset is error-prone when the char[] is a buffer and the first chars are random chars
 java.lang.Character#codePointAt(char[],int) @ Implicit end offset is error-prone when the char[] is a buffer and the last chars are random chars
 
-java.io.StringReader#<init>(java.lang.String) @ Use FastStringReader instead
-
 @defaultMessage Reference management is tricky, leave it to SearcherManager
 org.apache.lucene.index.IndexReader#decRef()
 org.apache.lucene.index.IndexReader#incRef()
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
index b15e0e231946c..22421dec6d9b9 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
@@ -129,6 +129,7 @@ public void testSearch() throws Exception {
             SearchRequest searchRequest = new SearchRequest(); // <1>
             SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); // <2>
             searchSourceBuilder.query(QueryBuilders.matchAllQuery()); // <3>
+            searchRequest.source(searchSourceBuilder); // <4>
             // end::search-request-basic
         }
         {
diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index 93960a3ac21b2..bb59bc84f5385 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -21,6 +21,7 @@ import org.apache.tools.ant.taskdefs.condition.Os
 import org.apache.tools.ant.filters.FixCrLfFilter
 import org.elasticsearch.gradle.BuildPlugin
 import org.elasticsearch.gradle.EmptyDirTask
+import org.elasticsearch.gradle.LoggedExec
 import org.elasticsearch.gradle.MavenFilteringHack
 import org.elasticsearch.gradle.plugin.PluginBuildPlugin
 
@@ -125,19 +126,39 @@ subprojects {
   artifacts {
     'default' buildDist
   }
+
+  // sanity check that an archive can be extracted
+  File extractionDir = new File(buildDir, 'extracted')
+  task testExtraction(type: LoggedExec) {
+    dependsOn buildDist
+    doFirst {
+      project.delete(extractionDir)
+      extractionDir.mkdirs()
+    }
+  }
+  if (project.name.contains('zip')) {
+    testExtraction {
+      onlyIf { new File('/bin/unzip').exists() || new File('/usr/bin/unzip').exists() || new File('/usr/local/bin/unzip').exists() }
+      commandLine 'unzip', "${-> buildDist.outputs.files.singleFile}", '-d', extractionDir
+    }
+  } else { // tar
+    testExtraction {
+      onlyIf { new File('/bin/tar').exists() || new File('/usr/bin/tar').exists() || new File('/usr/local/bin/tar').exists() }
+      commandLine 'tar', '-xvzf', "${-> buildDist.outputs.files.singleFile}", '-C', extractionDir
+    }
+  }
+  check.dependsOn testExtraction
 }
 
 /*****************************************************************************
  *                            Rest test config                               *
  *****************************************************************************/
-subprojects {
+configure(subprojects.findAll { it.name == 'integ-test-zip' }) {
   apply plugin: 'elasticsearch.standalone-rest-test'
   apply plugin: 'elasticsearch.rest-test'
 
-  if (project.name == 'integ-test-zip') {
-    integTest {
-
includePackaged true - } + integTest { + includePackaged true } integTestCluster { diff --git a/distribution/archives/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java b/distribution/archives/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java deleted file mode 100644 index 391d6fe688fd5..0000000000000 --- a/distribution/archives/tar/src/test/java/org/elasticsearch/test/rest/TarClientYamlTestSuiteIT.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test.rest; - -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -/** Rest integration test. Runs against a cluster started by {@code gradle integTest} */ -public class TarClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - public TarClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return createParameters(); - } -} diff --git a/distribution/archives/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml b/distribution/archives/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml deleted file mode 100644 index da68232f8d8fb..0000000000000 --- a/distribution/archives/tar/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml +++ /dev/null @@ -1,13 +0,0 @@ -# Integration tests for distributions with modules -# -"Correct Modules Count": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - - length: { nodes.$master.modules: ${expected.modules.count} } diff --git a/distribution/archives/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java b/distribution/archives/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java deleted file mode 100644 index dc08af2528b52..0000000000000 --- a/distribution/archives/zip/src/test/java/org/elasticsearch/test/rest/ZipClientYamlTestSuiteIT.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.test.rest;
-
-import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
-
-import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
-import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
-
-/** Rest integration test. Runs against a cluster started by {@code gradle integTest} */
-public class ZipClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
-    public ZipClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) {
-        super(testCandidate);
-    }
-
-    @ParametersFactory
-    public static Iterable<Object[]> parameters() throws Exception {
-        return createParameters();
-    }
-}
diff --git a/distribution/archives/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml b/distribution/archives/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml
deleted file mode 100644
index da68232f8d8fb..0000000000000
--- a/distribution/archives/zip/src/test/resources/rest-api-spec/test/smoke_test_plugins/10_modules.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-# Integration tests for distributions with modules
-#
-"Correct Modules Count":
-  - do:
-      cluster.state: {}
-
-  # Get master node id
-  - set: { master_node: master }
-
-  - do:
-      nodes.info: {}
-
-  - length: { nodes.$master.modules: ${expected.modules.count} }
diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc
index 67e12352f6b38..2e8dda64286f4 100644
--- a/docs/java-rest/high-level/search/search.asciidoc
+++ b/docs/java-rest/high-level/search/search.asciidoc
@@ -18,6 +18,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-basic]
 <1> Creates the `SearchRequest`. Without arguments this runs against all indices.
 <2> Most search parameters are added to the `SearchSourceBuilder`. It offers setters for everything that goes into the search request body.
 <3> Add a `match_all` query to the `SearchSourceBuilder`.
+<4> Add the `SearchSourceBuilder` to the `SearchRequest`.
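For reference, the complete flow the four callouts above describe looks roughly like this
(a minimal sketch, assuming a `RestHighLevelClient` named `client` as elsewhere in these docs):

[source,java]
---------------------------------------------------------------------------
SearchRequest searchRequest = new SearchRequest();                   // runs against all indices
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); // holds the request body
searchSourceBuilder.query(QueryBuilders.matchAllQuery());            // match_all query
searchRequest.source(searchSourceBuilder);                           // the step this change adds
SearchResponse searchResponse = client.search(searchRequest);
---------------------------------------------------------------------------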
===== Optional arguments diff --git a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc index 25eb30eb2f142..633175dbf2825 100644 --- a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc @@ -20,8 +20,8 @@ A `bucket_sort` aggregation looks like this in isolation: { "bucket_sort": { "sort": [ - {"sort_field_1": {"order": "asc"},<1> - {"sort_field_2": {"order": "desc"}, + {"sort_field_1": {"order": "asc"}},<1> + {"sort_field_2": {"order": "desc"}}, "sort_field_3" ], "from": 1, diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java index c62a8fd237148..a8ca20485c451 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -18,10 +18,14 @@ */ package org.elasticsearch.ingest.common; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptEngine; @@ -33,6 +37,7 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; +import java.util.function.Consumer; import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; @@ -64,6 +69,66 @@ protected Map, Object>> pluginScripts() { } } + public void testScriptDisabled() throws Exception { + String pipelineIdWithoutScript = randomAlphaOfLengthBetween(5, 10); + String pipelineIdWithScript = pipelineIdWithoutScript + "_script"; + internalCluster().startNode(); + + BytesReference pipelineWithScript = new BytesArray("{\n" + + " \"processors\" : [\n" + + " {\"script\" : {\"lang\": \"" + MockScriptEngine.NAME + "\", \"source\": \"my_script\"}}\n" + + " ]\n" + + "}"); + BytesReference pipelineWithoutScript = new BytesArray("{\n" + + " \"processors\" : [\n" + + " {\"set\" : {\"field\": \"y\", \"value\": 0}}\n" + + " ]\n" + + "}"); + + Consumer checkPipelineExists = (id) -> assertThat(client().admin().cluster().prepareGetPipeline(id) + .get().pipelines().get(0).getId(), equalTo(id)); + + client().admin().cluster().preparePutPipeline(pipelineIdWithScript, pipelineWithScript, XContentType.JSON).get(); + client().admin().cluster().preparePutPipeline(pipelineIdWithoutScript, pipelineWithoutScript, XContentType.JSON).get(); + + checkPipelineExists.accept(pipelineIdWithScript); + checkPipelineExists.accept(pipelineIdWithoutScript); + + + internalCluster().stopCurrentMasterNode(); + internalCluster().startNode(Settings.builder().put("script.allowed_types", "none")); + + checkPipelineExists.accept(pipelineIdWithoutScript); + checkPipelineExists.accept(pipelineIdWithScript); + + client().prepareIndex("index", "doc", "1") + .setSource("x", 0) + .setPipeline(pipelineIdWithoutScript) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + + ElasticsearchException exception = 
expectThrows(ElasticsearchException.class, + () -> client().prepareIndex("index", "doc", "2") + .setSource("x", 0) + .setPipeline(pipelineIdWithScript) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get()); + assertThat(exception.getHeaderKeys(), equalTo(Sets.newHashSet("processor_type"))); + assertThat(exception.getHeader("processor_type"), equalTo(Arrays.asList("unknown"))); + assertThat(exception.getRootCause().getMessage(), + equalTo("pipeline with id [" + pipelineIdWithScript + "] could not be loaded, caused by " + + "[ElasticsearchParseException[Error updating pipeline with id [" + pipelineIdWithScript + "]]; " + + "nested: ElasticsearchException[java.lang.IllegalArgumentException: cannot execute [inline] scripts]; " + + "nested: IllegalArgumentException[cannot execute [inline] scripts];; " + + "ElasticsearchException[java.lang.IllegalArgumentException: cannot execute [inline] scripts]; " + + "nested: IllegalArgumentException[cannot execute [inline] scripts];; java.lang.IllegalArgumentException: " + + "cannot execute [inline] scripts]")); + + Map source = client().prepareGet("index", "doc", "1").get().getSource(); + assertThat(source.get("x"), equalTo(0)); + assertThat(source.get("y"), equalTo(0)); + } + public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exception { internalCluster().startNode(); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java index 16081b3dd1b12..5a0b2e15460c5 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java @@ -21,11 +21,11 @@ import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheFactory; +import java.io.StringReader; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.script.GeneralScriptException; import org.elasticsearch.script.Script; @@ -65,7 +65,7 @@ public T compile(String templateName, String templateSource, ScriptContext new MustacheExecutableScript(template, params); return context.factoryClazz.cast(compiled); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 618dafc6e94b2..24b210c29d584 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -54,6 +54,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -61,6 +62,7 @@ import java.util.Objects; import java.util.Set; import java.util.function.BiFunction; +import java.util.stream.Collectors; import static java.util.stream.Collectors.toSet; @@ -366,36 +368,38 @@ private static BiFunction booleanQuery() { Set extractions = new HashSet<>(); Set seenRangeFields = new HashSet<>(); for (Result result : results) { - QueryExtraction[] t = result.extractions.toArray(new 
QueryExtraction[1]);
-                if (result.extractions.size() == 1 && t[0].range != null) {
-                    // In case of range queries each extraction does not simply increment the minimum_should_match
-                    // for that percolator query like for a term based extraction, so that can lead to more false
-                    // positives for percolator queries with range queries than term based queries.
-                    // The is because the way number fields are extracted from the document to be percolated.
-                    // Per field a single range is extracted and if a percolator query has two or more range queries
-                    // on the same field, then the minimum should match can be higher than clauses in the CoveringQuery.
-                    // Therefore right now the minimum should match is incremented once per number field when processing
-                    // the percolator query at index time.
-                    if (seenRangeFields.add(t[0].range.fieldName)) {
-                        msm += 1;
-                    }
-                } else {
-                    // In case that there are duplicate query extractions we need to be careful with incrementing msm,
-                    // because that could lead to valid matches not becoming candidate matches:
-                    // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3)
-                    // doc:   field: val1 val2 val3
-                    // So lets be protective and decrease the msm:
-                    int resultMsm = result.minimumShouldMatch;
-                    for (QueryExtraction queryExtraction : result.extractions) {
-                        if (extractions.contains(queryExtraction)) {
-                            // To protect against negative msm:
-                            // (sub results could consist out of disjunction and conjunction and
-                            // then we do not know which extraction contributed to msm)
-                            resultMsm = Math.max(0, resultMsm - 1);
+                // In case that there are duplicate query extractions we need to be careful with incrementing msm,
+                // because that could lead to valid matches not becoming candidate matches:
+                // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3)
+                // doc:   field: val1 val2 val3
+                // So let's be protective and decrease the msm:
+                int resultMsm = result.minimumShouldMatch;
+                for (QueryExtraction queryExtraction : result.extractions) {
+                    if (queryExtraction.range != null) {
+                        // In case of range queries each extraction does not simply increment the minimum_should_match
+                        // for that percolator query like for a term based extraction, so that can lead to more false
+                        // positives for percolator queries with range queries than term based queries.
+                        // This is because of the way number fields are extracted from the document to be percolated.
+                        // Per field a single range is extracted and if a percolator query has two or more range queries
+                        // on the same field, then the minimum should match can be higher than clauses in the CoveringQuery.
+                        // Therefore right now the minimum should match is incremented once per number field when processing
+                        // the percolator query at index time.
+ if (seenRangeFields.add(queryExtraction.range.fieldName)) { + resultMsm = 1; + } else { + resultMsm = 0; } } - msm += resultMsm; + + if (extractions.contains(queryExtraction)) { + // To protect against negative msm: + // (sub results could consist out of disjunction and conjunction and + // then we do not know which extraction contributed to msm) + resultMsm = Math.max(0, resultMsm - 1); + } } + msm += resultMsm; + verified &= result.verified; matchAllDocs &= result.matchAllDocs; extractions.addAll(result.extractions); @@ -461,12 +465,17 @@ private static BiFunction functionScoreQuery() { return (query, version) -> { FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) query; Result result = analyze(functionScoreQuery.getSubQuery(), version); + // If min_score is specified we can't guarantee upfront that this percolator query matches, // so in that case we set verified to false. // (if it matches with the percolator document matches with the extracted terms. // Min score filters out docs, which is different than the functions, which just influences the score.) - boolean verified = functionScoreQuery.getMinScore() == null; - return new Result(verified, result.extractions, result.minimumShouldMatch); + boolean verified = result.verified && functionScoreQuery.getMinScore() == null; + if (result.matchAllDocs) { + return new Result(result.matchAllDocs, verified); + } else { + return new Result(verified, result.extractions, result.minimumShouldMatch); + } }; } @@ -518,8 +527,7 @@ private static BiFunction toParentBlockJoinQuery() { private static Result handleDisjunction(List disjunctions, int requiredShouldClauses, boolean otherClauses, Version version) { // Keep track of the msm for each clause: - int[] msmPerClause = new int[disjunctions.size()]; - String[] rangeFieldNames = new String[disjunctions.size()]; + List clauses = new ArrayList<>(disjunctions.size()); boolean verified = otherClauses == false; if (version.before(Version.V_6_1_0)) { verified &= requiredShouldClauses <= 1; @@ -535,17 +543,14 @@ private static Result handleDisjunction(List disjunctions, int requiredSh } int resultMsm = subResult.minimumShouldMatch; for (QueryExtraction extraction : subResult.extractions) { - if (terms.contains(extraction)) { - resultMsm = Math.max(1, resultMsm - 1); + if (terms.add(extraction) == false) { + resultMsm = Math.max(0, resultMsm - 1); } } - msmPerClause[i] = resultMsm; - terms.addAll(subResult.extractions); - - QueryExtraction[] t = subResult.extractions.toArray(new QueryExtraction[1]); - if (subResult.extractions.size() == 1 && t[0].range != null) { - rangeFieldNames[i] = t[0].range.fieldName; - } + clauses.add(new DisjunctionClause(resultMsm, subResult.extractions.stream() + .filter(extraction -> extraction.range != null) + .map(extraction -> extraction.range.fieldName) + .collect(toSet()))); } boolean matchAllDocs = numMatchAllClauses > 0 && numMatchAllClauses >= requiredShouldClauses; @@ -554,15 +559,20 @@ private static Result handleDisjunction(List disjunctions, int requiredSh Set seenRangeFields = new HashSet<>(); // Figure out what the combined msm is for this disjunction: // (sum the lowest required clauses, otherwise we're too strict and queries may not match) - Arrays.sort(msmPerClause); - int limit = Math.min(msmPerClause.length, Math.max(1, requiredShouldClauses)); + clauses = clauses.stream() + .filter(o -> o.msm > 0) + .sorted(Comparator.comparingInt(o -> o.msm)) + .collect(Collectors.toList()); + int limit = Math.min(clauses.size(), Math.max(1, 
requiredShouldClauses)); for (int i = 0; i < limit; i++) { - if (rangeFieldNames[i] != null) { - if (seenRangeFields.add(rangeFieldNames[i])) { - msm += 1; + if (clauses.get(i).rangeFieldNames.isEmpty() == false) { + for (String rangeField: clauses.get(i).rangeFieldNames) { + if (seenRangeFields.add(rangeField)) { + msm += 1; + } } } else { - msm += msmPerClause[i]; + msm += clauses.get(i).msm; } } } else { @@ -575,6 +585,17 @@ private static Result handleDisjunction(List disjunctions, int requiredSh } } + static class DisjunctionClause { + + final int msm; + final Set rangeFieldNames; + + DisjunctionClause(int msm, Set rangeFieldNames) { + this.msm = msm; + this.rangeFieldNames = rangeFieldNames; + } + } + static Set selectBestExtraction(Set extractions1, Set extractions2) { assert extractions1 != null || extractions2 != null; if (extractions1 == null) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 6af28b2bbfcff..80a6fabeca76c 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DirectoryReader; @@ -36,7 +37,9 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.queries.BlendedTermQuery; @@ -44,16 +47,15 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.search.CoveringQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.FilterScorer; import org.apache.lucene.search.FilteredDocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; @@ -76,11 +78,15 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; +import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; 
+import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -166,61 +172,65 @@ public void deinit() throws Exception { public void testDuel() throws Exception { int numFields = randomIntBetween(1, 3); - Map> content = new HashMap<>(); + Map> stringContent = new HashMap<>(); for (int i = 0; i < numFields; i++) { int numTokens = randomIntBetween(1, 64); List values = new ArrayList<>(); for (int j = 0; j < numTokens; j++) { values.add(randomAlphaOfLength(8)); } - content.put("field" + i, values); + stringContent.put("field" + i, values); } - List fields = new ArrayList<>(content.keySet()); + List stringFields = new ArrayList<>(stringContent.keySet()); + + int numValues = randomIntBetween(16, 64); + List intValues = new ArrayList<>(numValues); + for (int j = 0; j < numValues; j++) { + intValues.add(randomInt()); + } + Collections.sort(intValues); + + MappedFieldType intFieldType = mapperService.documentMapper("type").mappers() + .getMapper("int_field").fieldType(); List> queryFunctions = new ArrayList<>(); queryFunctions.add(MatchNoDocsQuery::new); queryFunctions.add(MatchAllDocsQuery::new); queryFunctions.add(() -> new TermQuery(new Term("unknown_field", "value"))); - String field1 = randomFrom(fields); - queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(content.get(field1))))); - String field2 = randomFrom(fields); - queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(content.get(field2))))); - queryFunctions.add(() -> new TermInSetQuery(field1, new BytesRef(randomFrom(content.get(field1))), - new BytesRef(randomFrom(content.get(field1))))); - queryFunctions.add(() -> new TermInSetQuery(field2, new BytesRef(randomFrom(content.get(field1))), - new BytesRef(randomFrom(content.get(field1))))); + String field1 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field1, randomFrom(stringContent.get(field1))))); + String field2 = randomFrom(stringFields); + queryFunctions.add(() -> new TermQuery(new Term(field2, randomFrom(stringContent.get(field2))))); + queryFunctions.add(() -> intFieldType.termQuery(randomFrom(intValues), null)); + queryFunctions.add(() -> intFieldType.termsQuery(Arrays.asList(randomFrom(intValues), randomFrom(intValues)), null)); + queryFunctions.add(() -> intFieldType.rangeQuery(intValues.get(4), intValues.get(intValues.size() - 4), true, + true, ShapeRelation.WITHIN, null, null, null)); + queryFunctions.add(() -> new TermInSetQuery(field1, new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))))); + queryFunctions.add(() -> new TermInSetQuery(field2, new BytesRef(randomFrom(stringContent.get(field1))), + new BytesRef(randomFrom(stringContent.get(field1))))); + int numRandomBoolQueries = randomIntBetween(16, 32); + for (int i = 0; i < numRandomBoolQueries; i++) { + queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues)); + } queryFunctions.add(() -> { - BooleanQuery.Builder builder = new BooleanQuery.Builder(); int numClauses = randomIntBetween(1, 16); + List clauses = new ArrayList<>(); for (int i = 0; i < numClauses; i++) { - if (rarely()) { - if (randomBoolean()) { - Occur occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD)); - builder.add(new TermQuery(new Term("unknown_field", randomAlphaOfLength(8))), occur); - } else { - String field = 
randomFrom(fields); - builder.add(new TermQuery(new Term(field, randomFrom(content.get(field)))), Occur.MUST_NOT); - } - } else { - if (randomBoolean()) { - Occur occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD)); - String field = randomFrom(fields); - builder.add(new TermQuery(new Term(field, randomFrom(content.get(field)))), occur); - } else { - builder.add(new TermQuery(new Term("unknown_field", randomAlphaOfLength(8))), Occur.MUST_NOT); - } - } + String field = randomFrom(stringFields); + clauses.add(new TermQuery(new Term(field, randomFrom(stringContent.get(field))))); } - return builder.build(); + return new DisjunctionMaxQuery(clauses, 0.01f); }); queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 16); - List clauses = new ArrayList<>(); - for (int i = 0; i < numClauses; i++) { - String field = randomFrom(fields); - clauses.add(new TermQuery(new Term(field, randomFrom(content.get(field))))); + Float minScore = randomBoolean() ? null : (float) randomIntBetween(1, 1000); + Query innerQuery; + if (randomBoolean()) { + innerQuery = new TermQuery(new Term(field1, randomFrom(stringContent.get(field1)))); + } else { + innerQuery = new PhraseQuery(field1, randomFrom(stringContent.get(field1)), randomFrom(stringContent.get(field1))); } - return new DisjunctionMaxQuery(clauses, 0.01f); + return new FunctionScoreQuery(innerQuery, minScore, 1f); }); List documents = new ArrayList<>(); @@ -237,14 +247,75 @@ public void testDuel() throws Exception { shardSearcher.setQueryCache(null); Document document = new Document(); - for (Map.Entry> entry : content.entrySet()) { + for (Map.Entry> entry : stringContent.entrySet()) { String value = entry.getValue().stream().collect(Collectors.joining(" ")); document.add(new TextField(entry.getKey(), value, Field.Store.NO)); } + for (Integer intValue : intValues) { + List numberFields = + NumberFieldMapper.NumberType.INTEGER.createFields("int_field", intValue, true, true, false); + for (Field numberField : numberFields) { + document.add(numberField); + } + } MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer()); duelRun(queryStore, memoryIndex, shardSearcher); } + private BooleanQuery createRandomBooleanQuery(int depth, List fields, Map> content, + MappedFieldType intFieldType, List intValues) { + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + int numClauses = randomIntBetween(1, 16); + int numShouldClauses = 0; + boolean onlyShouldClauses = rarely(); + for (int i = 0; i < numClauses; i++) { + Occur occur; + if (onlyShouldClauses) { + occur = Occur.SHOULD; + if (randomBoolean()) { + String field = randomFrom(fields); + builder.add(new TermQuery(new Term(field, randomFrom(content.get(field)))), occur); + } else { + builder.add(intFieldType.termQuery(randomFrom(intValues), null), occur); + } + } else if (rarely() && depth <= 3) { + occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD)); + builder.add(createRandomBooleanQuery(depth + 1, fields, content, intFieldType, intValues), occur); + } else if (rarely()) { + if (randomBoolean()) { + occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD)); + if (randomBoolean()) { + builder.add(new TermQuery(new Term("unknown_field", randomAlphaOfLength(8))), occur); + } else { + builder.add(intFieldType.termQuery(randomFrom(intValues), null), occur); + } + } else if (randomBoolean()) { + String field = randomFrom(fields); + builder.add(new TermQuery(new Term(field, randomFrom(content.get(field)))), occur = 
Occur.MUST_NOT); + } else { + builder.add(intFieldType.termQuery(randomFrom(intValues), null), occur = Occur.MUST_NOT); + } + } else { + if (randomBoolean()) { + occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD)); + if (randomBoolean()) { + String field = randomFrom(fields); + builder.add(new TermQuery(new Term(field, randomFrom(content.get(field)))), occur); + } else { + builder.add(intFieldType.termQuery(randomFrom(intValues), null), occur); + } + } else { + builder.add(new TermQuery(new Term("unknown_field", randomAlphaOfLength(8))), occur = Occur.MUST_NOT); + } + } + if (occur == Occur.SHOULD) { + numShouldClauses++; + } + } + builder.setMinimumNumberShouldMatch(numShouldClauses); + return builder.build(); + } + public void testDuelIdBased() throws Exception { List> queryFunctions = new ArrayList<>(); queryFunctions.add((id) -> new PrefixQuery(new Term("field", id))); @@ -620,6 +691,31 @@ public void testPercolateMatchAll() throws Exception { assertEquals(4, topDocs.scoreDocs[2].doc); } + public void testFunctionScoreQuery() throws Exception { + List docs = new ArrayList<>(); + addQuery(new FunctionScoreQuery(new TermQuery(new Term("field", "value")), null, 1f), docs); + addQuery(new FunctionScoreQuery(new TermQuery(new Term("field", "value")), 10f, 1f), docs); + addQuery(new FunctionScoreQuery(new MatchAllDocsQuery(), null, 1f), docs); + addQuery(new FunctionScoreQuery(new MatchAllDocsQuery(), 10F, 1f), docs); + + indexWriter.addDocuments(docs); + indexWriter.close(); + directoryReader = DirectoryReader.open(directory); + IndexSearcher shardSearcher = newSearcher(directoryReader); + shardSearcher.setQueryCache(null); + + MemoryIndex memoryIndex = new MemoryIndex(); + memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); + IndexSearcher percolateSearcher = memoryIndex.createSearcher(); + PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, + Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); + TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true); + assertEquals(2L, topDocs.totalHits); + assertEquals(2, topDocs.scoreDocs.length); + assertEquals(0, topDocs.scoreDocs[0].doc); + assertEquals(2, topDocs.scoreDocs[1].doc); + } + public void testPercolateSmallAndLargeDocument() throws Exception { List docs = new ArrayList<>(); BooleanQuery.Builder builder = new BooleanQuery.Builder(); @@ -766,11 +862,11 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd Query percolateQuery = fieldType.percolateQuery("_name", queryStore, Collections.singletonList(new BytesArray("{}")), percolateSearcher, Version.CURRENT); Query query = requireScore ? percolateQuery : new ConstantScoreQuery(percolateQuery); - TopDocs topDocs = shardSearcher.search(query, 10); + TopDocs topDocs = shardSearcher.search(query, 100); Query controlQuery = new ControlQuery(memoryIndex, queryStore); controlQuery = requireScore ? 
controlQuery : new ConstantScoreQuery(controlQuery); - TopDocs controlTopDocs = shardSearcher.search(controlQuery, 10); + TopDocs controlTopDocs = shardSearcher.search(controlQuery, 100); try { assertThat(topDocs.totalHits, equalTo(controlTopDocs.totalHits)); @@ -793,22 +889,39 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd logger.error("controlTopDocs.scoreDocs.length={}", controlTopDocs.scoreDocs.length); for (int i = 0; i < topDocs.scoreDocs.length; i++) { - logger.error("topDocs.scoreDocs[j].doc={}", topDocs.scoreDocs[i].doc); - logger.error("topDocs.scoreDocs[j].score={}", topDocs.scoreDocs[i].score); + logger.error("topDocs.scoreDocs[{}].doc={}", i, topDocs.scoreDocs[i].doc); + logger.error("topDocs.scoreDocs[{}].score={}", i, topDocs.scoreDocs[i].score); } for (int i = 0; i < controlTopDocs.scoreDocs.length; i++) { - logger.error("controlTopDocs.scoreDocs[j].doc={}", controlTopDocs.scoreDocs[i].doc); - logger.error("controlTopDocs.scoreDocs[j].score={}", controlTopDocs.scoreDocs[i].score); + logger.error("controlTopDocs.scoreDocs[{}].doc={}", i, controlTopDocs.scoreDocs[i].doc); + logger.error("controlTopDocs.scoreDocs[{}].score={}", i, controlTopDocs.scoreDocs[i].score); + + // Additional stored information that is useful when debugging: + String queryToString = shardSearcher.doc(controlTopDocs.scoreDocs[i].doc).get("query_to_string"); + logger.error("topDocs.scoreDocs[{}].query_to_string={}", i, queryToString); + + NumericDocValues numericValues = + MultiDocValues.getNumericValues(shardSearcher.getIndexReader(), fieldType.minimumShouldMatchField.name()); + boolean exact = numericValues.advanceExact(controlTopDocs.scoreDocs[i].doc); + if (exact) { + logger.error("controlTopDocs.scoreDocs[{}].minimum_should_match_field={}", i, numericValues.longValue()); + } else { + // Some queries do not have a msm field. (e.g. 
unsupported queries) + logger.error("controlTopDocs.scoreDocs[{}].minimum_should_match_field=[NO_VALUE]", i); + } } throw ae; } } - private void addQuery(Query query, List docs) throws IOException { + private void addQuery(Query query, List docs) { ParseContext.InternalParseContext parseContext = new ParseContext.InternalParseContext(Settings.EMPTY, mapperService.documentMapperParser(), documentMapper, null, null); fieldMapper.processQuery(query, parseContext); - docs.add(parseContext.doc()); + ParseContext.Document queryDocument = parseContext.doc(); + // Add to string representation of the query to make debugging easier: + queryDocument.add(new StoredField("query_to_string", query.toString())); + docs.add(queryDocument); queries.add(query); } @@ -865,8 +978,6 @@ public Weight createWeight(IndexSearcher searcher, boolean needsScores, float bo final IndexSearcher percolatorIndexSearcher = memoryIndex.createSearcher(); return new Weight(this) { - float _score; - @Override public void extractTerms(Set terms) {} @@ -889,6 +1000,7 @@ public String toString() { @Override public Scorer scorer(LeafReaderContext context) throws IOException { + float _score[] = new float[]{boost}; DocIdSetIterator allDocs = DocIdSetIterator.all(context.reader().maxDoc()); CheckedFunction leaf = queryStore.getQueries(context); FilteredDocIdSetIterator memoryIndexIterator = new FilteredDocIdSetIterator(allDocs) { @@ -900,7 +1012,7 @@ protected boolean match(int doc) { TopDocs topDocs = percolatorIndexSearcher.search(query, 1); if (topDocs.totalHits > 0) { if (needsScores) { - _score = topDocs.scoreDocs[0].score; + _score[0] = topDocs.scoreDocs[0].score; } return true; } else { @@ -911,11 +1023,21 @@ protected boolean match(int doc) { } } }; - return new FilterScorer(new ConstantScoreScorer(this, 1f, memoryIndexIterator)) { + return new Scorer(this) { + + @Override + public int docID() { + return memoryIndexIterator.docID(); + } + + @Override + public DocIdSetIterator iterator() { + return memoryIndexIterator; + } @Override public float score() throws IOException { - return _score; + return _score[0]; } }; } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 843dbcae90fa1..1bd0dff132d1d 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -922,7 +922,7 @@ public void testDuplicatedClauses() throws Exception { assertThat(values.get(3), equalTo("field\0value4")); assertThat(values.get(4), equalTo("field\0value5")); msm = doc.rootDoc().getFields(fieldType.minimumShouldMatchField.name())[0].numericValue().intValue(); - assertThat(msm, equalTo(3)); + assertThat(msm, equalTo(1)); } private static byte[] subByteArray(byte[] source, int offset, int length) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 5b382eb7654bd..5968f8c3f8327 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -811,6 +811,24 @@ public void testFunctionScoreQuery() { assertTermsEqual(result.extractions, new Term("_field", "_value")); } + public void 
testFunctionScoreQuery_withMatchAll() { + MatchAllDocsQuery innerQuery = new MatchAllDocsQuery(); + FunctionScoreQuery functionScoreQuery1 = new FunctionScoreQuery(innerQuery, new RandomScoreFunction(0, 0, null)); + Result result = analyze(functionScoreQuery1, Version.CURRENT); + assertThat(result.verified, is(true)); + assertThat(result.minimumShouldMatch, equalTo(0)); + assertThat(result.matchAllDocs, is(true)); + assertThat(result.extractions.isEmpty(), is(true)); + + FunctionScoreQuery functionScoreQuery2 = + new FunctionScoreQuery(innerQuery, new RandomScoreFunction(0, 0, null), CombineFunction.MULTIPLY, 1f, 10f); + result = analyze(functionScoreQuery2, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(0)); + assertThat(result.matchAllDocs, is(true)); + assertThat(result.extractions.isEmpty(), is(true)); + } + public void testSelectBestExtraction() { Set queryTerms1 = terms(new int[0], "12", "1234", "12345"); Set queryTerms2 = terms(new int[0], "123", "1234", "12345"); diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 7d3246376a8e5..450af05b75ac1 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,3 +1,8 @@ +import org.elasticsearch.gradle.test.AntFixture + +import java.security.KeyPair +import java.security.KeyPairGenerator + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -52,3 +57,48 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Hierarchy', 'org.apache.log.Logger', ] + +/** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ +task googleCloudStorageFixture(type: AntFixture) { + dependsOn compileTestJava + executable = new File(project.runtimeJavaHome, 'bin/java') + args '-cp', "${ -> project.sourceSets.test.runtimeClasspath.asPath }", + 'org.elasticsearch.repositories.gcs.GoogleCloudStorageFixture', + baseDir, 'bucket_test' +} + +/** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ +File serviceAccountFile = new File(project.buildDir, "generated-resources/service_account_test.json") +task createServiceAccountFile() { + dependsOn googleCloudStorageFixture + doLast { + KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") + keyPairGenerator.initialize(1024) + KeyPair keyPair = keyPairGenerator.generateKeyPair() + String encodedKey = Base64.getEncoder().encodeToString(keyPair.private.getEncoded()) + + serviceAccountFile.parentFile.mkdirs() + serviceAccountFile.setText("{\n" + + ' "type": "service_account",\n' + + ' "project_id": "integration_test",\n' + + ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + + ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + + ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + + ' "client_id": "123456789101112130594",\n' + + " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + + " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + + ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + + ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + + '}', 'UTF-8') + } +} + +integTestCluster { + dependsOn createServiceAccountFile, googleCloudStorageFixture + 
setupCommand 'create-elasticsearch-keystore', 'bin/elasticsearch-keystore', 'create' + setupCommand 'add-credentials-to-elasticsearch-keystore', + 'bin/elasticsearch-keystore', 'add-file', 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}" + + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java new file mode 100644 index 0000000000000..cddcab870de34 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.gcs; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.repositories.gcs.GoogleCloudStorageTestServer.Response; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; + +/** + * {@link GoogleCloudStorageFixture} is a fixture that emulates a Google Cloud Storage service. + *

+ * It starts an asynchronous socket server that binds to a random local port. The server parses
+ * HTTP requests and uses a {@link GoogleCloudStorageTestServer} to handle them before returning
+ * them to the client as HTTP responses.
+ */
+public class GoogleCloudStorageFixture {
+
+    @SuppressForbidden(reason = "PathUtils#get is fine - we don't have environment here")
+    public static void main(String[] args) throws Exception {
+        if (args == null || args.length != 2) {
+            throw new IllegalArgumentException("GoogleCloudStorageFixture <working directory> <bucket>");
+        }
+
+        final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+        final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0);
+
+        try {
+            final Path workingDirectory = Paths.get(args[0]);
+            // Writes the PID of the current Java process in a `pid` file located in the working directory
+            writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]);
+
+            final String addressAndPort = addressToString(httpServer.getAddress());
+            // Writes the address and port of the http server in a `ports` file located in the working directory
+            writeFile(workingDirectory, "ports", addressAndPort);
+
+            // Emulates a Google Cloud Storage server
+            final String storageUrl = "http://" + addressAndPort;
+            final GoogleCloudStorageTestServer storageTestServer = new GoogleCloudStorageTestServer(storageUrl);
+            storageTestServer.createBucket(args[1]);
+
+            httpServer.createContext("/", new ResponseHandler(storageTestServer));
+            httpServer.start();
+
+            // Wait to be killed
+            Thread.sleep(Long.MAX_VALUE);
+
+        } finally {
+            httpServer.stop(0);
+        }
+    }
+
+    private static void writeFile(final Path dir, final String fileName, final String content) throws IOException {
+        final Path tempPidFile = Files.createTempFile(dir, null, null);
+        Files.write(tempPidFile, singleton(content));
+        Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
+    }
+
+    private static String addressToString(final SocketAddress address) {
+        final InetSocketAddress inetSocketAddress = (InetSocketAddress) address;
+        if (inetSocketAddress.getAddress() instanceof Inet6Address) {
+            return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort();
+        } else {
+            return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort();
+        }
+    }
+
+    @SuppressForbidden(reason = "Use a http server")
+    static class ResponseHandler implements HttpHandler {
+
+        private final GoogleCloudStorageTestServer storageServer;
+
+        private ResponseHandler(final GoogleCloudStorageTestServer storageServer) {
+            this.storageServer = storageServer;
+        }
+
+        @Override
+        public void handle(HttpExchange exchange) throws IOException {
+            String method = exchange.getRequestMethod();
+            String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath();
+            String query = exchange.getRequestURI().getRawQuery();
+            Map<String, List<String>> headers = exchange.getRequestHeaders();
+
+            ByteArrayOutputStream out = new ByteArrayOutputStream();
+            Streams.copy(exchange.getRequestBody(), out);
+
+            final Response storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray());
+
+            Map<String, List<String>> responseHeaders = exchange.getResponseHeaders();
+            responseHeaders.put("Content-Type", singletonList(storageResponse.contentType));
+            storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v)));
+            exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length);
+            if (storageResponse.body.length > 0) {
+                exchange.getResponseBody().write(storageResponse.body);
+            }
+            exchange.close();
+        }
+    }
+}
diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
index 17255fa90ed2a..6610895e1f497 100644
--- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
+++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.repositories.gcs;
 
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.path.PathTrie;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -27,10 +26,11 @@
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.RestUtils;
 
-import java.io.BufferedInputStream;
+import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -39,13 +39,15 @@
 import java.util.Objects;
 
 import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonList;
 import static java.util.Collections.singletonMap;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 
 /**
- * {@link GoogleCloudStorageTestServer} emulates a Google Cloud Storage service through a {@link #handle(String, String, byte[])} method
- * that provides appropriate responses for specific requests like the real Google Cloud platform would do. It is largely based on official
- * documentation available at https://cloud.google.com/storage/docs/json_api/v1/.
+ * {@link GoogleCloudStorageTestServer} emulates a Google Cloud Storage service through
+ * a {@link #handle(String, String, String, Map, byte[])} method that provides appropriate
+ * responses for specific requests like the real Google Cloud platform would do.
+ * It is largely based on official documentation available at https://cloud.google.com/storage/docs/json_api/v1/.
  */
 public class GoogleCloudStorageTestServer {
 
@@ -57,19 +59,22 @@ public class GoogleCloudStorageTestServer {
     /** Request handlers for the requests made by the Google Cloud Storage client **/
     private final PathTrie<RequestHandler> handlers;
 
+    /** Server endpoint **/
+    private final String endpoint;
+
     /**
      * Creates a {@link GoogleCloudStorageTestServer} with the default endpoint
      */
     GoogleCloudStorageTestServer() {
-        this("https://www.googleapis.com", true);
+        this("https://www.googleapis.com");
     }
 
     /**
-     * Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint,
-     * potentially prefixing the URL patterns to match with the endpoint name.
+     * Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint
      */
-    GoogleCloudStorageTestServer(final String endpoint, final boolean prefixWithEndpoint) {
-        this.handlers = defaultHandlers(endpoint, prefixWithEndpoint, buckets);
+    GoogleCloudStorageTestServer(final String endpoint) {
+        this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null");
+        this.handlers = defaultHandlers(endpoint, buckets);
     }
 
     /** Creates a bucket in the test server **/
@@ -77,24 +82,61 @@ void createBucket(final String bucketName) {
         buckets.put(bucketName, new Bucket(bucketName));
     }
 
-    public Response handle(final String method, final String url, byte[] content) throws IOException {
-        final Map<String, String> params = new HashMap<>();
+    public String getEndpoint() {
+        return endpoint;
+    }
 
-        // Splits the URL to extract query string parameters
-        final String rawPath;
-        int questionMark = url.indexOf('?');
-        if (questionMark != -1) {
-            rawPath = url.substring(0, questionMark);
-            RestUtils.decodeQueryString(url, questionMark + 1, params);
-        } else {
-            rawPath = url;
+    /**
+     * Returns a Google Cloud Storage response for the given request
+     *
+     * @param method  the HTTP method of the request
+     * @param url     the HTTP URL of the request
+     * @param headers the HTTP headers of the request
+     * @param body    the HTTP request body
+     * @return a {@link Response}
+     *
+     * @throws IOException if something goes wrong
+     */
+    public Response handle(final String method,
+                           final String url,
+                           final Map<String, List<String>> headers,
+                           byte[] body) throws IOException {
+
+        final int questionMark = url.indexOf('?');
+        if (questionMark == -1) {
+            return handle(method, url, null, headers, body);
         }
+        return handle(method, url.substring(0, questionMark), url.substring(questionMark + 1), headers, body);
+    }
+
+    /**
+     * Returns a Google Cloud Storage response for the given request
+     *
+     * @param method  the HTTP method of the request
+     * @param path    the path of the URL of the request
+     * @param query   the queryString of the URL of request
+     * @param headers the HTTP headers of the request
+     * @param body    the HTTP request body
+     * @return a {@link Response}
+     * @throws IOException if something goes wrong
+     */
+    public Response handle(final String method,
+                           final String path,
+                           final String query,
+                           final Map<String, List<String>> headers,
+                           byte[] body) throws IOException {
 
-        final RequestHandler handler = handlers.retrieve(method + " " + rawPath, params);
+        final Map<String, String> params = new HashMap<>();
+        if (query != null) {
+            RestUtils.decodeQueryString(query, 0, params);
+        }
+
+        final RequestHandler handler = handlers.retrieve(method + " " + path, params);
         if (handler != null) {
-            return handler.execute(url, params, content);
+            return handler.execute(params, headers, body);
         } else {
-            return newError(RestStatus.INTERNAL_SERVER_ERROR, "No handler defined for request [method: " + method + ", url: " + url + "]");
+            return newError(RestStatus.INTERNAL_SERVER_ERROR,
+                "No handler defined for request [method: " + method + ", path: " + path + "]");
         }
     }
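The first `handle` overload does nothing more than split the URL at the `?` before delegating to the second one. Here is a small standalone sketch of that splitting, with a toy `parseQuery` standing in for `RestUtils.decodeQueryString` (illustrative only, not the plugin's code):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SplitUrlExample {
        static Map<String, String> parseQuery(String query) {
            Map<String, String> params = new LinkedHashMap<>();
            for (String pair : query.split("&")) {
                String[] kv = pair.split("=", 2);
                params.put(kv[0], kv.length == 2 ? kv[1] : "");
            }
            return params;
        }

        public static void main(String[] args) {
            String url = "https://www.googleapis.com/upload/storage/v1/b/bucket/o?uploadType=resumable&upload_id=42";
            int questionMark = url.indexOf('?');
            String path = questionMark == -1 ? url : url.substring(0, questionMark);
            String query = questionMark == -1 ? null : url.substring(questionMark + 1);
            System.out.println(path);              // .../upload/storage/v1/b/bucket/o
            System.out.println(parseQuery(query)); // {uploadType=resumable, upload_id=42}
        }
    }
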
@@ -104,28 +146,24 @@ interface RequestHandler {
 
     /**
      * Simulates the execution of a Storage request and returns a corresponding response.
      *
-     * @param url    the request URL
-     * @param params the request URL parameters
+     * @param params  the request's query string parameters
+     * @param headers the request's headers
      * @param body the request body provided as a byte array
      * @return the corresponding response
      *
      * @throws IOException if something goes wrong
      */
-        Response execute(String url, Map<String, String> params, byte[] body) throws IOException;
+        Response execute(Map<String, String> params, Map<String, List<String>> headers, byte[] body) throws IOException;
    }
 
     /** Builds the default request handlers **/
-    private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
-                                                            final boolean prefixWithEndpoint,
-                                                            final Map<String, Bucket> buckets) {
-
+    private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, final Map<String, Bucket> buckets) {
         final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);
-        final String prefix = prefixWithEndpoint ? endpoint : "";
 
         // GET Bucket
         //
         // https://cloud.google.com/storage/docs/json_api/v1/buckets/get
-        handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}", (url, params, body) -> {
+        handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}", (params, headers, body) -> {
             String name = params.get("bucket");
             if (Strings.hasText(name) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing");
@@ -141,7 +179,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // GET Object
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/get
-        handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> {
+        handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
             String objectName = params.get("object");
             if (Strings.hasText(objectName) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
@@ -163,7 +201,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // Delete Object
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/delete
-        handlers.insert("DELETE " + prefix + "/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> {
+        handlers.insert("DELETE " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
             String objectName = params.get("object");
             if (Strings.hasText(objectName) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
@@ -184,7 +222,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // Insert Object (initialization)
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/insert
-        handlers.insert("POST " + prefix + "/upload/storage/v1/b/{bucket}/o", (url, params, body) -> {
+        handlers.insert("POST " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
             if ("resumable".equals(params.get("uploadType")) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable");
             }
@@ -210,7 +248,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // Insert Object (upload)
         //
         // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
-        handlers.insert("PUT " + prefix + "/upload/storage/v1/b/{bucket}/o", (url, params, body) -> {
+        handlers.insert("PUT " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
             String objectId = params.get("upload_id");
             if (Strings.hasText(objectId) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing");
@@ -232,7 +270,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // Copy Object
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/copy
-        handlers.insert("POST " + prefix + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (url, params, body) -> {
+        handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (params, headers, body) -> {
             String source = params.get("src");
             if (Strings.hasText(source) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing");
@@ -265,7 +303,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // List Objects
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/list
-        handlers.insert("GET " + prefix + "/storage/v1/b/{bucket}/o", (url, params, body) -> {
+        handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o", (params, headers, body) -> {
             final Bucket bucket = buckets.get(params.get("bucket"));
             if (bucket == null) {
                 return newError(RestStatus.NOT_FOUND, "bucket not found");
@@ -293,7 +331,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // Download Object
         //
         // https://cloud.google.com/storage/docs/request-body
-        handlers.insert("GET " + prefix + "/download/storage/v1/b/{bucket}/o/{object}", (url, params, body) -> {
+        handlers.insert("GET " + endpoint + "/download/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
             String object = params.get("object");
             if (Strings.hasText(object) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing");
@@ -314,7 +352,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
         // Batch
         //
         // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
-        handlers.insert("POST " + prefix + "/batch", (url, params, req) -> {
+        handlers.insert("POST " + endpoint + "/batch", (params, headers, body) -> {
             final List<Response> batchedResponses = new ArrayList<>();
 
             // A batch request body looks like this:
@@ -339,37 +377,88 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
             //
             // --__END_OF_PART__--
 
-            // Here we simply process the request body line by line and delegate to other handlers
-            // if possible.
-            Streams.readAllLines(new BufferedInputStream(new ByteArrayInputStream(req)), line -> {
-                final int indexOfHttp = line.indexOf(" HTTP/1.1");
-                if (indexOfHttp > 0) {
-                    line = line.substring(0, indexOfHttp);
+            // Default multipart boundary
+            String boundary = "__END_OF_PART__";
+
+            // Determine the multipart boundary
+            final List<String> contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type"));
+            if (contentTypes != null) {
+                final String contentType = contentTypes.get(0);
+                if (contentType != null && contentType.contains("multipart/mixed; boundary=")) {
+                    boundary = contentType.replace("multipart/mixed; boundary=", "");
                 }
+            }
 
-                RequestHandler handler = handlers.retrieve(line, params);
-                if (handler != null) {
-                    try {
-                        batchedResponses.add(handler.execute(line, params, req));
-                    } catch (IOException e) {
-                        batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()));
+            // Read line by line the batched requests
+            try (BufferedReader reader = new BufferedReader(
+                    new InputStreamReader(
+                            new ByteArrayInputStream(body), StandardCharsets.UTF_8))) {
+                String line;
+                while ((line = reader.readLine()) != null) {
+                    // Start of a batched request
+                    if (line.equals("--" + boundary)) {
+                        Map<String, List<String>> batchedHeaders = new HashMap<>();
+
+                        // Reads the headers, if any
+                        while ((line = reader.readLine()) != null) {
+                            if (line.equals("\r\n") || line.length() == 0) {
+                                // end of headers
+                                break;
+                            } else {
+                                String[] header = line.split(":", 2);
+                                batchedHeaders.put(header[0], singletonList(header[1]));
+                            }
+                        }
+
+                        // Reads the method and URL
+                        line = reader.readLine();
+                        String batchedUrl = line.substring(0, line.lastIndexOf(' '));
+
+                        final Map<String, String> batchedParams = new HashMap<>();
+                        int questionMark = batchedUrl.indexOf('?');
+                        if (questionMark != -1) {
+                            RestUtils.decodeQueryString(batchedUrl.substring(questionMark + 1), 0, batchedParams);
+                        }
+
+                        // Reads the body
+                        line = reader.readLine();
+                        byte[] batchedBody = new byte[0];
+                        if (line != null && line.startsWith("--" + boundary) == false) {
+                            batchedBody = line.getBytes(StandardCharsets.UTF_8);
+                        }
+
+                        // Executes the batched request
+                        RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams);
+                        if (handler != null) {
+                            try {
+                                batchedResponses.add(handler.execute(batchedParams, batchedHeaders, batchedBody));
+                            } catch (IOException e) {
+                                batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage()));
+                            }
+                        }
                     }
                 }
-            });
+            }
 
             // Now we can build the response
-            String boundary = "__END_OF_PART__";
             String sep = "--";
             String line = "\r\n";
 
             StringBuilder builder = new StringBuilder();
             for (Response response : batchedResponses) {
                 builder.append(sep).append(boundary).append(line);
+                builder.append("Content-Type: application/http").append(line);
                 builder.append(line);
-                builder.append("HTTP/1.1 ").append(response.status.getStatus());
-                builder.append(' ').append(response.status.toString());
-                builder.append(line);
+                builder.append("HTTP/1.1 ")
+                        .append(response.status.getStatus())
+                        .append(' ')
+                        .append(response.status.toString())
+                        .append(line);
                 builder.append("Content-Length: ").append(response.body.length).append(line);
+                builder.append("Content-Type: ").append(response.contentType).append(line);
+                response.headers.forEach((k, v) -> builder.append(k).append(": ").append(v).append(line));
+                builder.append(line);
+                builder.append(new String(response.body, StandardCharsets.UTF_8)).append(line);
                 builder.append(line);
             }
             builder.append(line);
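The boundary extraction is the subtle part of the batch handler above. A self-contained sketch of just that step, under the same assumption that the header has the form `multipart/mixed; boundary=XYZ`, with the same fallback default:

    public class BoundaryExample {
        static String boundary(String contentType) {
            String prefix = "multipart/mixed; boundary=";
            if (contentType != null && contentType.contains(prefix)) {
                return contentType.replace(prefix, "");
            }
            return "__END_OF_PART__"; // default assumed when no boundary is advertised
        }

        public static void main(String[] args) {
            System.out.println(boundary("multipart/mixed; boundary=__END_OF_PART__123")); // __END_OF_PART__123
            System.out.println(boundary(null));                                           // __END_OF_PART__
        }
    }
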
@@ -379,6 +468,17 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint,
 
             return new Response(RestStatus.OK, emptyMap(), "multipart/mixed; boundary=" + boundary, content);
         });
 
+        // Fake refresh of an OAuth2 token
+        //
+        handlers.insert("POST " + endpoint + "/o/oauth2/token", (params, headers, body) ->
+            newResponse(RestStatus.OK, emptyMap(), jsonBuilder()
+                .startObject()
+                    .field("access_token", "unknown")
+                    .field("token_type", "Bearer")
+                    .field("expires_in", 3600)
+                .endObject())
+        );
+
         return handlers;
     }
diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java
index 8be7511ab58c6..a04dae294975a 100644
--- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java
+++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java
@@ -49,8 +49,7 @@ public LowLevelHttpRequest buildRequest(String method, String url) throws IOException {
         return new MockLowLevelHttpRequest() {
             @Override
             public LowLevelHttpResponse execute() throws IOException {
-                final GoogleCloudStorageTestServer.Response response = server.handle(method, url, getContentAsBytes());
-                return convert(response);
+                return convert(server.handle(method, url, getHeaders(), getContentAsBytes()));
             }
 
             /** Returns the LowLevelHttpRequest body as an array of bytes **/
diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml
index a37fb77954971..62387227cbc9d 100644
--- a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml
+++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml
@@ -1,6 +1,6 @@
-# Integration tests for Repository GCS component
+# Integration tests for repository-gcs
 #
-"Repository GCS loaded":
+"Plugin repository-gcs is loaded":
 
     - do:
         cluster.state: {}
@@ -11,3 +11,176 @@
         nodes.info: {}
 
     - match:  { nodes.$master.plugins.0.name: repository-gcs  }
+---
+"Snapshot/Restore with repository-gcs":
+  - skip:
+      version: " - 6.3.0"
+      reason:  repository-gcs was not testable through YAML tests until 6.3.0
+
+  # Register repository
+  - do:
+      snapshot.create_repository:
+        repository: repository
+        body:
+          type: gcs
+          settings:
+            bucket: "bucket_test"
+            client: "integration_test"
+
+  - match: { acknowledged: true }
+
+  # Index documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type:  doc
+              _id:    1
+          - snapshot: one
+          - index:
+              _index: docs
+              _type:  doc
+              _id:    2
+          - snapshot: one
+          - index:
+              _index: docs
+              _type:  doc
+              _id:    3
+          - snapshot: one
+
+  - do:
+      count:
+        index: docs
+
+  - match: {count: 3}
+
+  # Create a first snapshot
+  - do:
+      snapshot.create:
+        repository: repository
+        snapshot: snapshot-one
+        wait_for_completion: true
+
+  - match: { snapshot.snapshot: snapshot-one }
+  - match: { snapshot.state : SUCCESS }
+  - match: { snapshot.include_global_state: true }
+  - match: { snapshot.shards.failed : 0 }
+
+  - do:
+      snapshot.status:
+        repository: repository
+        snapshot: snapshot-one
+
+  - is_true: snapshots
+  - match: { snapshots.0.snapshot: snapshot-one }
+  - match: { snapshots.0.state : SUCCESS }
+
+  # Index more documents
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: docs
+              _type:  doc
+              _id:    4
+          - snapshot: two
+          - index:
+              _index: docs
+              _type:  doc
+              _id:    5
+          - snapshot: two
+          - index:
+              _index: docs
+              _type:  doc
+              _id:    6
+          - snapshot:
two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository + + + + diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index f1d63add2466a..029507a5ba49d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -61,9 +61,4 @@ public Map> getTransports(Settings settings, ThreadP () -> new NioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService)); } - - @Override - public List getBootstrapChecks() { - return Collections.singletonList(new NioNotEnabledBootstrapCheck()); - } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioNotEnabledBootstrapCheck.java b/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy similarity index 61% rename from plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioNotEnabledBootstrapCheck.java rename to plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy index e998f96da5ca1..2dbe07bd8a5c6 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioNotEnabledBootstrapCheck.java +++ b/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy @@ -17,16 +17,7 @@ * under the License. */ -package org.elasticsearch.transport.nio; - -import org.elasticsearch.bootstrap.BootstrapCheck; -import org.elasticsearch.bootstrap.BootstrapContext; - -public class NioNotEnabledBootstrapCheck implements BootstrapCheck { - - @Override - public BootstrapCheckResult check(BootstrapContext context) { - return BootstrapCheckResult.failure("The transport-nio plugin is experimental and not ready for production usage. 
It should " + - "not be enabled in production."); - } -} +grant codeBase "${codebase.elasticsearch-nio}" { + // elasticsearch-nio makes and accepts socket connections + permission java.net.SocketPermission "*", "accept,connect"; +}; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json index 37ff11f876470..51798c92babf6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json @@ -1,6 +1,6 @@ { "rank_eval": { - "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-rank-eval.html", + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html", "methods": ["POST"], "url": { "path": "/_rank_eval", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index c755af7db0781..1ae9c48e59c1d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -237,6 +237,28 @@ setup: search_after: [6] sort: [{ sort: desc }] +--- +"field collapsing and rescore": + + - skip: + version: " - 5.2.99" + reason: this uses a new API that has been added in 5.3 + + - do: + catch: /cannot use \`collapse\` in conjunction with \`rescore\`/ + search: + index: test + type: test + body: + collapse: { field: numeric_group } + rescore: + window_size: 20 + query: + rescore_query: + match_all: {} + query_weight: 1 + rescore_query_weight: 2 + --- "no hits and inner_hits": diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index ab50824f59a4f..ed20f52754dd4 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -984,8 +984,8 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_1), SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class, org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2), - UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class, - org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException::new, 148, Version.V_5_2_0), + UNKNOWN_NAMED_OBJECT_EXCEPTION(org.elasticsearch.common.xcontent.UnknownNamedObjectException.class, + org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, Version.V_5_2_0), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0_alpha1); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 99fe07a1f49cc..b17d2c3f2cd01 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -41,7 +41,6 @@ import org.elasticsearch.common.UUIDs; import 
org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.io.FastStringReader;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexService;
@@ -65,6 +64,7 @@
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -317,12 +317,12 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy
 
         for (int textIndex = 0; textIndex < request.text().length; textIndex++) {
             String charFilteredSource = request.text()[textIndex];
 
-            Reader reader = new FastStringReader(charFilteredSource);
+            Reader reader = new StringReader(charFilteredSource);
             if (charFilterFactories != null) {
                 for (int charFilterIndex = 0; charFilterIndex < charFilterFactories.length; charFilterIndex++) {
                     reader = charFilterFactories[charFilterIndex].create(reader);
-                    Reader readerForWriteOut = new FastStringReader(charFilteredSource);
+                    Reader readerForWriteOut = new StringReader(charFilteredSource);
                     readerForWriteOut = charFilterFactories[charFilterIndex].create(readerForWriteOut);
                     charFilteredSource = writeCharStream(readerForWriteOut);
                     charFiltersTexts[charFilterIndex][textIndex] = charFilteredSource;
@@ -382,7 +382,7 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy
     }
 
     private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, int current) {
-        Reader reader = new FastStringReader(source);
+        Reader reader = new StringReader(source);
         for (CharFilterFactory charFilterFactory : charFilterFactories) {
             reader = charFilterFactory.create(reader);
         }
diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
index 32614c636f128..4e7c66afdcaf0 100644
--- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
@@ -30,6 +30,7 @@
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -45,6 +46,7 @@
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.function.Consumer;
 import java.util.function.Supplier;
 
 public class TransportResyncReplicationAction extends TransportWriteAction<ResyncReplicationRequest,
         ResyncReplicationRequest, ResyncReplicationResponse> {
@@ … @@ class ResyncActionReplicasProxy extends ReplicasProxy {
+        @Override
+        public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess,
+                                      Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
+            shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, false, message, exception,
+                createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 4398c56f26c77..2cd5f7a5f13ac 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -1172,6 +1172,30 @@ public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable
             // "alive" if it were to be marked as stale.
             onSuccess.run();
         }
+
+        protected final ShardStateAction.Listener createShardActionListener(final Runnable onSuccess,
+                                                                            final Consumer<Exception> onPrimaryDemoted,
+                                                                            final Consumer<Exception> onIgnoredFailure) {
+            return new ShardStateAction.Listener() {
+                @Override
+                public void onSuccess() {
+                    onSuccess.run();
+                }
+
+                @Override
+                public void onFailure(Exception shardFailedError) {
+                    if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
+                        onPrimaryDemoted.accept(shardFailedError);
+                    } else {
+                        // these can occur if the node is shutting down and are okay
+                        // any other exception here is not expected and merits investigation
+                        assert shardFailedError instanceof TransportException ||
+                            shardFailedError instanceof NodeClosedException : shardFailedError;
+                        onIgnoredFailure.accept(shardFailedError);
+                    }
+                }
+            };
+        }
     }
 
 /**
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
index 28b8f0826cd91..2a3e8be7aa8bb 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java
@@ -384,41 +384,16 @@ class WriteActionReplicasProxy extends ReplicasProxy {
 
         @Override
         public void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess,
                                       Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
-
-            logger.warn((org.apache.logging.log4j.util.Supplier<?>)
-                () -> new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception);
+            logger.warn(new ParameterizedMessage("[{}] {}", replica.shardId(), message), exception);
             shardStateAction.remoteShardFailed(replica.shardId(), replica.allocationId().getId(), primaryTerm, true, message, exception,
-                createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
+                createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
         }
 
         @Override
         public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess,
                                                  Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
             shardStateAction.remoteShardFailed(shardId, allocationId, primaryTerm, true, "mark copy as stale", null,
-                createListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
-        }
-
-        private ShardStateAction.Listener createListener(final Runnable onSuccess, final Consumer<Exception> onPrimaryDemoted,
-                                                         final Consumer<Exception> onIgnoredFailure) {
-            return new ShardStateAction.Listener() {
-                @Override
-                public void onSuccess() {
-                    onSuccess.run();
-                }
-
-                @Override
-                public void onFailure(Exception shardFailedError) {
-                    if (shardFailedError instanceof ShardStateAction.NoLongerPrimaryShardException) {
-                        onPrimaryDemoted.accept(shardFailedError);
-                    } else {
-                        // these can occur if the node is shutting down and are okay
-                        // any other exception here is not expected and merits investigation
-                        assert shardFailedError instanceof TransportException ||
-                            shardFailedError instanceof NodeClosedException : shardFailedError;
-                        onIgnoredFailure.accept(shardFailedError);
-                    }
-                }
-            };
+                createShardActionListener(onSuccess, onPrimaryDemoted, onIgnoredFailure));
         }
     }
 }
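Both replica proxies now share a single listener whose only job is to route the outcome of a shard-failed request to the right callback. The sketch below shows that dispatch rule in isolation; it is plain illustrative Java, with `IllegalStateException` standing in for `ShardStateAction.NoLongerPrimaryShardException`:

    import java.util.function.Consumer;

    public class FailureDispatchSketch {
        static void onFailure(Exception e, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
            if (e instanceof IllegalStateException) {   // i.e. the primary was demoted
                onPrimaryDemoted.accept(e);
            } else {
                // transport/node-closed failures happen during shutdown and are okay to ignore
                onIgnoredFailure.accept(e);
            }
        }

        public static void main(String[] args) {
            onFailure(new IllegalStateException("no longer primary"),
                ex -> System.out.println("demoted: " + ex.getMessage()),
                ex -> System.out.println("ignored: " + ex.getMessage()));
        }
    }
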
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
index 8e50fddb9b17e..e30f02ad4060d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
@@ -23,6 +23,7 @@
 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.stream.Stream;
 
 public interface ClusterStateTaskExecutor<T> {
     /**
@@ -55,15 +56,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
      * This allows grouping task descriptions but not the submitting source.
      */
     default String describeTasks(List<T> tasks) {
-        return tasks.stream().map(T::toString).reduce((s1,s2) -> {
-            if (s1.isEmpty()) {
-                return s2;
-            } else if (s2.isEmpty()) {
-                return s1;
-            } else {
-                return s1 + ", " + s2;
-            }
-        }).orElse("");
+        return String.join(", ", tasks.stream().map(t -> (CharSequence)t.toString()).filter(t -> t.length() > 0)::iterator);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 8c6829ca78734..06aa51f612bcc 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -43,7 +43,7 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException;
+import org.elasticsearch.common.xcontent.UnknownNamedObjectException;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index a116bc369b5e4..7e8806ee65865 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -352,7 +352,7 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt
 
         @Override
         public String describeTasks(List<PutMappingClusterStateUpdateRequest> tasks) {
-            return tasks.stream().map(PutMappingClusterStateUpdateRequest::type).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
+            return String.join(", ", tasks.stream().map(t -> (CharSequence)t.type())::iterator);
         }
     }
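Both `describeTasks` implementations rely on the same idiom: a bound method reference `stream::iterator` satisfies `Iterable`, so `String.join` can consume the stream lazily. A runnable illustration with made-up task descriptions:

    import java.util.Arrays;
    import java.util.List;

    public class DescribeTasksSketch {
        public static void main(String[] args) {
            List<String> tasks = Arrays.asList("put-mapping [doc]", "", "put-mapping [blog]");
            // filter drops empty descriptions; Stream::iterator adapts the stream to an Iterable
            String joined = String.join(", ",
                tasks.stream().map(t -> (CharSequence) t).filter(t -> t.length() > 0)::iterator);
            System.out.println(joined); // put-mapping [doc], put-mapping [blog]
        }
    }
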
diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
index 057d37d5999a2..5522e37f71a18 100644
--- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
+++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
@@ -40,6 +40,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.stream.StreamSupport;
 
 /**
  * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to
@@ -232,10 +233,6 @@ public DiscoveryNode findByAddress(TransportAddress address) {
         return null;
     }
 
-    public boolean isAllNodes(String... nodesIds) {
-        return nodesIds == null || nodesIds.length == 0 || (nodesIds.length == 1 && nodesIds[0].equals("_all"));
-    }
-
     /**
      * Returns the version of the node with the oldest version in the cluster that is not a client node
      *
@@ -304,13 +301,8 @@ public DiscoveryNode resolveNode(String node) {
      * or a generic node attribute name in which case value will be treated as a wildcard and matched against the node attribute values.
      */
     public String[] resolveNodes(String... nodes) {
-        if (isAllNodes(nodes)) {
-            int index = 0;
-            nodes = new String[this.nodes.size()];
-            for (DiscoveryNode node : this) {
-                nodes[index++] = node.getId();
-            }
-            return nodes;
+        if (nodes == null || nodes.length == 0) {
+            return StreamSupport.stream(this.spliterator(), false).map(DiscoveryNode::getId).toArray(String[]::new);
         } else {
             ObjectHashSet<String> resolvedNodesIds = new ObjectHashSet<>(nodes.length);
             for (String nodeId : nodes) {
@@ -327,16 +319,11 @@
                 } else if (nodeExists(nodeId)) {
                     resolvedNodesIds.add(nodeId);
                 } else {
-                    // not a node id, try and search by name
-                    for (DiscoveryNode node : this) {
-                        if (Regex.simpleMatch(nodeId, node.getName())) {
-                            resolvedNodesIds.add(node.getId());
-                        }
-                    }
                     for (DiscoveryNode node : this) {
-                        if (Regex.simpleMatch(nodeId, node.getHostAddress())) {
-                            resolvedNodesIds.add(node.getId());
-                        } else if (Regex.simpleMatch(nodeId, node.getHostName())) {
+                        if ("_all".equals(nodeId)
+                            || Regex.simpleMatch(nodeId, node.getName())
+                            || Regex.simpleMatch(nodeId, node.getHostAddress())
+                            || Regex.simpleMatch(nodeId, node.getHostName())) {
                             resolvedNodesIds.add(node.getId());
                         }
                     }
diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java
index 02a0852b0a03a..8c823f401a0f8 100644
--- a/server/src/main/java/org/elasticsearch/common/Strings.java
+++ b/server/src/main/java/org/elasticsearch/common/Strings.java
@@ -23,7 +23,6 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.io.FastStringReader;
 import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -31,6 +30,7 @@
 
 import java.io.BufferedReader;
 import java.io.IOException;
+import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -51,7 +51,7 @@ public class Strings {
     public static final String[] EMPTY_ARRAY = new String[0];
 
     public static void spaceify(int spaces, String from, StringBuilder to) throws Exception {
-        try (BufferedReader reader = new BufferedReader(new FastStringReader(from))) {
+        try (BufferedReader reader = new BufferedReader(new StringReader(from))) {
             String line;
             while ((line = reader.readLine()) != null) {
                 for (int i = 0; i < spaces; i++) {
diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java
index 38643df017943..2a8110c5f4dc2 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.geo.GeoShapeType;
+import java.io.StringReader;
 import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
import org.elasticsearch.common.geo.builders.EnvelopeBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; @@ -32,7 +33,6 @@ import org.elasticsearch.common.geo.builders.PointBuilder; import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentParser; @@ -69,7 +69,7 @@ public static ShapeBuilder parse(XContentParser parser) /** throws an exception if the parsed geometry type does not match the expected shape type */ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) throws IOException, ElasticsearchParseException { - FastStringReader reader = new FastStringReader(parser.text()); + StringReader reader = new StringReader(parser.text()); try { // setup the tokenizer; configured to read words w/o numbers StreamTokenizer tokenizer = new StreamTokenizer(reader); diff --git a/server/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java b/server/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java deleted file mode 100644 index f75d8d1c96a63..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/io/FastCharArrayReader.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.io; - -import java.io.IOException; -import java.io.Reader; - -public class FastCharArrayReader extends Reader { - - /** - * The character buffer. - */ - protected char buf[]; - - /** - * The current buffer position. - */ - protected int pos; - - /** - * The position of mark in buffer. - */ - protected int markedPos = 0; - - /** - * The index of the end of this buffer. There is not valid - * data at or beyond this index. - */ - protected int count; - - /** - * Creates a CharArrayReader from the specified array of chars. - * - * @param buf Input buffer (not copied) - */ - public FastCharArrayReader(char buf[]) { - this.buf = buf; - this.pos = 0; - this.count = buf.length; - } - - /** - * Creates a CharArrayReader from the specified array of chars. - *
- * <p>
- * The resulting reader will start reading at the given - * offset. The total number of char values that can be - * read from this reader will be either length or - * buf.length-offset, whichever is smaller. - * - * @param buf Input buffer (not copied) - * @param offset Offset of the first char to read - * @param length Number of chars to read - * @throws IllegalArgumentException If offset is negative or greater than - * buf.length, or if length is negative, or if - * the sum of these two values is negative. - */ - public FastCharArrayReader(char buf[], int offset, int length) { - if ((offset < 0) || (offset > buf.length) || (length < 0) || - ((offset + length) < 0)) { - throw new IllegalArgumentException(); - } - this.buf = buf; - this.pos = offset; - this.count = Math.min(offset + length, buf.length); - this.markedPos = offset; - } - - /** - * Checks to make sure that the stream has not been closed - */ - private void ensureOpen() throws IOException { - if (buf == null) - throw new IOException("Stream closed"); - } - - /** - * Reads a single character. - * - * @throws IOException If an I/O error occurs - */ - @Override - public int read() throws IOException { - ensureOpen(); - if (pos >= count) - return -1; - else - return buf[pos++]; - } - - /** - * Reads characters into a portion of an array. - * - * @param b Destination buffer - * @param off Offset at which to start storing characters - * @param len Maximum number of characters to read - * @return The actual number of characters read, or -1 if - * the end of the stream has been reached - * @throws IOException If an I/O error occurs - */ - @Override - public int read(char b[], int off, int len) throws IOException { - ensureOpen(); - if ((off < 0) || (off > b.length) || (len < 0) || - ((off + len) > b.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { - return 0; - } - - if (pos >= count) { - return -1; - } - if (pos + len > count) { - len = count - pos; - } - if (len <= 0) { - return 0; - } - System.arraycopy(buf, pos, b, off, len); - pos += len; - return len; - } - - /** - * Skips characters. Returns the number of characters that were skipped. - *
- * <p>
- * The n parameter may be negative, even though the - * skip method of the {@link Reader} superclass throws - * an exception in this case. If n is negative, then - * this method does nothing and returns 0. - * - * @param n The number of characters to skip - * @return The number of characters actually skipped - * @throws IOException If the stream is closed, or an I/O error occurs - */ - @Override - public long skip(long n) throws IOException { - ensureOpen(); - if (pos + n > count) { - n = count - pos; - } - if (n < 0) { - return 0; - } - pos += n; - return n; - } - - /** - * Tells whether this stream is ready to be read. Character-array readers - * are always ready to be read. - * - * @throws IOException If an I/O error occurs - */ - @Override - public boolean ready() throws IOException { - ensureOpen(); - return (count - pos) > 0; - } - - /** - * Tells whether this stream supports the mark() operation, which it does. - */ - @Override - public boolean markSupported() { - return true; - } - - /** - * Marks the present position in the stream. Subsequent calls to reset() - * will reposition the stream to this point. - * - * @param readAheadLimit Limit on the number of characters that may be - * read while still preserving the mark. Because - * the stream's input comes from a character array, - * there is no actual limit; hence this argument is - * ignored. - * @throws IOException If an I/O error occurs - */ - @Override - public void mark(int readAheadLimit) throws IOException { - ensureOpen(); - markedPos = pos; - } - - /** - * Resets the stream to the most recent mark, or to the beginning if it has - * never been marked. - * - * @throws IOException If an I/O error occurs - */ - @Override - public void reset() throws IOException { - ensureOpen(); - pos = markedPos; - } - - /** - * Closes the stream and releases any system resources associated with - * it. Once the stream has been closed, further read(), ready(), - * mark(), reset(), or skip() invocations will throw an IOException. - * Closing a previously closed stream has no effect. - */ - @Override - public void close() { - buf = null; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java b/server/src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java deleted file mode 100644 index 87313eae7f938..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/io/FastCharArrayWriter.java +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.io; - -import java.io.IOException; -import java.io.Writer; -import java.util.Arrays; - -/** - * A similar class to {@link java.io.CharArrayWriter} allowing to get the underlying char[] buffer. 
- */ -public class FastCharArrayWriter extends Writer { - - /** - * The buffer where data is stored. - */ - protected char buf[]; - - /** - * The number of chars in the buffer. - */ - protected int count; - - /** - * Creates a new CharArrayWriter. - */ - public FastCharArrayWriter() { - this(32); - } - - /** - * Creates a new CharArrayWriter with the specified initial size. - * - * @param initialSize an int specifying the initial buffer size. - * @throws IllegalArgumentException if initialSize is negative - */ - public FastCharArrayWriter(int initialSize) { - if (initialSize < 0) { - throw new IllegalArgumentException("Negative initial size: " - + initialSize); - } - buf = new char[initialSize]; - } - - /** - * Writes a character to the buffer. - */ - @Override - public void write(int c) { - int newcount = count + 1; - if (newcount > buf.length) { - buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount)); - } - buf[count] = (char) c; - count = newcount; - } - - /** - * Writes characters to the buffer. - * - * @param c the data to be written - * @param off the start offset in the data - * @param len the number of chars that are written - */ - @Override - public void write(char c[], int off, int len) { - if ((off < 0) || (off > c.length) || (len < 0) || - ((off + len) > c.length) || ((off + len) < 0)) { - throw new IndexOutOfBoundsException(); - } else if (len == 0) { - return; - } - int newcount = count + len; - if (newcount > buf.length) { - buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount)); - } - System.arraycopy(c, off, buf, count, len); - count = newcount; - } - - /** - * Write a portion of a string to the buffer. - * - * @param str String to be written from - * @param off Offset from which to start reading characters - * @param len Number of characters to be written - */ - @Override - public void write(String str, int off, int len) { - int newcount = count + len; - if (newcount > buf.length) { - buf = Arrays.copyOf(buf, Math.max(buf.length << 1, newcount)); - } - str.getChars(off, off + len, buf, count); - count = newcount; - } - - /** - * Writes the contents of the buffer to another character stream. - * - * @param out the output stream to write to - * @throws java.io.IOException If an I/O error occurs. - */ - public void writeTo(Writer out) throws IOException { - out.write(buf, 0, count); - } - - /** - * Appends the specified character sequence to this writer. - *
- * <p>
- * An invocation of this method of the form out.append(csq) - * behaves in exactly the same way as the invocation - *
- * <pre>
-     *     out.write(csq.toString()) </pre>
- * - *
- * <p>
Depending on the specification of toString for the - * character sequence csq, the entire sequence may not be - * appended. For instance, invoking the toString method of a - * character buffer will return a subsequence whose content depends upon - * the buffer's position and limit. - * - * @param csq The character sequence to append. If csq is - * null, then the four characters "null" are - * appended to this writer. - * @return This writer - * @since 1.5 - */ - @Override - public FastCharArrayWriter append(CharSequence csq) { - String s = (csq == null ? "null" : csq.toString()); - write(s, 0, s.length()); - return this; - } - - /** - * Appends a subsequence of the specified character sequence to this writer. - *
- * <p>
- * An invocation of this method of the form out.append(csq, start, - * end) when csq is not null, behaves in - * exactly the same way as the invocation - *
- * <pre>
-     *     out.write(csq.subSequence(start, end).toString()) </pre>
- * - * @param csq The character sequence from which a subsequence will be - * appended. If csq is null, then characters - * will be appended as if csq contained the four - * characters "null". - * @param start The index of the first character in the subsequence - * @param end The index of the character following the last character in the - * subsequence - * @return This writer - * @throws IndexOutOfBoundsException If start or end are negative, start - * is greater than end, or end is greater than - * csq.length() - * @since 1.5 - */ - @Override - public FastCharArrayWriter append(CharSequence csq, int start, int end) { - String s = (csq == null ? "null" : csq).subSequence(start, end).toString(); - write(s, 0, s.length()); - return this; - } - - /** - * Appends the specified character to this writer. - *
- * <p>
- * An invocation of this method of the form out.append(c) - * behaves in exactly the same way as the invocation - *
- * <pre>
-     *     out.write(c) </pre>
- * - * @param c The 16-bit character to append - * @return This writer - * @since 1.5 - */ - @Override - public FastCharArrayWriter append(char c) { - write(c); - return this; - } - - /** - * Resets the buffer so that you can use it again without - * throwing away the already allocated buffer. - */ - public void reset() { - count = 0; - } - - /** - * Returns a copy of the input data. - * - * @return an array of chars copied from the input data. - */ - public char toCharArray()[] { - return Arrays.copyOf(buf, count); - } - - /** - * Returns the underlying char array. Note, use {@link #size()} in order to know the size of - * of the actual content within the array. - */ - public char[] unsafeCharArray() { - return buf; - } - - /** - * Returns the current size of the buffer. - * - * @return an int representing the current size of the buffer. - */ - public int size() { - return count; - } - - /** - * Converts input data to a string. - * - * @return the string. - */ - @Override - public String toString() { - return new String(buf, 0, count); - } - - /** - * Converts the input data to a string with trimmed whitespaces. - */ - public String toStringTrim() { - int st = 0; - int len = count; - char[] val = buf; /* avoid getfield opcode */ - - while ((st < len) && (val[st] <= ' ')) { - st++; - len--; - } - while ((st < len) && (val[len - 1] <= ' ')) { - len--; - } - return new String(buf, st, len); - } - - /** - * Flush the stream. - */ - @Override - public void flush() { - } - - /** - * Close the stream. This method does not release the buffer, since its - * contents might still be required. Note: Invoking this method in this class - * will have no effect. - */ - @Override - public void close() { - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/io/FastStringReader.java b/server/src/main/java/org/elasticsearch/common/io/FastStringReader.java deleted file mode 100644 index 2ac7e9022e687..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/io/FastStringReader.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.io; - -import java.io.IOException; -import java.io.Reader; - -/** - * A character stream whose source is a string that is not thread safe - *
- * <p>
- * (shay.banon - * ) - */ -public class FastStringReader extends Reader implements CharSequence { - - private String str; - private int length; - private int next = 0; - private int mark = 0; - private boolean closed = false; - - /** - * Creates a new string reader. - * - * @param s String providing the character stream. - */ - public FastStringReader(String s) { - this.str = s; - this.length = s.length(); - } - - /** - * Check to make sure that the stream has not been closed - */ - private void ensureOpen() throws IOException { - if (closed) { - throw new IOException("Stream closed"); - } - } - - @Override - public int length() { - return length; - } - - @Override - public char charAt(int index) { - return str.charAt(index); - } - - @Override - public CharSequence subSequence(int start, int end) { - return str.subSequence(start, end); - } - - /** - * Reads a single character. - * - * @return The character read, or -1 if the end of the stream has been - * reached - * @throws IOException If an I/O error occurs - */ - @Override - public int read() throws IOException { - ensureOpen(); - if (next >= length) - return -1; - return str.charAt(next++); - } - - /** - * Reads characters into a portion of an array. - * - * @param cbuf Destination buffer - * @param off Offset at which to start writing characters - * @param len Maximum number of characters to read - * @return The number of characters read, or -1 if the end of the - * stream has been reached - * @throws IOException If an I/O error occurs - */ - @Override - public int read(char cbuf[], int off, int len) throws IOException { - ensureOpen(); - if (len == 0) { - return 0; - } - if (next >= length) - return -1; - int n = Math.min(length - next, len); - str.getChars(next, next + n, cbuf, off); - next += n; - return n; - } - - /** - * Skips the specified number of characters in the stream. Returns - * the number of characters that were skipped. - *
- * <p>
- * The ns parameter may be negative, even though the - * skip method of the {@link Reader} superclass throws - * an exception in this case. Negative values of ns cause the - * stream to skip backwards. Negative return values indicate a skip - * backwards. It is not possible to skip backwards past the beginning of - * the string. - *
- * <p>
- * If the entire string has been read or skipped, then this method has - * no effect and always returns 0. - * - * @throws IOException If an I/O error occurs - */ - @Override - public long skip(long ns) throws IOException { - ensureOpen(); - if (next >= length) - return 0; - // Bound skip by beginning and end of the source - long n = Math.min(length - next, ns); - n = Math.max(-next, n); - next += n; - return n; - } - - /** - * Tells whether this stream is ready to be read. - * - * @return True if the next read() is guaranteed not to block for input - * @throws IOException If the stream is closed - */ - @Override - public boolean ready() throws IOException { - ensureOpen(); - return true; - } - - /** - * Tells whether this stream supports the mark() operation, which it does. - */ - @Override - public boolean markSupported() { - return true; - } - - /** - * Marks the present position in the stream. Subsequent calls to reset() - * will reposition the stream to this point. - * - * @param readAheadLimit Limit on the number of characters that may be - * read while still preserving the mark. Because - * the stream's input comes from a string, there - * is no actual limit, so this argument must not - * be negative, but is otherwise ignored. - * @throws IllegalArgumentException If readAheadLimit is < 0 - * @throws IOException If an I/O error occurs - */ - @Override - public void mark(int readAheadLimit) throws IOException { - if (readAheadLimit < 0) { - throw new IllegalArgumentException("Read-ahead limit < 0"); - } - ensureOpen(); - mark = next; - } - - /** - * Resets the stream to the most recent mark, or to the beginning of the - * string if it has never been marked. - * - * @throws IOException If an I/O error occurs - */ - @Override - public void reset() throws IOException { - ensureOpen(); - next = mark; - } - - /** - * Closes the stream and releases any system resources associated with - * it. Once the stream has been closed, further read(), - * ready(), mark(), or reset() invocations will throw an IOException. - * Closing a previously closed stream has no effect. 
- */ - @Override - public void close() { - closed = true; - } - - @Override - public String toString() { - return str; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java index 28971fc9ca45e..f79f45f3b62bd 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java @@ -35,10 +35,10 @@ import org.apache.lucene.search.similarities.TFIDFSimilarity; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.FastStringReader; import java.io.IOException; import java.io.Reader; +import java.io.StringReader; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -166,7 +166,7 @@ private Query createQuery(XMoreLikeThis mlt) throws IOException { if (this.likeText != null) { Reader[] readers = new Reader[likeText.length]; for (int i = 0; i < readers.length; i++) { - readers[i] = new FastStringReader(likeText[i]); + readers[i] = new StringReader(likeText[i]); } //LUCENE 4 UPGRADE this mapps the 3.6 behavior (only use the first field) Query mltQuery = mlt.like(moreLikeFields[0], readers); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java index e973689615ed7..5d1e4537f6561 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java @@ -58,10 +58,10 @@ import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.FastStringReader; import java.io.IOException; import java.io.Reader; +import java.io.StringReader; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -815,7 +815,7 @@ private PriorityQueue retrieveTerms(int docNum) throws IOException { for (IndexableField field : fields) { final String stringValue = field.stringValue(); if (stringValue != null) { - addTermFrequencies(new FastStringReader(stringValue), termFreqMap, fieldName); + addTermFrequencies(new StringReader(stringValue), termFreqMap, fieldName); } } } else { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java b/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java new file mode 100644 index 0000000000000..21c0ea5fdd08b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
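The MoreLikeThisQuery and XMoreLikeThis hunks above show the pattern this patch applies at every former FastStringReader call site: the private, unsynchronized reader is deleted and the plain JDK java.io.StringReader takes its place. A minimal sketch of the swap (the wrapper class and content string are illustrative, not from the patch):

--------------------------------
import java.io.Reader;
import java.io.StringReader;

class ReaderSwapSketch {
    // was: return new FastStringReader(content);
    Reader open(String content) {
        return new StringReader(content);
    }
}
--------------------------------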
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +/** + * Helpers for dealing with boolean values. Package-visible only so that only XContent classes use them. + */ +final class Booleans { + /** + * Parse {@code value} with values "true", "false", or null, returning the + * default value if null or the empty string is used. Any other input + * results in an {@link IllegalArgumentException} being thrown. + */ + static boolean parseBoolean(String value, Boolean defaultValue) { + if (value != null && value.length() > 0) { + switch (value) { + case "true": + return true; + case "false": + return false; + default: + throw new IllegalArgumentException("Failed to parse param [" + value + "] as only [true] or [false] are allowed."); + } + } else { + return defaultValue; + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java b/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java index 4fb397dbe1751..c19a667776f2e 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java @@ -23,8 +23,6 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.ArrayList; @@ -36,7 +34,6 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static java.util.Objects.requireNonNull; public class NamedXContentRegistry { /** @@ -143,50 +140,4 @@ public T parseNamedObject(Class categoryClass, String name, XContentPa return categoryClass.cast(entry.parser.parse(parser, context)); } - /** - * Thrown when {@link NamedXContentRegistry#parseNamedObject(Class, String, XContentParser, Object)} is called with an unregistered - * name. When this bubbles up to the rest layer it is converted into a response with {@code 400 BAD REQUEST} status. - */ - public static class UnknownNamedObjectException extends ParsingException { - private final String categoryClass; - private final String name; - - public UnknownNamedObjectException(XContentLocation contentLocation, Class categoryClass, - String name) { - super(contentLocation, "Unknown " + categoryClass.getSimpleName() + " [" + name + "]"); - this.categoryClass = requireNonNull(categoryClass, "categoryClass is required").getName(); - this.name = requireNonNull(name, "name is required"); - } - - /** - * Read from a stream. - */ - public UnknownNamedObjectException(StreamInput in) throws IOException { - super(in); - categoryClass = in.readString(); - name = in.readString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(categoryClass); - out.writeString(name); - } - - /** - * Category class that was missing a parser. This is a String instead of a class because the class might not be on the classpath - * of all nodes or it might be exclusive to a plugin or something. - */ - public String getCategoryClass() { - return categoryClass; - } - - /** - * Name of the missing parser. 
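The new Booleans helper above is deliberately strict: only the exact strings "true" and "false" parse, null and the empty string fall back to the supplied default, and anything else throws. A short sketch of that contract (the wrapper class is illustrative; the calls follow the javadoc above):

--------------------------------
package org.elasticsearch.common.xcontent;

class BooleansSketch {
    static void demo() {
        assert Booleans.parseBoolean(null, true);             // null -> default
        assert Booleans.parseBoolean("", false) == false;     // empty -> default
        assert Booleans.parseBoolean("false", true) == false; // explicit value wins
        // Booleans.parseBoolean("yes", true) throws IllegalArgumentException
    }
}
--------------------------------

XContent.isStrictDuplicateDetectionEnabled(), further down in this patch, is the first consumer: it passes the es.xcontent.strict_duplicate_detection system property with a default of true.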
- */ - public String getName() { - return name; - } - } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java index 3006363a4ddd4..f74bdec17a9f6 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.xcontent; -import org.elasticsearch.common.Booleans; - import java.io.IOException; import java.util.Map; @@ -132,4 +130,5 @@ public Boolean paramAsBoolean(String key, Boolean defaultValue) { default boolean isFragment() { return true; } + } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/UnknownNamedObjectException.java b/server/src/main/java/org/elasticsearch/common/xcontent/UnknownNamedObjectException.java new file mode 100644 index 0000000000000..0475ab334d388 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/UnknownNamedObjectException.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static java.util.Objects.requireNonNull; + +/** + * Thrown when {@link NamedXContentRegistry#parseNamedObject(Class, String, XContentParser, Object)} is called with an unregistered + * name. When this bubbles up to the rest layer it is converted into a response with {@code 400 BAD REQUEST} status. + */ +public class UnknownNamedObjectException extends ParsingException { + private final String categoryClass; + private final String name; + + public UnknownNamedObjectException(XContentLocation contentLocation, Class categoryClass, String name) { + super(contentLocation, "Unknown " + categoryClass.getSimpleName() + " [" + name + "]"); + this.categoryClass = requireNonNull(categoryClass, "categoryClass is required").getName(); + this.name = requireNonNull(name, "name is required"); + } + + /** + * Read from a stream. + */ + public UnknownNamedObjectException(StreamInput in) throws IOException { + super(in); + categoryClass = in.readString(); + name = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(categoryClass); + out.writeString(name); + } + + /** + * Category class that was missing a parser. This is a String instead of a class because the class might not be on the classpath + * of all nodes or it might be exclusive to a plugin or something. 
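With the exception promoted to a top-level class (the new file above), callers import org.elasticsearch.common.xcontent.UnknownNamedObjectException directly; the AbstractQueryBuilder and test hunks below make exactly that switch. A minimal handling sketch, assuming an XContentParser is in scope (the rethrow wrapper is illustrative):

--------------------------------
import org.elasticsearch.common.xcontent.UnknownNamedObjectException;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

class NamedObjectSketch {
    Object parseNamed(XContentParser parser, String name) throws IOException {
        try {
            return parser.namedObject(Object.class, name, null);
        } catch (UnknownNamedObjectException e) {
            // getCategoryClass() and getName() survive node-to-node serialization,
            // and the REST layer maps this exception to 400 BAD REQUEST
            throw new IllegalArgumentException(
                    "no [" + e.getCategoryClass() + "] parser named [" + e.getName() + "]", e);
        }
    }
}
--------------------------------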
+ */ + public String getCategoryClass() { + return categoryClass; + } + + /** + * Name of the missing parser. + */ + public String getName() { + return name; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java index c7118f025ee04..6f6ee4ffdda54 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -19,9 +19,6 @@ package org.elasticsearch.common.xcontent; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.bytes.BytesReference; - import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; @@ -52,7 +49,7 @@ public interface XContent { */ static boolean isStrictDuplicateDetectionEnabled() { // Don't allow duplicate keys in JSON content by default but let the user opt out - return Booleans.parseBoolean(System.getProperty("es.xcontent.strict_duplicate_detection", "true")); + return Booleans.parseBoolean(System.getProperty("es.xcontent.strict_duplicate_detection", "true"), true); } /** @@ -104,15 +101,6 @@ XContentParser createParser(NamedXContentRegistry xContentRegistry, XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length) throws IOException; - /** - * Creates a parser over the provided bytes. - * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, InputStream)} instead, - * the BytesReference coupling in this class will be removed in a future commit - */ - @Deprecated - XContentParser createParser(NamedXContentRegistry xContentRegistry, - DeprecationHandler deprecationHandler, BytesReference bytes) throws IOException; - /** * Creates a parser over the provided reader. */ diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index e673c2a4b7ca2..16f0ac83a849f 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -20,10 +20,10 @@ package org.elasticsearch.common.xcontent; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.BytesStream; -import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeValue; @@ -34,6 +34,7 @@ import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISODateTimeFormat; +import java.io.ByteArrayOutputStream; import java.io.Flushable; import java.io.IOException; import java.io.InputStream; @@ -58,7 +59,7 @@ public final class XContentBuilder implements Releasable, Flushable { /** * Create a new {@link XContentBuilder} using the given {@link XContent} content. *
- * The builder uses an internal {@link BytesStreamOutput} output stream to build the content. + * The builder uses an internal {@link ByteArrayOutputStream} output stream to build the content. *
* * @param xContent the {@link XContent} @@ -66,13 +67,13 @@ public final class XContentBuilder implements Releasable, Flushable { * @throws IOException if an {@link IOException} occurs while building the content */ public static XContentBuilder builder(XContent xContent) throws IOException { - return new XContentBuilder(xContent, new BytesStreamOutput()); + return new XContentBuilder(xContent, new ByteArrayOutputStream()); } /** * Create a new {@link XContentBuilder} using the given {@link XContent} content and some inclusive and/or exclusive filters. *
- * The builder uses an internal {@link BytesStreamOutput} output stream to build the content. When both exclusive and + * The builder uses an internal {@link ByteArrayOutputStream} output stream to build the content. When both exclusive and * inclusive filters are provided, the underlying builder will first use exclusion filters to remove fields and then will check the * remaining fields against the inclusive filters. *
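Both factory methods in this file now allocate a JDK ByteArrayOutputStream instead of BytesStreamOutput, and bytes() (at the end of this hunk) wraps the accumulated bytes in a BytesArray, consistent with the removal of BytesReference from the XContent interfaces elsewhere in this patch. Usage is unchanged; a minimal sketch:

--------------------------------
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.IOException;

class BuilderSketch {
    BytesReference build() throws IOException {
        XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent);
        builder.startObject().field("field", "value").endObject();
        return builder.bytes(); // now a BytesArray over the stream's byte[]
    }
}
--------------------------------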
@@ -83,7 +84,7 @@ public static XContentBuilder builder(XContent xContent) throws IOException { * @throws IOException if an {@link IOException} occurs while building the content */ public static XContentBuilder builder(XContent xContent, Set includes, Set excludes) throws IOException { - return new XContentBuilder(xContent, new BytesStreamOutput(), includes, excludes); + return new XContentBuilder(xContent, new ByteArrayOutputStream(), includes, excludes); } public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); @@ -1036,7 +1037,11 @@ public XContentGenerator generator() { public BytesReference bytes() { close(); - return ((BytesStream) bos).bytes(); + if (bos instanceof ByteArrayOutputStream) { + return new BytesArray(((ByteArrayOutputStream) bos).toByteArray()); + } else { + return ((BytesStream) bos).bytes(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index f05b38fb20e6a..58a9e9a98f833 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -24,8 +24,6 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -38,6 +36,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; +import java.io.StringReader; import java.util.Set; /** @@ -82,7 +81,7 @@ public XContentGenerator createGenerator(OutputStream os, Set includes, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, String content) throws IOException { - return new CborXContentParser(xContentRegistry, deprecationHandler, cborFactory.createParser(new FastStringReader(content))); + return new CborXContentParser(xContentRegistry, deprecationHandler, cborFactory.createParser(new StringReader(content))); } @Override @@ -103,12 +102,6 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return new CborXContentParser(xContentRegistry, deprecationHandler, cborFactory.createParser(data, offset, length)); } - @Override - public XContentParser createParser(NamedXContentRegistry xContentRegistry, - DeprecationHandler deprecationHandler, BytesReference bytes) throws IOException { - return createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); - } - @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java index 7f5174d272266..b2aac37abe57d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java @@ -23,8 +23,6 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import 
com.fasterxml.jackson.core.JsonParser; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -37,6 +35,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; +import java.io.StringReader; import java.util.Set; /** @@ -83,7 +82,7 @@ public XContentGenerator createGenerator(OutputStream os, Set includes, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, String content) throws IOException { - return new JsonXContentParser(xContentRegistry, deprecationHandler, jsonFactory.createParser(new FastStringReader(content))); + return new JsonXContentParser(xContentRegistry, deprecationHandler, jsonFactory.createParser(new StringReader(content))); } @Override @@ -104,12 +103,6 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return new JsonXContentParser(xContentRegistry, deprecationHandler, jsonFactory.createParser(data, offset, length)); } - @Override - public XContentParser createParser(NamedXContentRegistry xContentRegistry, - DeprecationHandler deprecationHandler, BytesReference bytes) throws IOException { - return createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); - } - @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index 17de93d87baaf..caf6488eea398 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -24,8 +24,6 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.smile.SmileFactory; import com.fasterxml.jackson.dataformat.smile.SmileGenerator; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -38,6 +36,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; +import java.io.StringReader; import java.util.Set; /** @@ -83,7 +82,7 @@ public XContentGenerator createGenerator(OutputStream os, Set includes, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, String content) throws IOException { - return new SmileXContentParser(xContentRegistry, deprecationHandler, smileFactory.createParser(new FastStringReader(content))); + return new SmileXContentParser(xContentRegistry, deprecationHandler, smileFactory.createParser(new StringReader(content))); } @Override @@ -104,12 +103,6 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return new SmileXContentParser(xContentRegistry, deprecationHandler, smileFactory.createParser(data, offset, length)); } - @Override - public XContentParser createParser(NamedXContentRegistry xContentRegistry, - DeprecationHandler deprecationHandler, BytesReference bytes) throws IOException { - 
return createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); - } - @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java index 3547440eb8b32..5c335276bc024 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java @@ -22,9 +22,6 @@ import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; -import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -37,6 +34,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Reader; +import java.io.StringReader; import java.util.Set; /** @@ -67,7 +65,7 @@ public XContentType type() { @Override public byte streamSeparator() { - throw new ElasticsearchParseException("yaml does not support stream parsing..."); + throw new UnsupportedOperationException("yaml does not support stream parsing..."); } @Override @@ -78,7 +76,7 @@ public XContentGenerator createGenerator(OutputStream os, Set includes, @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, String content) throws IOException { - return new YamlXContentParser(xContentRegistry, deprecationHandler, yamlFactory.createParser(new FastStringReader(content))); + return new YamlXContentParser(xContentRegistry, deprecationHandler, yamlFactory.createParser(new StringReader(content))); } @Override @@ -99,12 +97,6 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return new YamlXContentParser(xContentRegistry, deprecationHandler, yamlFactory.createParser(data, offset, length)); } - @Override - public XContentParser createParser(NamedXContentRegistry xContentRegistry, - DeprecationHandler deprecationHandler, BytesReference bytes) throws IOException { - return createParser(xContentRegistry, deprecationHandler, bytes.streamInput()); - } - @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index 37e96cbb54a57..56bae57198829 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -25,13 +25,13 @@ import org.apache.lucene.analysis.synonym.SynonymFilter; import org.apache.lucene.analysis.synonym.SynonymMap; import org.apache.lucene.analysis.synonym.WordnetSynonymParser; -import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import java.io.IOException; import 
java.io.Reader; +import java.io.StringReader; import java.util.List; public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { @@ -68,7 +68,7 @@ protected Reader getRulesFromSettings(Environment env) { for (String line : rulesList) { sb.append(line).append(System.lineSeparator()); } - rulesReader = new FastStringReader(sb.toString()); + rulesReader = new StringReader(sb.toString()); } else if (settings.get("synonyms_path") != null) { rulesReader = Analysis.getReaderFromFile(env, settings, "synonyms_path"); } else { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index fb937ed4e9302..1452c5de49278 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -847,11 +847,35 @@ public void forceMerge(boolean flush) throws IOException { */ public abstract IndexCommitRef acquireSafeIndexCommit() throws EngineException; + /** + * If the specified throwable contains a fatal error in the throwable graph, such a fatal error will be thrown. Callers should ensure + * that there are no catch statements that would catch an error in the stack as the fatal error here should go uncaught and be handled + * by the uncaught exception handler that we install during bootstrap. If the specified throwable does indeed contain a fatal error, the + * specified message will attempt to be logged before throwing the fatal error. If the specified throwable does not contain a fatal + * error, this method is a no-op. + * + * @param maybeMessage the message to maybe log + * @param maybeFatal the throwable that maybe contains a fatal error + */ + @SuppressWarnings("finally") + private void maybeDie(final String maybeMessage, final Throwable maybeFatal) { + ExceptionsHelper.maybeError(maybeFatal, logger).ifPresent(error -> { + try { + logger.error(maybeMessage, error); + } finally { + throw error; + } + }); + } + /** * fail engine due to some error. the engine will also be closed. * The underlying store is marked corrupted iff failure is caused by index corruption */ public void failEngine(String reason, @Nullable Exception failure) { + if (failure != null) { + maybeDie(reason, failure); + } if (failEngineLock.tryLock()) { store.incRef(); try { diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 100a133042d74..0b67ab21329ef 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1730,7 +1730,6 @@ private boolean failOnTragicEvent(AlreadyClosedException ex) { // we need to fail the engine. it might have already been failed before // but we are double-checking it's failed and closed if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - maybeDie("tragic event in index writer", indexWriter.getTragicException()); failEngine("already closed by tragic event on the index writer", (Exception) indexWriter.getTragicException()); engineFailed = true; } else if (translog.isOpen() == false && translog.getTragicException() != null) { @@ -2080,34 +2079,12 @@ protected void doRun() throws Exception { * confidence that the call stack does not contain catch statements that would cause the error that might be thrown * here from being caught and never reaching the uncaught exception handler. 
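The maybeDie helper added to Engine above centralizes that concern: dig a fatal Error out of the throwable graph, make a best-effort attempt to log it, and rethrow from a finally block so that a failure while logging cannot swallow the Error. A condensed, self-contained sketch of the pattern (the Optional stand-in replaces ExceptionsHelper.maybeError, which is assumed to search the whole throwable graph):

--------------------------------
import java.util.Optional;

class MaybeDieSketch {
    @SuppressWarnings("finally")
    static void maybeDie(final String message, final Throwable t) {
        // stand-in for ExceptionsHelper.maybeError(t, logger)
        Optional<Error> error = t instanceof Error ? Optional.of((Error) t) : Optional.empty();
        error.ifPresent(e -> {
            try {
                System.err.println(message); // best-effort logging
            } finally {
                throw e; // rethrown even if logging itself failed
            }
        });
    }
}
--------------------------------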
*/ - maybeDie("fatal error while merging", exc); - logger.error("failed to merge", exc); failEngine("merge failed", new MergePolicy.MergeException(exc, dir)); } }); } } - /** - * If the specified throwable is a fatal error, this throwable will be thrown. Callers should ensure that there are no catch statements - * that would catch an error in the stack as the fatal error here should go uncaught and be handled by the uncaught exception handler - * that we install during bootstrap. If the specified throwable is indeed a fatal error, the specified message will attempt to be logged - * before throwing the fatal error. If the specified throwable is not a fatal error, this method is a no-op. - * - * @param maybeMessage the message to maybe log - * @param maybeFatal the throwable that is maybe fatal - */ - @SuppressWarnings("finally") - private void maybeDie(final String maybeMessage, final Throwable maybeFatal) { - if (maybeFatal instanceof Error) { - try { - logger.error(maybeMessage, maybeFatal); - } finally { - throw (Error) maybeFatal; - } - } - } - /** * Commits the specified index writer. * diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index d1337d5258aa9..d272bb29fbfa6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.xcontent.AbstractObjectParser; -import org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException; +import org.elasticsearch.common.xcontent.UnknownNamedObjectException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 4a018ca025896..ad2b8643f7ae3 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -25,8 +25,6 @@ import java.util.List; import java.util.Map; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -34,8 +32,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; -import static org.elasticsearch.common.settings.Setting.Property; - /** * Holder class for several ingest related services. 
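The PipelineStore hunk that follows stops one broken pipeline definition from failing the whole cluster-state update: each unparsable pipeline is substituted with a placeholder whose processor rejects documents at execute time, and all parse errors are rethrown together once the map is built. A self-contained sketch of that rethrow-and-suppress idiom (assumed to match what ExceptionsHelper.rethrowAndSuppress does; it is not the actual implementation):

--------------------------------
import java.util.List;

class RethrowSketch {
    // keep the first exception as primary, attach the rest as suppressed,
    // and throw only after every pipeline has been handled
    static void rethrowAndSuppress(List<Exception> exceptions) throws Exception {
        Exception main = null;
        for (Exception e : exceptions) {
            if (main == null) {
                main = e;
            } else {
                main.addSuppressed(e);
            }
        }
        if (main != null) {
            throw main;
        }
    }
}
--------------------------------

Documents routed through a substituted pipeline now fail individually with the IllegalStateException built in substitutePipeline, instead of the node rejecting the entire ingest metadata.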
*/ diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java index 21372e46e5f3d..c6dce0bd45b3c 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineStore.java @@ -81,16 +81,41 @@ void innerUpdatePipelines(ClusterState previousState, ClusterState state) { } Map pipelines = new HashMap<>(); + List exceptions = new ArrayList<>(); for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { try { pipelines.put(pipeline.getId(), factory.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories)); } catch (ElasticsearchParseException e) { - throw e; + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); + exceptions.add(e); } catch (Exception e) { - throw new ElasticsearchParseException("Error updating pipeline with id [" + pipeline.getId() + "]", e); + ElasticsearchParseException parseException = new ElasticsearchParseException( + "Error updating pipeline with id [" + pipeline.getId() + "]", e); + pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), parseException)); + exceptions.add(parseException); } } this.pipelines = Collections.unmodifiableMap(pipelines); + ExceptionsHelper.rethrowAndSuppress(exceptions); + } + + private Pipeline substitutePipeline(String id, ElasticsearchParseException e) { + String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null; + String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown"; + String errorMessage = "pipeline with id [" + id + "] could not be loaded, caused by [" + e.getDetailedMessage() + "]"; + Processor failureProcessor = new AbstractProcessor(tag) { + @Override + public void execute(IngestDocument ingestDocument) { + throw new IllegalStateException(errorMessage); + } + + @Override + public String getType() { + return type; + } + }; + String description = "this is a placeholder pipeline, because pipeline with id [" + id + "] could not be loaded"; + return new Pipeline(id, description, null, new CompoundProcessor(failureProcessor)); } /** diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 3ed4374ca2ac3..766d171752c16 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -140,7 +140,8 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, // TODO: remove this leniency, but tests bogusly rely on it if (isAccessibleDirectory(pluginsDirectory, logger)) { checkForFailedPluginRemovals(pluginsDirectory); - List plugins = getPluginBundleCollections(pluginsDirectory); + // call findBundles directly to get the meta plugin names + List plugins = findBundles(pluginsDirectory, "plugin"); for (final BundleCollection plugin : plugins) { final Collection bundles = plugin.bundles(); for (final Bundle bundle : bundles) { @@ -173,8 +174,9 @@ public PluginsService(Settings settings, Path configPath, Path modulesDirectory, if (!missingPlugins.isEmpty()) { final String message = String.format( Locale.ROOT, - "missing mandatory plugins [%s]", - Strings.collectionToDelimitedString(missingPlugins, ", ")); + "missing mandatory plugins [%s], found plugins [%s]", + Strings.collectionToDelimitedString(missingPlugins,
", "), + Strings.collectionToDelimitedString(pluginsNames, ", ")); throw new IllegalStateException(message); } } @@ -400,25 +402,6 @@ static void verifyCompatibility(PluginInfo info) { JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } - // similar in impl to getPluginBundles, but DO NOT try to make them share code. - // we don't need to inherit all the leniency, and things are different enough. - static Set getModuleBundles(Path modulesDirectory) throws IOException { - // damn leniency - if (Files.notExists(modulesDirectory)) { - return Collections.emptySet(); - } - Set bundles = new LinkedHashSet<>(); - try (DirectoryStream stream = Files.newDirectoryStream(modulesDirectory)) { - for (Path module : stream) { - PluginInfo info = PluginInfo.readFromProperties(module); - if (bundles.add(new Bundle(info, module)) == false) { - throw new IllegalStateException("duplicate module: " + info); - } - } - } - return bundles; - } - static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException { /* * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the @@ -440,29 +423,29 @@ static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOE } } - /** - * Get the plugin bundles from the specified directory. - * - * @param pluginsDirectory the directory - * @return the set of plugin bundles in the specified directory - * @throws IOException if an I/O exception occurs reading the plugin bundles - */ + /** Get bundles for plugins installed in the given modules directory. */ + static Set getModuleBundles(Path modulesDirectory) throws IOException { + return findBundles(modulesDirectory, "module").stream().flatMap(b -> b.bundles().stream()).collect(Collectors.toSet()); + } + + /** Get bundles for plugins installed in the given plugins directory. 
*/ static Set getPluginBundles(final Path pluginsDirectory) throws IOException { - return getPluginBundleCollections(pluginsDirectory).stream().flatMap(b -> b.bundles().stream()).collect(Collectors.toSet()); + return findBundles(pluginsDirectory, "plugin").stream().flatMap(b -> b.bundles().stream()).collect(Collectors.toSet()); } - private static List getPluginBundleCollections(final Path pluginsDirectory) throws IOException { + // searches subdirectories under the given directory for plugin directories + private static List findBundles(final Path directory, String type) throws IOException { final List bundles = new ArrayList<>(); final Set seenBundles = new HashSet<>(); - final Tuple, Map>> groupedPluginDirs = findGroupedPluginDirs(pluginsDirectory); + final Tuple, Map>> groupedPluginDirs = findGroupedPluginDirs(directory); for (final Path plugin : groupedPluginDirs.v1()) { - final Bundle bundle = bundle(seenBundles, plugin); + final Bundle bundle = readPluginBundle(seenBundles, plugin, type); bundles.add(bundle); } for (final Map.Entry> metaPlugin : groupedPluginDirs.v2().entrySet()) { final List metaPluginBundles = new ArrayList<>(); for (final Path metaPluginPlugin : metaPlugin.getValue()) { - final Bundle bundle = bundle(seenBundles, metaPluginPlugin); + final Bundle bundle = readPluginBundle(seenBundles, metaPluginPlugin, type); metaPluginBundles.add(bundle); } final MetaBundle metaBundle = new MetaBundle(metaPlugin.getKey(), metaPluginBundles); @@ -472,18 +455,19 @@ private static List getPluginBundleCollections(final Path plug return bundles; } - private static Bundle bundle(final Set bundles, final Path plugin) throws IOException { - Loggers.getLogger(PluginsService.class).trace("--- adding plugin [{}]", plugin.toAbsolutePath()); + // get a bundle for a single plugin dir + private static Bundle readPluginBundle(final Set bundles, final Path plugin, String type) throws IOException { + Loggers.getLogger(PluginsService.class).trace("--- adding [{}] [{}]", type, plugin.toAbsolutePath()); final PluginInfo info; try { info = PluginInfo.readFromProperties(plugin); } catch (final IOException e) { - throw new IllegalStateException("Could not load plugin descriptor for existing plugin [" - + plugin.getFileName() + "]. 
Was the plugin built before 2.0?", e); + throw new IllegalStateException("Could not load plugin descriptor for " + type + + " directory [" + plugin.getFileName() + "]", e); } final Bundle bundle = new Bundle(info, plugin); if (bundles.add(bundle) == false) { - throw new IllegalStateException("duplicate plugin: " + info); + throw new IllegalStateException("duplicate " + type + ": " + info); } return bundle; } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index b9983dbf359a6..90e35c34e28f8 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -225,6 +225,9 @@ public CollapseContext build(SearchContext context) { if (context.searchAfter() != null) { throw new SearchContextException(context, "cannot use `collapse` in conjunction with `search_after`"); } + if (context.rescore() != null && context.rescore().isEmpty() == false) { + throw new SearchContextException(context, "cannot use `collapse` in conjunction with `rescore`"); + } MappedFieldType fieldType = context.getQueryShardContext().fieldMapper(field); if (fieldType == null) { diff --git a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java index 2f7e1890266a4..cf4ff6c77b823 100644 --- a/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/elasticsearch/search/query/TopDocsCollectorContext.java @@ -128,7 +128,6 @@ void postProcess(QuerySearchResult result) { static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext { private final DocValueFormat[] sortFmt; private final CollapsingTopDocsCollector topDocsCollector; - private final boolean rescore; /** * Ctr @@ -140,14 +139,13 @@ static class CollapsingTopDocsCollectorContext extends TopDocsCollectorContext { private CollapsingTopDocsCollectorContext(CollapseContext collapseContext, @Nullable SortAndFormats sortAndFormats, int numHits, - boolean trackMaxScore, boolean rescore) { + boolean trackMaxScore) { super(REASON_SEARCH_TOP_HITS, numHits); assert numHits > 0; assert collapseContext != null; Sort sort = sortAndFormats == null ? Sort.RELEVANCE : sortAndFormats.sort; this.sortFmt = sortAndFormats == null ? new DocValueFormat[] { DocValueFormat.RAW } : sortAndFormats.formats; this.topDocsCollector = collapseContext.createTopDocs(sort, numHits, trackMaxScore); - this.rescore = rescore; } @Override @@ -160,11 +158,6 @@ Collector create(Collector in) throws IOException { void postProcess(QuerySearchResult result) throws IOException { result.topDocs(topDocsCollector.getTopDocs(), sortFmt); } - - @Override - boolean shouldRescore() { - return rescore; - } } abstract static class SimpleTopDocsCollectorContext extends TopDocsCollectorContext { @@ -339,6 +332,11 @@ static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searc return new ScrollingTopDocsCollectorContext(reader, query, searchContext.scrollContext(), searchContext.sort(), numDocs, searchContext.trackScores(), searchContext.numberOfShards(), searchContext.trackTotalHits(), hasFilterCollector); + } else if (searchContext.collapse() != null) { + boolean trackScores = searchContext.sort() == null ? 
true : searchContext.trackScores(); + int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); + return new CollapsingTopDocsCollectorContext(searchContext.collapse(), + searchContext.sort(), numDocs, trackScores); } else { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; @@ -348,11 +346,6 @@ static TopDocsCollectorContext createTopDocsCollectorContext(SearchContext searc numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); } } - if (searchContext.collapse() != null) { - boolean trackScores = searchContext.sort() == null ? true : searchContext.trackScores(); - return new CollapsingTopDocsCollectorContext(searchContext.collapse(), - searchContext.sort(), numDocs, trackScores, rescore); - } return new SimpleTopDocsCollectorContext(reader, query, searchContext.sort(), searchContext.searchAfter(), numDocs, searchContext.trackScores(), searchContext.trackTotalHits(), hasFilterCollector) { @Override diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java index b874c3aeca311..4d6fceba869ff 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -36,8 +36,8 @@ import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.common.io.FastCharArrayReader; +import java.io.CharArrayReader; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -314,7 +314,7 @@ public static int analyze(Analyzer analyzer, BytesRef toAnalyze, String field, T spare.copyUTF8Bytes(toAnalyze); CharsRef charsRef = spare.get(); try (TokenStream ts = analyzer.tokenStream( - field, new FastCharArrayReader(charsRef.chars, charsRef.offset, charsRef.length))) { + field, new CharArrayReader(charsRef.chars, charsRef.offset, charsRef.length))) { return analyze(ts, consumer); } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java index e6e1767386061..eb9694c6039b7 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellChecker.java @@ -27,10 +27,10 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.common.io.FastCharArrayReader; import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.Candidate; import org.elasticsearch.search.suggest.phrase.DirectCandidateGenerator.CandidateSet; +import java.io.CharArrayReader; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -139,7 +139,7 @@ public Result getCorrections(Analyzer analyzer, BytesRef query, CandidateGenerat public TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field) throws IOException { spare.copyUTF8Bytes(query); - return analyzer.tokenStream(field, new FastCharArrayReader(spare.chars(), 0, spare.length())); + return analyzer.tokenStream(field, new CharArrayReader(spare.chars(), 0, 
spare.length())); } public static class Result { diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index d4864867902d2..b794ded7f8d03 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreadsTests; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.UnknownNamedObjectException; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -813,7 +814,7 @@ public void testIds() { ids.put(145, org.elasticsearch.ElasticsearchStatusException.class); ids.put(146, org.elasticsearch.tasks.TaskCancelledException.class); ids.put(147, org.elasticsearch.env.ShardLockObtainFailedException.class); - ids.put(148, org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException.class); + ids.put(148, UnknownNamedObjectException.class); ids.put(149, MultiBucketConsumerService.TooManyBucketsException.class); Map, Integer> reverse = new HashMap<>(); diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index 37cc11da8b7b6..80f2401ed7735 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -35,9 +35,11 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.nullValue; @@ -70,6 +72,41 @@ public void testResolveNodeByAttribute() { } } + public void testAll() { + final DiscoveryNodes discoveryNodes = buildDiscoveryNodes(); + + final String[] allNodes = + StreamSupport.stream(discoveryNodes.spliterator(), false).map(DiscoveryNode::getId).toArray(String[]::new); + assertThat(discoveryNodes.resolveNodes(), arrayContainingInAnyOrder(allNodes)); + assertThat(discoveryNodes.resolveNodes(new String[0]), arrayContainingInAnyOrder(allNodes)); + assertThat(discoveryNodes.resolveNodes("_all"), arrayContainingInAnyOrder(allNodes)); + + final String[] nonMasterNodes = + StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + .map(n -> n.value) + .filter(n -> n.isMasterNode() == false) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonMasterNodes)); + + assertThat(discoveryNodes.resolveNodes("master:false", "_all"), arrayContainingInAnyOrder(allNodes)); + } + + public void testCoordinatorOnlyNodes() { + final DiscoveryNodes discoveryNodes = buildDiscoveryNodes(); + + final String[] coordinatorOnlyNodes = + StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + .map(n -> n.value) + .filter(n -> n.isDataNode() == false && n.isIngestNode() == false && n.isMasterNode() == false) + 
.map(DiscoveryNode::getId) + .toArray(String[]::new); + + assertThat( + discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "master:false"), + arrayContainingInAnyOrder(coordinatorOnlyNodes)); + } + public void testResolveNodesIds() { DiscoveryNodes discoveryNodes = buildDiscoveryNodes(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 86dd2dfe18904..d7a91c988e9da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,6 +32,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -43,15 +48,23 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.isIn; +import static org.hamcrest.Matchers.not; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class PrimaryAllocationIT extends ESIntegTestCase { @@ -309,4 +322,71 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception { assertEquals(1, client().admin().cluster().prepareState().get().getState() .routingTable().index(indexName).shardsWithState(ShardRoutingState.STARTED).size()); } + + /** + * This test asserts that replicas that fail to execute resync operations will be failed but not marked as stale.
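The test that follows relies on a manufactured sequence-number gap, so that the newly promoted primary has operations to replay to its replicas during primary-replica resync. The key line, excerpted from the body below:

--------------------------------
// punch a hole in the seqno sequence on the old primary; the resync after
// promotion must then replay operations to the replicas
IndexShardTestCase.getEngine(oldPrimaryShard).getLocalCheckpointTracker().generateSeqNo();
--------------------------------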
+ */ + public void testPrimaryReplicaResyncFailed() throws Exception { + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + final int numberOfReplicas = between(2, 3); + final String oldPrimary = internalCluster().startDataOnlyNode(); + assertAcked( + prepareCreate("test", Settings.builder().put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas))); + final ShardId shardId = new ShardId(clusterService().state().metaData().index("test").getIndex(), 0); + final Set replicaNodes = new HashSet<>(internalCluster().startDataOnlyNodes(numberOfReplicas)); + ensureGreen(); + assertAcked( + client(master).admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get()); + logger.info("--> Indexing with gap in seqno to ensure that some operations will be replayed in resync"); + long numDocs = scaledRandomIntBetween(5, 50); + for (int i = 0; i < numDocs; i++) { + IndexResponse indexResult = index("test", "doc", Long.toString(i)); + assertThat(indexResult.getShardInfo().getSuccessful(), equalTo(numberOfReplicas + 1)); + } + final IndexShard oldPrimaryShard = internalCluster().getInstance(IndicesService.class, oldPrimary).getShardOrNull(shardId); + IndexShardTestCase.getEngine(oldPrimaryShard).getLocalCheckpointTracker().generateSeqNo(); // Make gap in seqno. + long moreDocs = scaledRandomIntBetween(1, 10); + for (int i = 0; i < moreDocs; i++) { + IndexResponse indexResult = index("test", "doc", Long.toString(numDocs + i)); + assertThat(indexResult.getShardInfo().getSuccessful(), equalTo(numberOfReplicas + 1)); + } + final Set replicasSide1 = Sets.newHashSet(randomSubsetOf(between(1, numberOfReplicas - 1), replicaNodes)); + final Set replicasSide2 = Sets.difference(replicaNodes, replicasSide1); + NetworkDisruption partition = new NetworkDisruption(new TwoPartitions(replicasSide1, replicasSide2), new NetworkDisconnect()); + internalCluster().setDisruptionScheme(partition); + logger.info("--> isolating some replicas during primary-replica resync"); + partition.startDisrupting(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(oldPrimary)); + // Checks that we fail replicas on one side but do not mark them as stale. + assertBusy(() -> { + ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + final IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shardId); + final String newPrimaryNode = state.getRoutingNodes().node(shardRoutingTable.primary.currentNodeId()).node().getName(); + assertThat(newPrimaryNode, not(equalTo(oldPrimary))); + Set selectedPartition = replicasSide1.contains(newPrimaryNode) ?
replicasSide1 : replicasSide2; + assertThat(shardRoutingTable.activeShards(), hasSize(selectedPartition.size())); + for (ShardRouting activeShard : shardRoutingTable.activeShards()) { + assertThat(state.getRoutingNodes().node(activeShard.currentNodeId()).node().getName(), isIn(selectedPartition)); + } + assertThat(state.metaData().index("test").inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1)); + }, 1, TimeUnit.MINUTES); + assertAcked( + client(master).admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "all")).get()); + partition.stopDisrupting(); + logger.info("--> stop disrupting network and re-enable allocation"); + assertBusy(() -> { + ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + assertThat(state.routingTable().shardRoutingTable(shardId).activeShards(), hasSize(numberOfReplicas)); + assertThat(state.metaData().index("test").inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1)); + for (String node : replicaNodes) { + IndexShard shard = internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId); + assertThat(shard.getLocalCheckpoint(), equalTo(numDocs + moreDocs)); + } + }); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index 609c12fb6d874..dbb47764158c9 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -1023,7 +1023,7 @@ public void testNamedObject() throws IOException { { p.nextToken(); assertEquals("test", p.namedObject(Object.class, "str", null)); - NamedXContentRegistry.UnknownNamedObjectException e = expectThrows(NamedXContentRegistry.UnknownNamedObjectException.class, + UnknownNamedObjectException e = expectThrows(UnknownNamedObjectException.class, () -> p.namedObject(Object.class, "unknown", null)); assertEquals("Unknown Object [unknown]", e.getMessage()); assertEquals("java.lang.Object", e.getCategoryClass()); diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java index 4fcc16416b56f..c623e4a196b50 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java index f550e26024d06..e31a1ce72025c 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java @@ -187,7 +187,7 @@ public void testParseTypedKeysObject() throws IOException { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); 
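The test hunks around this point all switch to the relocated exception type and assert on its structured fields. The pattern, condensed (expectThrows comes from ESTestCase; the parser setup is elided):

--------------------------------
UnknownNamedObjectException e = expectThrows(UnknownNamedObjectException.class,
        () -> parser.namedObject(Object.class, "unknown", null));
assertEquals("Unknown Object [unknown]", e.getMessage());
assertEquals("java.lang.Object", e.getCategoryClass());
assertEquals("unknown", e.getName());
--------------------------------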
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
index 609c12fb6d874..dbb47764158c9 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java
@@ -1023,7 +1023,7 @@ public void testNamedObject() throws IOException {
         {
             p.nextToken();
             assertEquals("test", p.namedObject(Object.class, "str", null));
-            NamedXContentRegistry.UnknownNamedObjectException e = expectThrows(NamedXContentRegistry.UnknownNamedObjectException.class,
+            UnknownNamedObjectException e = expectThrows(UnknownNamedObjectException.class,
                 () -> p.namedObject(Object.class, "unknown", null));
             assertEquals("Unknown Object [unknown]", e.getMessage());
             assertEquals("java.lang.Object", e.getCategoryClass());
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java
index 4fcc16416b56f..c623e4a196b50 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/UnknownNamedObjectExceptionTests.java
@@ -21,7 +21,6 @@

 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry.UnknownNamedObjectException;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;

diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java
index f550e26024d06..e31a1ce72025c 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserUtilsTests.java
@@ -187,7 +187,7 @@ public void testParseTypedKeysObject() throws IOException {
         ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
         ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser::getTokenLocation);
         ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
-        NamedXContentRegistry.UnknownNamedObjectException e = expectThrows(NamedXContentRegistry.UnknownNamedObjectException.class,
+        UnknownNamedObjectException e = expectThrows(UnknownNamedObjectException.class,
             () -> parseTypedKeysObject(parser, delimiter, Boolean.class, a -> {}));
         assertEquals("Unknown Boolean [type]", e.getMessage());
         assertEquals("type", e.getName());
diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java
index dbbc8e443c076..d27b05d1e7b29 100644
--- a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java
+++ b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java
@@ -36,16 +36,12 @@
 import org.elasticsearch.action.ingest.SimulatePipelineRequest;
 import org.elasticsearch.action.ingest.SimulatePipelineResponse;
 import org.elasticsearch.action.ingest.WritePipelineResponse;
-import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.client.Requests;
-import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.plugins.Plugin;
-import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.test.ESIntegTestCase;

 import java.util.Arrays;
@@ -55,6 +51,7 @@
 import java.util.concurrent.ExecutionException;

 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.notNullValue;
@@ -130,6 +127,10 @@ public void testSimulate() throws Exception {
         IngestDocument ingestDocument = new IngestDocument("index", "type", "id", null, null, null, null, source);
         assertThat(simulateDocumentBaseResult.getIngestDocument().getSourceAndMetadata(), equalTo(ingestDocument.getSourceAndMetadata()));
         assertThat(simulateDocumentBaseResult.getFailure(), nullValue());
+
+        // cleanup
+        WritePipelineResponse deletePipelineResponse = client().admin().cluster().prepareDeletePipeline("_id").get();
+        assertTrue(deletePipelineResponse.isAcknowledged());
     }

     public void testBulkWithIngestFailures() throws Exception {
@@ -172,6 +173,10 @@ public void testBulkWithIngestFailures() throws Exception {
                 assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
             }
         }
+
+        // cleanup
+        WritePipelineResponse deletePipelineResponse = client().admin().cluster().prepareDeletePipeline("_id").get();
+        assertTrue(deletePipelineResponse.isAcknowledged());
     }

     public void testBulkWithUpsert() throws Exception {
@@ -263,13 +268,12 @@ public void testPutWithPipelineFactoryError() throws Exception {
                 .endObject()
             .endArray()
         .endObject().bytes();
-        PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON);
-        try {
-            client().admin().cluster().putPipeline(putPipelineRequest).get();
-        } catch (ExecutionException e) {
-            ElasticsearchParseException ex = (ElasticsearchParseException) ExceptionsHelper.unwrap(e, ElasticsearchParseException.class);
-            assertNotNull(ex);
-            assertThat(ex.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]"));
-        }
+        PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id2", source, XContentType.JSON);
+        Exception e = expectThrows(ElasticsearchParseException.class,
+            () -> client().admin().cluster().putPipeline(putPipelineRequest).actionGet());
+        assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]"));
+
+        GetPipelineResponse response = client().admin().cluster().prepareGetPipeline("_id2").get();
+        assertFalse(response.isFound());
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java
index 645933348879c..03777b98ab73e 100644
--- a/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java
+++ b/server/src/test/java/org/elasticsearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java
@@ -37,7 +37,6 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;

 @ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST)
 public class IngestProcessorNotInstalledOnAllNodesIT extends ESIntegTestCase {
@@ -104,7 +103,11 @@ public void testFailStartNode() throws Exception {
         installPlugin = false;
         String node2 = internalCluster().startNode();
         pipeline = internalCluster().getInstance(NodeService.class, node2).getIngestService().getPipelineStore().get("_id");
-        assertThat(pipeline, nullValue());
+
+        assertNotNull(pipeline);
+        assertThat(pipeline.getId(), equalTo("_id"));
+        assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, " +
+            "because pipeline with id [_id] could not be loaded"));
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java
index 3247761a548f0..5a3b57a6d7e0b 100644
--- a/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/ingest/PipelineExecutionServiceTests.java
@@ -19,6 +19,7 @@

 package org.elasticsearch.ingest;

+import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -92,6 +93,32 @@ public void testExecuteIndexPipelineDoesNotExist() {
         verify(completionHandler, never()).accept(anyBoolean());
     }

+    public void testExecuteIndexPipelineExistsButFailedParsing() {
+        when(store.get("_id")).thenReturn(new Pipeline("_id", "stub", null,
+            new CompoundProcessor(new AbstractProcessor("mock") {
+                @Override
+                public void execute(IngestDocument ingestDocument) {
+                    throw new IllegalStateException("error");
+                }
+
+                @Override
+                public String getType() {
+                    return null;
+                }
+            })));
+        SetOnce<Boolean> failed = new SetOnce<>();
+        IndexRequest indexRequest = new IndexRequest("_index", "_type", "_id").source(Collections.emptyMap()).setPipeline("_id");
+        Consumer<Exception> failureHandler = (e) -> {
+            assertThat(e.getCause().getClass(), equalTo(IllegalArgumentException.class));
+            assertThat(e.getCause().getCause().getClass(), equalTo(IllegalStateException.class));
+            assertThat(e.getCause().getCause().getMessage(), equalTo("error"));
+            failed.set(true);
+        };
+        Consumer<Boolean> completionHandler = (e) -> failed.set(false);
+        executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler);
+        assertTrue(failed.get());
+    }
+
     public void testExecuteBulkPipelineDoesNotExist() {
         CompoundProcessor processor = mock(CompoundProcessor.class);
         when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor));
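
The new testExecuteIndexPipelineExistsButFailedParsing pins the outcome with Lucene's SetOnce: exactly one of the two handlers may record a result, and a second write fails loudly. A rough stand-in for that write-once holder, assuming Lucene's set-once semantics (the real org.apache.lucene.util.SetOnce throws its own AlreadySetException; null values are not handled in this sketch):

------------------------------------------------------------
import java.util.concurrent.atomic.AtomicReference;

final class WriteOnce<T> {
    private final AtomicReference<T> ref = new AtomicReference<>();

    void set(T value) {
        // Only the first write wins; a second write indicates a test bug.
        if (!ref.compareAndSet(null, value)) {
            throw new IllegalStateException("value already set");
        }
    }

    T get() {
        return ref.get(); // null until set(...) has been called
    }
}
------------------------------------------------------------
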
equalTo("error")); + failed.set(true); + }; + Consumer completionHandler = (e) -> failed.set(false); + executionService.executeIndexRequest(indexRequest, failureHandler, completionHandler); + assertTrue(failed.get()); + } + public void testExecuteBulkPipelineDoesNotExist() { CompoundProcessor processor = mock(CompoundProcessor.class); when(store.get("_id")).thenReturn(new Pipeline("_id", "_description", version, processor)); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java index bb0d57871208c..250bb5059cf58 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineStoreTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; @@ -165,7 +164,13 @@ public void testPutWithErrorResponse() { assertThat(e.getMessage(), equalTo("[processors] required property is missing")); } pipeline = store.get(id); - assertThat(pipeline, nullValue()); + assertNotNull(pipeline); + assertThat(pipeline.getId(), equalTo("_id")); + assertThat(pipeline.getDescription(), equalTo("this is a place holder pipeline, because pipeline with" + + " id [_id] could not be loaded")); + assertThat(pipeline.getProcessors().size(), equalTo(1)); + assertNull(pipeline.getProcessors().get(0).getTag()); + assertThat(pipeline.getProcessors().get(0).getType(), equalTo("unknown")); } public void testDelete() { diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 36e1266c51118..4f0a73ca44ca6 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -107,12 +107,9 @@ public void testAdditionalSettingsClash() { public void testExistingPluginMissingDescriptor() throws Exception { Path pluginsDir = createTempDir(); Files.createDirectory(pluginsDir.resolve("plugin-missing-descriptor")); - try { - PluginsService.getPluginBundles(pluginsDir); - fail(); - } catch (IllegalStateException e) { - assertTrue(e.getMessage(), e.getMessage().contains("Could not load plugin descriptor for existing plugin [plugin-missing-descriptor]")); - } + IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.getPluginBundles(pluginsDir)); + assertThat(e.getMessage(), + containsString("Could not load plugin descriptor for plugin directory [plugin-missing-descriptor]")); } public void testFilterPlugins() { @@ -139,7 +136,7 @@ public void testHiddenFiles() throws IOException { IllegalStateException.class, () -> newPluginsService(settings)); - final String expected = "Could not load plugin descriptor for existing plugin [.hidden]"; + final String expected = "Could not load plugin descriptor for plugin directory [.hidden]"; assertThat(e, hasToString(containsString(expected))); } @@ -158,7 +155,7 @@ public void testDesktopServicesStoreFiles() throws IOException { assertNotNull(pluginsService); } else { final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings)); 
diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
index 36e1266c51118..4f0a73ca44ca6 100644
--- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
@@ -107,12 +107,9 @@ public void testAdditionalSettingsClash() {
     public void testExistingPluginMissingDescriptor() throws Exception {
         Path pluginsDir = createTempDir();
         Files.createDirectory(pluginsDir.resolve("plugin-missing-descriptor"));
-        try {
-            PluginsService.getPluginBundles(pluginsDir);
-            fail();
-        } catch (IllegalStateException e) {
-            assertTrue(e.getMessage(), e.getMessage().contains("Could not load plugin descriptor for existing plugin [plugin-missing-descriptor]"));
-        }
+        IllegalStateException e = expectThrows(IllegalStateException.class, () -> PluginsService.getPluginBundles(pluginsDir));
+        assertThat(e.getMessage(),
+            containsString("Could not load plugin descriptor for plugin directory [plugin-missing-descriptor]"));
     }

     public void testFilterPlugins() {
@@ -139,7 +136,7 @@ public void testHiddenFiles() throws IOException {
                 IllegalStateException.class,
                 () -> newPluginsService(settings));

-        final String expected = "Could not load plugin descriptor for existing plugin [.hidden]";
+        final String expected = "Could not load plugin descriptor for plugin directory [.hidden]";
         assertThat(e, hasToString(containsString(expected)));
     }

@@ -158,7 +155,7 @@ public void testDesktopServicesStoreFiles() throws IOException {
             assertNotNull(pluginsService);
         } else {
             final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings));
-            assertThat(e, hasToString(containsString("Could not load plugin descriptor for existing plugin [.DS_Store]")));
+            assertThat(e.getMessage(), containsString("Could not load plugin descriptor for plugin directory [.DS_Store]"));
             assertNotNull(e.getCause());
             assertThat(e.getCause(), instanceOf(FileSystemException.class));
             if (Constants.WINDOWS) {
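
Several hunks in this patch migrate try { ...; fail(); } catch (...) blocks to expectThrows, which both asserts the exception type and returns the exception for follow-up message checks. Roughly, the helper's contract looks like the sketch below; LuceneTestCase and JUnit ship the real implementations, this generic version only illustrates it:

------------------------------------------------------------
final class ExpectThrowsSketch {
    interface ThrowingRunnable {
        void run() throws Throwable;
    }

    static <T extends Throwable> T expectThrows(Class<T> expected, ThrowingRunnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                return expected.cast(t); // hand the exception back for message assertions
            }
            throw new AssertionError("expected " + expected.getName()
                + " but got " + t.getClass().getName(), t);
        }
        throw new AssertionError("expected " + expected.getName() + " but nothing was thrown");
    }
}
------------------------------------------------------------
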
diff --git a/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java
index 98f14c1aa6e74..58565b5f264b7 100644
--- a/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java
+++ b/server/src/test/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java
@@ -36,17 +36,13 @@
 import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
-import org.elasticsearch.search.collapse.CollapseBuilder;
 import org.elasticsearch.search.rescore.QueryRescoreMode;
 import org.elasticsearch.search.rescore.QueryRescorerBuilder;
 import org.elasticsearch.search.sort.SortBuilders;
 import org.elasticsearch.test.ESIntegTestCase;

-import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
-import java.util.Map;
-import java.util.stream.Collectors;

 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@@ -71,7 +67,6 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore;
-import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -753,65 +748,4 @@ public void testRescorePhaseWithInvalidSort() throws Exception {
             assertThat(hit.getScore(), equalTo(101f));
         }
     }
-
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28932")
-    public void testRescoreAfterCollapse() throws Exception {
-        assertAcked(prepareCreate("test")
-            .addMapping(
-                "type1",
-                jsonBuilder()
-                    .startObject()
-                    .startObject("properties")
-                    .startObject("group")
-                    .field("type", "keyword")
-                    .endObject()
-                    .endObject()
-                    .endObject())
-        );
-
-        ensureGreen("test");
-
-        indexDocument(1, "miss", "a", 1, 10);
-        indexDocument(2, "name", "a", 2, 20);
-        indexDocument(3, "name", "b", 2, 30);
-        // should be highest on rescore, but filtered out during collapse
-        indexDocument(4, "name", "b", 1, 40);
-
-        refresh("test");
-
-        SearchResponse searchResponse = client().prepareSearch("test")
-            .setTypes("type1")
-            .setQuery(staticScoreQuery("static_score"))
-            .addRescorer(new QueryRescorerBuilder(staticScoreQuery("static_rescore")))
-            .setCollapse(new CollapseBuilder("group"))
-            .get();
-
-        assertThat(searchResponse.getHits().totalHits, equalTo(3L));
-        assertThat(searchResponse.getHits().getHits().length, equalTo(2));
-
-        Map<String, Float> collapsedHits = Arrays
-            .stream(searchResponse.getHits().getHits())
-            .collect(Collectors.toMap(SearchHit::getId, SearchHit::getScore));
-
-        assertThat(collapsedHits.keySet(), containsInAnyOrder("2", "3"));
-        assertThat(collapsedHits.get("2"), equalTo(22F));
-        assertThat(collapsedHits.get("3"), equalTo(32F));
-    }
-
-    private QueryBuilder staticScoreQuery(String scoreField) {
-        return functionScoreQuery(termQuery("name", "name"), ScoreFunctionBuilders.fieldValueFactorFunction(scoreField))
-            .boostMode(CombineFunction.REPLACE);
-    }
-
-    private void indexDocument(int id, String name, String group, int score, int rescore) throws IOException {
-        XContentBuilder docBuilder = jsonBuilder()
-            .startObject()
-            .field("name", name)
-            .field("group", group)
-            .field("static_score", score)
-            .field("static_rescore", rescore)
-            .endObject();
-
-        client().prepareIndex("test", "type1", Integer.toString(id)).setSource(docBuilder).get();
-    }
 }
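
The removed testRescoreAfterCollapse (muted via @AwaitsFix for issue #28932 and deleted here) made its expected scores deterministic by deriving them entirely from stored numeric fields. For reference, the query shape it relied on, using the same builder APIs as the deleted code:

------------------------------------------------------------
import org.elasticsearch.common.lucene.search.function.CombineFunction;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;

final class StaticScoreQueries {
    // Score hits purely by the value of a numeric field: REPLACE discards the
    // relevance score, so each hit's score equals the doc value being asserted on,
    // which is what made the rescore/collapse expectations exact.
    static QueryBuilder staticScore(String scoreField) {
        return QueryBuilders.functionScoreQuery(
                QueryBuilders.termQuery("name", "name"),
                ScoreFunctionBuilders.fieldValueFactorFunction(scoreField))
            .boostMode(CombineFunction.REPLACE);
    }
}
------------------------------------------------------------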