From e37aae1643a0de187df2c14454cb7637ad42c556 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 13 Apr 2018 12:41:17 -0400 Subject: [PATCH 01/18] Wip --- .../doc/RestTestsFromSnippetsTask.groovy | 13 ++- .../analysis/analyzers/lang-analyzer.asciidoc | 1 + .../smoketest/DocsClientYamlTestSuiteIT.java | 85 ++++++++++++++++++- .../rest/yaml/ESClientYamlSuiteTestCase.java | 15 +++- .../yaml/section/ClientYamlTestSuite.java | 5 +- .../rest/yaml/section/ExecutableSection.java | 15 +++- 6 files changed, 123 insertions(+), 11 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 95ec00beca7e0..59cfee7814541 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -141,11 +141,13 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { private static final String SYNTAX = { String method = /(?GET|PUT|POST|HEAD|OPTIONS|DELETE)/ String pathAndQuery = /(?[^\n]+)/ - String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|#/ + String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|startyaml|#/ String body = /(?(?:\n(?!$badBody)[^\n]+)+)/ - String nonComment = /$method\s+$pathAndQuery$body?/ + String rawRequest = /(?:$method\s+$pathAndQuery$body?)/ + String yamlRequest = /(?:startyaml(?.+?)endyaml)/ + String nonComment = /(?:$rawRequest|$yamlRequest)/ String comment = /(?#.+)/ - /(?:$comment|$nonComment)\n+/ + /(?s)(?:$comment|$nonComment)\n+/ }() /** @@ -333,6 +335,11 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { // Comment return } + String yamlRequest = matcher.group("yaml"); + if (yamlRequest != null) { + current.println(yamlRequest) + return + } String method = matcher.group("method") String pathAndQuery = matcher.group("pathAndQuery") String body = 
matcher.group("body") diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index cb976601fdcbe..b0c1998b4d435 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -113,6 +113,7 @@ PUT /arabic_example } ---------------------------------------------------- // CONSOLE +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: arabic_example, name: arabic}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 46e448fa54da9..03bc3c7190e7f 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -24,14 +24,36 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import 
org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import javax.lang.model.element.ExecutableElement; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.hamcrest.Matchers.hasSize; public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -41,7 +63,11 @@ public DocsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandi @ParametersFactory public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); + List entries = new ArrayList<>(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS.size() + 1); + entries.addAll(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS); + entries.add(new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("test_analyzer"), TestAnalyzer::parse)); + NamedXContentRegistry executeableSectionRegistry = new NamedXContentRegistry(entries); + return ESClientYamlSuiteTestCase.createParameters(executeableSectionRegistry); } @Override @@ -64,5 +90,60 @@ protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec List hosts, Version esVersion) throws IOException { return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion); } -} + private static class TestAnalyzer implements ExecutableSection { + private static ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("test_analyzer", false, (a, location) -> { + String index = (String) a[0]; + String name = (String) a[0]; + return new 
TestAnalyzer(location, index, name); + }); + static { + PARSER.declareString(constructorArg(), new ParseField("index")); + PARSER.declareString(constructorArg(), new ParseField("name")); + } + private static TestAnalyzer parse(XContentParser parser) throws IOException { + XContentLocation location = parser.getTokenLocation(); + return PARSER.parse(parser, location); + } + + private final XContentLocation location; + private final String index; + private final String name; + + private TestAnalyzer(XContentLocation location, String index, String name) { + this.location = location; + this.index = index; + this.name = name; + } + + @Override + public XContentLocation getLocation() { + return location; + } + + @Override + public void execute(ClientYamlTestExecutionContext executionContext) throws IOException { + int size = 1000; + List testText = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + testText.add(randomRealisticUnicodeOfCodepointLength(10)); + } + Map body = new HashMap<>(2); + body.put("analyzer", name); + body.put("text", testText); + ClientYamlTestResponse response = executionContext.callApi("anlayze", singletonMap("index", index), + singletonList(body), emptyMap()); + List rebuilt = (List) response.evaluate("path"); + response = executionContext.callApi("anlayze", emptyMap(), singletonList(body), emptyMap()); + List builtIn = (List) response.evaluate("path"); + assertThat(rebuilt, hasSize(size)); + assertThat(builtIn, hasSize(size)); + for (int i = 0; i < size; i++) { + Map rebuiltToken = (Map) rebuilt.get(i); + Map builtInToken = (Map) builtIn.get(i); + assertEquals(builtInToken, rebuiltToken); + } + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 927f9b46c966a..950bb14eed9af 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -143,7 +144,19 @@ protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec return new ClientYamlTestClient(restSpec, restClient, hosts, esVersion); } + /** + * Create parameters for this parameterized test. Uses the + * {@link ExecutableSection#XCONTENT_REGISTRY list} of executable sections + * defined in {@link ExecutableSection}. + */ public static Iterable createParameters() throws Exception { + return createParameters(ExecutableSection.XCONTENT_REGISTRY); + } + + /** + * Create parameters for this parameterized test. 
+ */ + public static Iterable createParameters(NamedXContentRegistry executeableSectionRegistry) throws Exception { String[] paths = resolvePathsProperty(REST_TESTS_SUITE, ""); // default to all tests under the test root List tests = new ArrayList<>(); Map> yamlSuites = loadSuites(paths); @@ -151,7 +164,7 @@ public static Iterable createParameters() throws Exception { for (String api : yamlSuites.keySet()) { List yamlFiles = new ArrayList<>(yamlSuites.get(api)); for (Path yamlFile : yamlFiles) { - ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(api, yamlFile); + ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(executeableSectionRegistry, api, yamlFile); for (ClientYamlTestSection testSection : restTestSuite.getTestSections()) { tests.add(new Object[]{ new ClientYamlTestCandidate(restTestSuite, testSection) }); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 72c83f632efb0..b9988128b02a4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.yaml.YamlXContent; @@ -40,7 +41,7 @@ * Supports a setup section and multiple test sections. 
*/ public class ClientYamlTestSuite { - public static ClientYamlTestSuite parse(String api, Path file) throws IOException { + public static ClientYamlTestSuite parse(NamedXContentRegistry executeableSectionRegistry, String api, Path file) throws IOException { if (!Files.isRegularFile(file)) { throw new IllegalArgumentException(file.toAbsolutePath() + " is not a file"); } @@ -64,7 +65,7 @@ public static ClientYamlTestSuite parse(String api, Path file) throws IOExceptio } } - try (XContentParser parser = YamlXContent.yamlXContent.createParser(ExecutableSection.XCONTENT_REGISTRY, + try (XContentParser parser = YamlXContent.yamlXContent.createParser(executeableSectionRegistry, LoggingDeprecationHandler.INSTANCE, Files.newInputStream(file))) { return parse(api, filename, parser); } catch(Exception e) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java index 827457f4c2ae2..0101e71cc14b6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java @@ -26,15 +26,18 @@ import java.io.IOException; import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.unmodifiableList; /** * Represents a test fragment that can be executed (e.g. api call, assertion) */ public interface ExecutableSection { /** - * {@link NamedXContentRegistry} needed in the {@link XContentParser} before calling {@link ExecutableSection#parse(XContentParser)}. + * Default list of {@link ExecutableSection}s available for tests. 
*/ - NamedXContentRegistry XCONTENT_REGISTRY = new NamedXContentRegistry(Arrays.asList( + public List DEFAULT_EXECUTABLE_CONTEXTS = unmodifiableList(Arrays.asList( new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("do"), DoSection::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("set"), SetSection::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("match"), MatchAssertion::parse), @@ -46,6 +49,12 @@ public interface ExecutableSection { new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("lte"), LessThanOrEqualToAssertion::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("length"), LengthAssertion::parse))); + /** + * {@link NamedXContentRegistry} that parses the default list of + * {@link ExecutableSection}s available for tests. + */ + NamedXContentRegistry XCONTENT_REGISTRY = new NamedXContentRegistry(DEFAULT_EXECUTABLE_CONTEXTS); + static ExecutableSection parse(XContentParser parser) throws IOException { ParserUtils.advanceToFieldName(parser); String section = parser.currentName(); @@ -60,7 +69,7 @@ static ExecutableSection parse(XContentParser parser) throws IOException { } /** - * Get the location in the test that this was defined. + * Get the location in the test that this was defined. */ XContentLocation getLocation(); From b9bbf669a4662f2580b4a0bbb79251e21feae807 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 13 Apr 2018 17:22:00 -0400 Subject: [PATCH 02/18] Docs: Test examples that recreate lang analyzers We have a pile of documentation describing how to rebuild the built in language analyzers and, previously, our documentation testing framework made sure that the examples successfully built *an* analyzer but they didn't assert that the analyzer built by the documentation matches the built in analyzer. Unsurprisingly, some of the examples aren't quite right. 
This adds a mechanism that tests the analyzers built by the docs. The mechanism is fairly simple and brutal but it seems to be working: build a hundred random unicode sequences and send them through the `_analyze` API with the rebuilt analyzer and then again through the built in analyzer. Then make sure both APIs return the same results. Each of these calls to `_analyze` takes about 20ms on my laptop which seems fine. --- .../analysis/analyzers/lang-analyzer.asciidoc | 148 +++++++++++++----- .../smoketest/DocsClientYamlTestSuiteIT.java | 78 ++++++--- .../test/rest/yaml/ClientYamlTestClient.java | 27 +++- 3 files changed, 182 insertions(+), 71 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index b0c1998b4d435..a3eaa445a244d 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -97,13 +97,14 @@ PUT /arabic_example } }, "analyzer": { - "arabic": { + "rebuilt_arabic": { "tokenizer": "standard", "filter": [ "lowercase", + "decimal_digit", "arabic_stop", - "arabic_normalization", "arabic_keywords", + "arabic_normalization", "arabic_stemmer" ] } @@ -113,7 +114,8 @@ PUT /arabic_example } ---------------------------------------------------- // CONSOLE -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: arabic_example, name: arabic}\nendyaml\n/] +// TEST[s/"arabic_normalization",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: arabic_example, built_in: arabic, rebuilt: rebuilt_arabic}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -145,7 +147,7 @@ PUT /armenian_example } }, "analyzer": { - "armenian": { + "rebuilt_armenian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -160,6 +162,8 @@ PUT /armenian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"armenian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: armenian_example, built_in: armenian, rebuilt: rebuilt_armenian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -191,7 +195,7 @@ PUT /basque_example } }, "analyzer": { - "basque": { + "rebuilt_basque": { "tokenizer": "standard", "filter": [ "lowercase", @@ -206,6 +210,8 @@ PUT /basque_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"basque_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: basque_example, built_in: basque, rebuilt: rebuilt_basque}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -237,14 +243,15 @@ PUT /bengali_example } }, "analyzer": { - "bengali": { + "rebuilt_bengali": { "tokenizer": "standard", "filter": [ "lowercase", + "decimal_digit", + "bengali_keywords", "indic_normalization", "bengali_normalization", "bengali_stop", - "bengali_keywords", "bengali_stemmer" ] } @@ -254,6 +261,8 @@ PUT /bengali_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"bengali_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: bengali_example, built_in: bengali, rebuilt: rebuilt_bengali}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -285,7 +294,7 @@ PUT /brazilian_example } }, "analyzer": { - "brazilian": { + "rebuilt_brazilian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -300,6 +309,8 @@ PUT /brazilian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"brazilian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: brazilian_example, built_in: brazilian, rebuilt: rebuilt_brazilian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -331,7 +342,7 @@ PUT /bulgarian_example } }, "analyzer": { - "bulgarian": { + "rebuilt_bulgarian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -346,6 +357,8 @@ PUT /bulgarian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"bulgarian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: bulgarian_example, built_in: bulgarian, rebuilt: rebuilt_bulgarian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -381,7 +394,7 @@ PUT /catalan_example } }, "analyzer": { - "catalan": { + "rebuilt_catalan": { "tokenizer": "standard", "filter": [ "catalan_elision", @@ -397,6 +410,8 @@ PUT /catalan_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"catalan_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: catalan_example, built_in: catalan, rebuilt: rebuilt_catalan}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -420,7 +435,7 @@ PUT /cjk_example } }, "analyzer": { - "cjk": { + "rebuilt_cjk": { "tokenizer": "standard", "filter": [ "cjk_width", @@ -435,6 +450,8 @@ PUT /cjk_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"cjk_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: cjk_example, built_in: cjk, rebuilt: rebuilt_cjk}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -464,7 +481,7 @@ PUT /czech_example } }, "analyzer": { - "czech": { + "rebuilt_czech": { "tokenizer": "standard", "filter": [ "lowercase", @@ -479,6 +496,8 @@ PUT /czech_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"czech_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: czech_example, built_in: czech, rebuilt: rebuilt_czech}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -510,7 +529,7 @@ PUT /danish_example } }, "analyzer": { - "danish": { + "rebuilt_danish": { "tokenizer": "standard", "filter": [ "lowercase", @@ -525,6 +544,8 @@ PUT /danish_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"danish_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: danish_example, built_in: danish, rebuilt: rebuilt_danish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -565,7 +586,7 @@ PUT /dutch_example } }, "analyzer": { - "dutch": { + "rebuilt_dutch": { "tokenizer": "standard", "filter": [ "lowercase", @@ -581,6 +602,8 @@ PUT /dutch_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"dutch_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: dutch_example, built_in: dutch, rebuilt: rebuilt_dutch}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -616,7 +639,7 @@ PUT /english_example } }, "analyzer": { - "english": { + "rebuilt_english": { "tokenizer": "standard", "filter": [ "english_possessive_stemmer", @@ -632,6 +655,8 @@ PUT /english_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"english_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: english_example, built_in: english, rebuilt: rebuilt_english}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -663,7 +688,7 @@ PUT /finnish_example } }, "analyzer": { - "finnish": { + "rebuilt_finnish": { "tokenizer": "standard", "filter": [ "lowercase", @@ -678,6 +703,8 @@ PUT /finnish_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"finnish_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: finnish_example, built_in: finnish, rebuilt: rebuilt_finnish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -718,7 +745,7 @@ PUT /french_example } }, "analyzer": { - "french": { + "rebuilt_french": { "tokenizer": "standard", "filter": [ "french_elision", @@ -734,6 +761,8 @@ PUT /french_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"french_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: french_example, built_in: french, rebuilt: rebuilt_french}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -765,7 +794,7 @@ PUT /galician_example } }, "analyzer": { - "galician": { + "rebuilt_galician": { "tokenizer": "standard", "filter": [ "lowercase", @@ -780,6 +809,8 @@ PUT /galician_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"galician_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: galician_example, built_in: galician, rebuilt: rebuilt_galician}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -811,7 +842,7 @@ PUT /german_example } }, "analyzer": { - "german": { + "rebuilt_german": { "tokenizer": "standard", "filter": [ "lowercase", @@ -827,6 +858,8 @@ PUT /german_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"german_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: german_example, built_in: german, rebuilt: rebuilt_german}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -862,7 +895,7 @@ PUT /greek_example } }, "analyzer": { - "greek": { + "rebuilt_greek": { "tokenizer": "standard", "filter": [ "greek_lowercase", @@ -877,6 +910,8 @@ PUT /greek_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"greek_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: greek_example, built_in: greek, rebuilt: rebuilt_greek}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -908,14 +943,15 @@ PUT /hindi_example } }, "analyzer": { - "hindi": { + "rebuilt_hindi": { "tokenizer": "standard", "filter": [ "lowercase", + "decimal_digit", + "hindi_keywords", "indic_normalization", "hindi_normalization", "hindi_stop", - "hindi_keywords", "hindi_stemmer" ] } @@ -925,6 +961,8 @@ PUT /hindi_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"hindi_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: hindi_example, built_in: hindi, rebuilt: rebuilt_hindi}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -956,7 +994,7 @@ PUT /hungarian_example } }, "analyzer": { - "hungarian": { + "rebuilt_hungarian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -971,6 +1009,8 @@ PUT /hungarian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"hungarian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: hungarian_example, built_in: hungarian, rebuilt: rebuilt_hungarian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1003,7 +1043,7 @@ PUT /indonesian_example } }, "analyzer": { - "indonesian": { + "rebuilt_indonesian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1018,6 +1058,8 @@ PUT /indonesian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"indonesian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: indonesian_example, built_in: indonesian, rebuilt: rebuilt_indonesian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1057,7 +1099,7 @@ PUT /irish_example } }, "analyzer": { - "irish": { + "rebuilt_irish": { "tokenizer": "standard", "filter": [ "irish_stop", @@ -1073,6 +1115,8 @@ PUT /irish_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"irish_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: irish_example, built_in: irish, rebuilt: rebuilt_irish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1113,7 +1157,7 @@ PUT /italian_example } }, "analyzer": { - "italian": { + "rebuilt_italian": { "tokenizer": "standard", "filter": [ "italian_elision", @@ -1129,6 +1173,8 @@ PUT /italian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"italian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: italian_example, built_in: italian, rebuilt: rebuilt_italian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1160,7 +1206,7 @@ PUT /latvian_example } }, "analyzer": { - "latvian": { + "rebuilt_latvian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1175,6 +1221,8 @@ PUT /latvian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"latvian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: latvian_example, built_in: latvian, rebuilt: rebuilt_latvian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1206,7 +1254,7 @@ PUT /lithuanian_example } }, "analyzer": { - "lithuanian": { + "rebuilt_lithuanian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1221,6 +1269,8 @@ PUT /lithuanian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"lithuanian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: lithuanian_example, built_in: lithuanian, rebuilt: rebuilt_lithuanian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1252,7 +1302,7 @@ PUT /norwegian_example } }, "analyzer": { - "norwegian": { + "rebuilt_norwegian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1267,6 +1317,8 @@ PUT /norwegian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"norwegian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: norwegian_example, built_in: norwegian, rebuilt: rebuilt_norwegian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1296,11 +1348,12 @@ PUT /persian_example } }, "analyzer": { - "persian": { + "rebuilt_persian": { "tokenizer": "standard", "char_filter": [ "zero_width_spaces" ], "filter": [ "lowercase", + "decimal_digit", "arabic_normalization", "persian_normalization", "persian_stop" @@ -1312,6 +1365,7 @@ PUT /persian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: persian_example, built_in: persian, rebuilt: rebuilt_persian}\nendyaml\n/] <1> Replaces zero-width non-joiners with an ASCII space. <2> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -1342,7 +1396,7 @@ PUT /portuguese_example } }, "analyzer": { - "portuguese": { + "rebuilt_portuguese": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1357,6 +1411,8 @@ PUT /portuguese_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"portuguese_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: portuguese_example, built_in: portuguese, rebuilt: rebuilt_portuguese}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1388,7 +1444,7 @@ PUT /romanian_example } }, "analyzer": { - "romanian": { + "rebuilt_romanian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1403,6 +1459,8 @@ PUT /romanian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"romanian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: romanian_example, built_in: romanian, rebuilt: rebuilt_romanian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1435,7 +1493,7 @@ PUT /russian_example } }, "analyzer": { - "russian": { + "rebuilt_russian": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1450,6 +1508,8 @@ PUT /russian_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"russian_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: russian_example, built_in: russian, rebuilt: rebuilt_russian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1481,11 +1541,12 @@ PUT /sorani_example } }, "analyzer": { - "sorani": { + "rebuilt_sorani": { "tokenizer": "standard", "filter": [ "sorani_normalization", "lowercase", + "decimal_digit", "sorani_stop", "sorani_keywords", "sorani_stemmer" @@ -1497,6 +1558,8 @@ PUT /sorani_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"sorani_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: sorani_example, built_in: sorani, rebuilt: rebuilt_sorani}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1528,7 +1591,7 @@ PUT /spanish_example } }, "analyzer": { - "spanish": { + "rebuilt_spanish": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1543,6 +1606,8 @@ PUT /spanish_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"spanish_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: spanish_example, built_in: spanish, rebuilt: rebuilt_spanish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1574,7 +1639,7 @@ PUT /swedish_example } }, "analyzer": { - "swedish": { + "rebuilt_swedish": { "tokenizer": "standard", "filter": [ "lowercase", @@ -1589,6 +1654,8 @@ PUT /swedish_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"swedish_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: swedish_example, built_in: swedish, rebuilt: rebuilt_swedish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1624,7 +1691,7 @@ PUT /turkish_example } }, "analyzer": { - "turkish": { + "rebuilt_turkish": { "tokenizer": "standard", "filter": [ "apostrophe", @@ -1640,6 +1707,8 @@ PUT /turkish_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"turkish_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: turkish_example, built_in: turkish, rebuilt: rebuilt_turkish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1663,10 +1732,11 @@ PUT /thai_example } }, "analyzer": { - "thai": { + "rebuilt_thai": { "tokenizer": "thai", "filter": [ "lowercase", + "decimal_digit", "thai_stop" ] } @@ -1676,5 +1746,7 @@ PUT /thai_example } ---------------------------------------------------- // CONSOLE +// TEST[s/"thai_keywords",//] +// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: thai_example, built_in: thai, rebuilt: rebuilt_thai}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 03bc3c7190e7f..69d94699dc9c1 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -20,17 +20,18 @@ package org.elasticsearch.smoketest; import org.apache.http.HttpHost; +import org.apache.lucene.util.BytesRef; + import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; @@ -43,17 +44,15 @@ import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.function.Function; -import javax.lang.model.element.ExecutableElement; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static 
org.hamcrest.Matchers.hasSize; public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -95,26 +94,33 @@ private static class TestAnalyzer implements ExecutableSection { private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("test_analyzer", false, (a, location) -> { String index = (String) a[0]; - String name = (String) a[0]; - return new TestAnalyzer(location, index, name); + String builtIn = (String) a[1]; + String rebuilt = (String) a[2]; + return new TestAnalyzer(location, index, builtIn, rebuilt); }); static { PARSER.declareString(constructorArg(), new ParseField("index")); - PARSER.declareString(constructorArg(), new ParseField("name")); + PARSER.declareString(constructorArg(), new ParseField("built_in")); + PARSER.declareString(constructorArg(), new ParseField("rebuilt")); } private static TestAnalyzer parse(XContentParser parser) throws IOException { XContentLocation location = parser.getTokenLocation(); - return PARSER.parse(parser, location); + TestAnalyzer section = PARSER.parse(parser, location); + assert parser.currentToken() == Token.END_OBJECT; + parser.nextToken(); // throw out the END_OBJECT to conform with other ExecutableSections + return section; } private final XContentLocation location; private final String index; - private final String name; + private final String builtIn; + private final String rebuilt; - private TestAnalyzer(XContentLocation location, String index, String name) { + private TestAnalyzer(XContentLocation location, String index, String builtIn, String rebuilt) { this.location = location; this.index = index; - this.name = name; + this.builtIn = builtIn; + this.rebuilt = rebuilt; } @Override @@ -124,26 +130,48 @@ public XContentLocation getLocation() { @Override public void execute(ClientYamlTestExecutionContext executionContext) throws IOException { - int size = 1000; + int size = 100; List testText = new ArrayList<>(size); for (int i = 0; i < size; i++) { - 
testText.add(randomRealisticUnicodeOfCodepointLength(10)); + testText.add(randomRealisticUnicodeOfCodepointLength(between(1, 15)) + // Don't look up stashed values + .replace("$", "\\$")); } Map body = new HashMap<>(2); - body.put("analyzer", name); + body.put("analyzer", builtIn); body.put("text", testText); - ClientYamlTestResponse response = executionContext.callApi("anlayze", singletonMap("index", index), + ClientYamlTestResponse response = executionContext.callApi("indices.analyze", singletonMap("index", index), singletonList(body), emptyMap()); - List rebuilt = (List) response.evaluate("path"); - response = executionContext.callApi("anlayze", emptyMap(), singletonList(body), emptyMap()); - List builtIn = (List) response.evaluate("path"); - assertThat(rebuilt, hasSize(size)); - assertThat(builtIn, hasSize(size)); - for (int i = 0; i < size; i++) { - Map rebuiltToken = (Map) rebuilt.get(i); - Map builtInToken = (Map) builtIn.get(i); + Iterator builtInTokens = ((List) response.evaluate("tokens")).iterator(); + body.put("analyzer", rebuilt); + response = executionContext.callApi("indices.analyze", singletonMap("index", index), + singletonList(body), emptyMap()); + Iterator rebuiltTokens = ((List) response.evaluate("tokens")).iterator(); + + Object previousRebuilt = null; + Object previousBuiltIn = null; + while (builtInTokens.hasNext()) { + if (false == rebuiltTokens.hasNext()) { + fail("rebuilt token filter has fewer tokens. built in has [" + builtInTokens.next() + + "]. Previous built in was [" + previousBuiltIn + "] and previous rebuilt was [" + + previousRebuilt + "]"); + } + Map builtInToken = (Map) builtInTokens.next(); + Map rebuiltToken = (Map) rebuiltTokens.next(); + String builtInText = (String) builtInToken.get("token"); + String rebuiltText = (String) rebuiltToken.get("token"); + // Check the text and produce an error message with the utf8 sequence if they don't match. + if (false == rebuiltText.equals(builtInText)) { + fail("text differs. 
built in was [" + builtInText + "] but rebuilt was [" + rebuiltText + "]. In utf8 those are\n" + + new BytesRef(builtInText) + " and\n" + new BytesRef(rebuiltText)); + } + // Now check the whole map just in case the text matches but something else differs assertEquals(builtInToken, rebuiltToken); } + if (rebuiltTokens.hasNext()) { + fail("rebuilt token filter has more tokens. it has [" + rebuiltTokens.next() + "]. Previous built in was [" + + previousBuiltIn + "] and previous rebuilt was [" + previousRebuilt + "]"); + } } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index f5e834aa90c69..16aa9c428d7e9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -121,7 +121,7 @@ public ClientYamlTestResponse callApi(String apiName, Map params } String contentType = entity.getContentType().getValue(); //randomly test the GET with source param instead of GET/POST with body - if (sendBodyAsSourceParam(supportedMethods, contentType)) { + if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { logger.debug("sending the request body as source param with GET method"); queryStringParams.put("source", EntityUtils.toString(entity)); queryStringParams.put("source_content_type", contentType); @@ -177,14 +177,25 @@ public ClientYamlTestResponse callApi(String apiName, Map params } } - private static boolean sendBodyAsSourceParam(List supportedMethods, String contentType) { - if (supportedMethods.contains(HttpGet.METHOD_NAME)) { - if (contentType.startsWith(ContentType.APPLICATION_JSON.getMimeType()) || - contentType.startsWith(YAML_CONTENT_TYPE.getMimeType())) { - return RandomizedTest.rarely(); - } + private static boolean sendBodyAsSourceParam(List supportedMethods, 
String contentType, long contentLength) { + if (false == supportedMethods.contains(HttpGet.METHOD_NAME)) { + // The API doesn't claim to support GET anyway + return false; + } + if (contentLength < 0) { + // Negative length means "unknown" or "huge" in this case. Either way we can't send it as a parameter + return false; + } + if (contentLength > 2000) { + // Long bodies won't fit in the parameter and will cause a too_long_frame_exception + return false; + } + if (false == contentType.startsWith(ContentType.APPLICATION_JSON.getMimeType()) + && false == contentType.startsWith(YAML_CONTENT_TYPE.getMimeType())) { + // We can only encode JSON or YAML this way. + return false; } - return false; + return RandomizedTest.rarely(); } private ClientYamlSuiteRestApi restApi(String apiName) { From 0a1d66d8bce7a5108fe1d74f0acc97970c5f488f Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 16 Apr 2018 10:55:05 -0400 Subject: [PATCH 03/18] Fix names --- .../analysis/analyzers/lang-analyzer.asciidoc | 68 +++++++-------- .../smoketest/DocsClientYamlTestSuiteIT.java | 84 ++++++++++--------- 2 files changed, 79 insertions(+), 73 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index a3eaa445a244d..1560798d18107 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -115,7 +115,7 @@ PUT /arabic_example ---------------------------------------------------- // CONSOLE // TEST[s/"arabic_normalization",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: arabic_example, built_in: arabic, rebuilt: rebuilt_arabic}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -163,7 +163,7 @@ PUT /armenian_example ---------------------------------------------------- // CONSOLE // TEST[s/"armenian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: armenian_example, built_in: armenian, rebuilt: rebuilt_armenian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -211,7 +211,7 @@ PUT /basque_example ---------------------------------------------------- // CONSOLE // TEST[s/"basque_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: basque_example, built_in: basque, rebuilt: rebuilt_basque}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -262,7 +262,7 @@ PUT /bengali_example ---------------------------------------------------- // CONSOLE // TEST[s/"bengali_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: bengali_example, built_in: bengali, rebuilt: rebuilt_bengali}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -310,7 +310,7 @@ PUT /brazilian_example ---------------------------------------------------- // CONSOLE // TEST[s/"brazilian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: brazilian_example, built_in: brazilian, rebuilt: rebuilt_brazilian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -358,7 +358,7 @@ PUT /bulgarian_example ---------------------------------------------------- // CONSOLE // TEST[s/"bulgarian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: bulgarian_example, built_in: bulgarian, rebuilt: rebuilt_bulgarian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -411,7 +411,7 @@ PUT /catalan_example ---------------------------------------------------- // CONSOLE // TEST[s/"catalan_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: catalan_example, built_in: catalan, rebuilt: rebuilt_catalan}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -451,7 +451,7 @@ PUT /cjk_example ---------------------------------------------------- // CONSOLE // TEST[s/"cjk_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: cjk_example, built_in: cjk, rebuilt: rebuilt_cjk}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -497,7 +497,7 @@ PUT /czech_example ---------------------------------------------------- // CONSOLE // TEST[s/"czech_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: czech_example, built_in: czech, rebuilt: rebuilt_czech}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -545,7 +545,7 @@ PUT /danish_example ---------------------------------------------------- // CONSOLE // TEST[s/"danish_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: danish_example, built_in: danish, rebuilt: rebuilt_danish}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -603,7 +603,7 @@ PUT /dutch_example ---------------------------------------------------- // CONSOLE // TEST[s/"dutch_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: dutch_example, built_in: dutch, rebuilt: rebuilt_dutch}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -656,7 +656,7 @@ PUT /english_example ---------------------------------------------------- // CONSOLE // TEST[s/"english_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: english_example, built_in: english, rebuilt: rebuilt_english}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -704,7 +704,7 @@ PUT /finnish_example ---------------------------------------------------- // CONSOLE // TEST[s/"finnish_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: finnish_example, built_in: finnish, rebuilt: rebuilt_finnish}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -762,7 +762,7 @@ PUT /french_example ---------------------------------------------------- // CONSOLE // TEST[s/"french_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: french_example, built_in: french, rebuilt: rebuilt_french}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -810,7 +810,7 @@ PUT /galician_example ---------------------------------------------------- // CONSOLE // TEST[s/"galician_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: galician_example, built_in: galician, rebuilt: rebuilt_galician}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -859,7 +859,7 @@ PUT /german_example ---------------------------------------------------- // CONSOLE // TEST[s/"german_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: german_example, built_in: german, rebuilt: rebuilt_german}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -911,7 +911,7 @@ PUT /greek_example ---------------------------------------------------- // CONSOLE // TEST[s/"greek_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: greek_example, built_in: greek, rebuilt: rebuilt_greek}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -962,7 +962,7 @@ PUT /hindi_example ---------------------------------------------------- // CONSOLE // TEST[s/"hindi_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: hindi_example, built_in: hindi, rebuilt: rebuilt_hindi}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1010,7 +1010,7 @@ PUT /hungarian_example ---------------------------------------------------- // CONSOLE // TEST[s/"hungarian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: hungarian_example, built_in: hungarian, rebuilt: rebuilt_hungarian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1059,7 +1059,7 @@ PUT /indonesian_example ---------------------------------------------------- // CONSOLE // TEST[s/"indonesian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: indonesian_example, built_in: indonesian, rebuilt: rebuilt_indonesian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1116,7 +1116,7 @@ PUT /irish_example ---------------------------------------------------- // CONSOLE // TEST[s/"irish_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: irish_example, built_in: irish, rebuilt: rebuilt_irish}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1174,7 +1174,7 @@ PUT /italian_example ---------------------------------------------------- // CONSOLE // TEST[s/"italian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: italian_example, built_in: italian, rebuilt: rebuilt_italian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1222,7 +1222,7 @@ PUT /latvian_example ---------------------------------------------------- // CONSOLE // TEST[s/"latvian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: latvian_example, built_in: latvian, rebuilt: rebuilt_latvian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1270,7 +1270,7 @@ PUT /lithuanian_example ---------------------------------------------------- // CONSOLE // TEST[s/"lithuanian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: lithuanian_example, built_in: lithuanian, rebuilt: rebuilt_lithuanian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1318,7 +1318,7 @@ PUT /norwegian_example ---------------------------------------------------- // CONSOLE // TEST[s/"norwegian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: norwegian_example, built_in: norwegian, rebuilt: rebuilt_norwegian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1365,7 +1365,7 @@ PUT /persian_example } ---------------------------------------------------- // CONSOLE -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: persian_example, built_in: persian, rebuilt: rebuilt_persian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/] <1> Replaces zero-width non-joiners with an ASCII space. <2> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -1412,7 +1412,7 @@ PUT /portuguese_example ---------------------------------------------------- // CONSOLE // TEST[s/"portuguese_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: portuguese_example, built_in: portuguese, rebuilt: rebuilt_portuguese}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1460,7 +1460,7 @@ PUT /romanian_example ---------------------------------------------------- // CONSOLE // TEST[s/"romanian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: romanian_example, built_in: romanian, rebuilt: rebuilt_romanian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1509,7 +1509,7 @@ PUT /russian_example ---------------------------------------------------- // CONSOLE // TEST[s/"russian_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: russian_example, built_in: russian, rebuilt: rebuilt_russian}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1559,7 +1559,7 @@ PUT /sorani_example ---------------------------------------------------- // CONSOLE // TEST[s/"sorani_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: sorani_example, built_in: sorani, rebuilt: rebuilt_sorani}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1607,7 +1607,7 @@ PUT /spanish_example ---------------------------------------------------- // CONSOLE // TEST[s/"spanish_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: spanish_example, built_in: spanish, rebuilt: rebuilt_spanish}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1655,7 +1655,7 @@ PUT /swedish_example ---------------------------------------------------- // CONSOLE // TEST[s/"swedish_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: swedish_example, built_in: swedish, rebuilt: rebuilt_swedish}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1708,7 +1708,7 @@ PUT /turkish_example ---------------------------------------------------- // CONSOLE // TEST[s/"turkish_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: turkish_example, built_in: turkish, rebuilt: rebuilt_turkish}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1747,6 +1747,6 @@ PUT /thai_example ---------------------------------------------------- // CONSOLE // TEST[s/"thai_keywords",//] -// TEST[s/\n$/\nstartyaml\n - test_analyzer: {index: thai_example, built_in: thai, rebuilt: rebuilt_thai}\nendyaml\n/] +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 69d94699dc9c1..72bbe10066ebb 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -64,7 +64,8 @@ public DocsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandi public static Iterable parameters() throws Exception { List entries = new ArrayList<>(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS.size() + 1); entries.addAll(ExecutableSection.DEFAULT_EXECUTABLE_CONTEXTS); - entries.add(new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("test_analyzer"), TestAnalyzer::parse)); + entries.add(new NamedXContentRegistry.Entry(ExecutableSection.class, + new ParseField("compare_analyzers"), CompareAnalyzers::parse)); NamedXContentRegistry executeableSectionRegistry = new NamedXContentRegistry(entries); return ESClientYamlSuiteTestCase.createParameters(executeableSectionRegistry); } @@ -90,22 +91,25 @@ protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion); } - private static class TestAnalyzer implements ExecutableSection { - private static ConstructingObjectParser PARSER = + /** + * Compares the the results of running two analyzers against many random strings. 
+ */ + private static class CompareAnalyzers implements ExecutableSection { + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>("test_analyzer", false, (a, location) -> { String index = (String) a[0]; - String builtIn = (String) a[1]; - String rebuilt = (String) a[2]; - return new TestAnalyzer(location, index, builtIn, rebuilt); + String first = (String) a[1]; + String second = (String) a[2]; + return new CompareAnalyzers(location, index, first, second); }); static { PARSER.declareString(constructorArg(), new ParseField("index")); - PARSER.declareString(constructorArg(), new ParseField("built_in")); - PARSER.declareString(constructorArg(), new ParseField("rebuilt")); + PARSER.declareString(constructorArg(), new ParseField("first")); + PARSER.declareString(constructorArg(), new ParseField("second")); } - private static TestAnalyzer parse(XContentParser parser) throws IOException { + private static CompareAnalyzers parse(XContentParser parser) throws IOException { XContentLocation location = parser.getTokenLocation(); - TestAnalyzer section = PARSER.parse(parser, location); + CompareAnalyzers section = PARSER.parse(parser, location); assert parser.currentToken() == Token.END_OBJECT; parser.nextToken(); // throw out the END_OBJECT to conform with other ExecutableSections return section; @@ -113,14 +117,14 @@ private static TestAnalyzer parse(XContentParser parser) throws IOException { private final XContentLocation location; private final String index; - private final String builtIn; - private final String rebuilt; + private final String first; + private final String second; - private TestAnalyzer(XContentLocation location, String index, String builtIn, String rebuilt) { + private CompareAnalyzers(XContentLocation location, String index, String first, String second) { this.location = location; this.index = index; - this.builtIn = builtIn; - this.rebuilt = rebuilt; + this.first = first; + this.second = second; } @Override @@ -138,40 
+142,42 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx .replace("$", "\\$")); } Map body = new HashMap<>(2); - body.put("analyzer", builtIn); + body.put("analyzer", first); body.put("text", testText); ClientYamlTestResponse response = executionContext.callApi("indices.analyze", singletonMap("index", index), singletonList(body), emptyMap()); - Iterator builtInTokens = ((List) response.evaluate("tokens")).iterator(); - body.put("analyzer", rebuilt); + Iterator firstTokens = ((List) response.evaluate("tokens")).iterator(); + body.put("analyzer", second); response = executionContext.callApi("indices.analyze", singletonMap("index", index), singletonList(body), emptyMap()); - Iterator rebuiltTokens = ((List) response.evaluate("tokens")).iterator(); - - Object previousRebuilt = null; - Object previousBuiltIn = null; - while (builtInTokens.hasNext()) { - if (false == rebuiltTokens.hasNext()) { - fail("rebuilt token filter has fewer tokens. built in has [" + builtInTokens.next() - + "]. Previous built in was [" + previousBuiltIn + "] and previous rebuilt was [" - + previousRebuilt + "]"); + Iterator secondTokens = ((List) response.evaluate("tokens")).iterator(); + + Object previousFirst = null; + Object previousSecond = null; + while (firstTokens.hasNext()) { + if (false == secondTokens.hasNext()) { + fail(second + " has fewer tokens than " + first + ". " + + first + " has [" + firstTokens.next() + "] but " + second + " is out of tokens. 
" + + first + "'s last token was [" + previousFirst + "] and " + + second + "'s last token was' [" + previousSecond + "]"); } - Map builtInToken = (Map) builtInTokens.next(); - Map rebuiltToken = (Map) rebuiltTokens.next(); - String builtInText = (String) builtInToken.get("token"); - String rebuiltText = (String) rebuiltToken.get("token"); + Map firstToken = (Map) firstTokens.next(); + Map secondToken = (Map) secondTokens.next(); + String firstText = (String) firstToken.get("token"); + String secondText = (String) secondToken.get("token"); // Check the text and produce an error message with the utf8 sequence if they don't match. - if (false == rebuiltText.equals(builtInText)) { - fail("text differs. built in was [" + builtInText + "] but rebuilt was [" + rebuiltText + "]. In utf8 those are\n" - + new BytesRef(builtInText) + " and\n" + new BytesRef(rebuiltText)); + if (false == secondText.equals(firstText)) { + fail("text differs: " + first + " was [" + firstText + "] but " + second + " was [" + secondText + + "]. In utf8 those are\n" + new BytesRef(firstText) + " and\n" + new BytesRef(secondText)); } // Now check the whole map just in case the text matches but something else differs - assertEquals(builtInToken, rebuiltToken); - } - if (rebuiltTokens.hasNext()) { - fail("rebuilt token filter has more tokens. it has [" + rebuiltTokens.next() + "]. Previous built in was [" - + previousBuiltIn + "] and previous rebuilt was [" + previousRebuilt + "]"); + assertEquals(firstToken, secondToken); } + if (secondTokens.hasNext()) { + fail(second + " has more tokens than " + first + ". " + + second + " has [" + secondTokens.next() + "] but " + first + " is out of tokens. 
" + + first + "'s last token was [" + previousFirst + "] and " + + second + "'s last token was' [" + previousSecond + "]"); } } } } From ec2eef7696bece831209a107fcb04a99208fc2b1 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 19 Apr 2018 09:37:27 -0400 Subject: [PATCH 04/18] Spaces --- .../smoketest/DocsClientYamlTestSuiteIT.java | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 72bbe10066ebb..7ffe1eeb8dd3d 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -47,6 +47,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -92,7 +93,12 @@ protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec } /** - * Compares the the results of running two analyzers against many random strings. + * Compares the the results of running two analyzers against many random + * strings. The goal is to figure out if two anlayzers are "the same" by + * comparing their results. This is far from perfect but should be fairly + * accurate, especially for gross things like missing {@code decimal_digit} + * token filters, and should be fairly fast because it compares a fairly + * small number of tokens. 
*/ private static class CompareAnalyzers implements ExecutableSection { private static ConstructingObjectParser PARSER = @@ -135,9 +141,24 @@ public XContentLocation getLocation() { @Override public void execute(ClientYamlTestExecutionContext executionContext) throws IOException { int size = 100; + int maxLength = 15; List testText = new ArrayList<>(size); for (int i = 0; i < size; i++) { - testText.add(randomRealisticUnicodeOfCodepointLength(between(1, 15)) + /** + * Build a string with a few unicode sequences separated by + * spaces. The unicode sequences aren't going to be of the same + * code page which is a shame because it makes the entire + * string less realistic. But this still provides a fairly + * nice string to compare. + */ + int spaces = between(0, 5); + StringBuilder b = new StringBuilder((spaces + 1) * maxLength); + b.append(randomRealisticUnicodeOfCodepointLengthBetween(1, maxLength)); + for (int t = 0; t < spaces; t++) { + b.append(' '); + b.append(randomRealisticUnicodeOfCodepointLengthBetween(1, maxLength)); + } + testText.add(b.toString() // Don't look up stashed values .replace("$", "\\$")); } @@ -177,7 +198,8 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx fail(second + " has more tokens than " + first + ". " + second + " has [" + secondTokens.next() + "] but " + first + " is out of tokens. 
" + first + "'s last token was [" + previousFirst + "] and " - + second + "'s last token was' [" + previousSecond + "]"); } + + second + "'s last token was' [" + previousSecond + "]"); + } } } } From 27c9dc1c887ec20c6c73624e4f17660e1e7c868d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 19 Apr 2018 09:40:45 -0400 Subject: [PATCH 05/18] Document sytax enhancement --- docs/README.asciidoc | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/docs/README.asciidoc b/docs/README.asciidoc index 1e4a313578529..f0826b4161224 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -68,6 +68,23 @@ for its modifiers: but rather than the setup defined in `docs/build.gradle` the setup is defined right in the documentation file. +In addition to the standard CONSOLE syntax these snippets can contain blocks +of yaml surrounded by markers like this: + +``` +startyaml + - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai} +endyaml +``` + +This allows slightly more expressive testing of the snippets. Since that syntax +is not supported by CONSOLE the usual way to incorporate it is with a +`// TEST[s//]` marker like this: + +``` +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/] +``` + Any place you can use json you can use elements like `$body.path.to.thing` which is replaced on the fly with the contents of the thing at `path.to.thing` in the last response. 
From bf175048c4442305d3cbae04faf713a529538242 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 20 Apr 2018 16:34:48 -0400 Subject: [PATCH 06/18] Fixes --- docs/build.gradle | 2 ++ .../org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/build.gradle b/docs/build.gradle index 97094c6e79cbe..dc24aaa7b02db 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -58,6 +58,8 @@ buildRestTests.docs = fileTree(projectDir) { exclude 'build.gradle' // That is where the snippets go, not where they come from! exclude 'build' + // Just syntax examples + exclude 'README.asciidoc' } Closure setupTwitter = { String name, int count -> diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 7ffe1eeb8dd3d..ad84229f0c7da 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -47,7 +47,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; @@ -193,6 +192,8 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx } // Now check the whole map just in case the text matches but something else differs assertEquals(firstToken, secondToken); + previousFirst = firstToken; + previousSecond = secondToken; } if (secondTokens.hasNext()) { fail(second + " has more tokens than " + first + ". " From 3579f83e55157da1697e9c5f758a8c942ba7c6ed Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 09:53:39 -0400 Subject: [PATCH 07/18] Move flag This makes the change to the regex smaller and fixes some parse errors I hadn't noticed before. 
--- .../gradle/doc/RestTestsFromSnippetsTask.groovy | 4 ++-- ...eindexWithPainlessClientYamlTestSuiteIT.class | Bin 0 -> 2087 bytes 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 qa/smoke-test-reindex-with-all-modules/bin/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.class diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 59cfee7814541..15a4f21b17543 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -144,10 +144,10 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|startyaml|#/ String body = /(?(?:\n(?!$badBody)[^\n]+)+)/ String rawRequest = /(?:$method\s+$pathAndQuery$body?)/ - String yamlRequest = /(?:startyaml(?.+?)endyaml)/ + String yamlRequest = /(?:startyaml(?s)(?.+?)(?-s)endyaml)/ String nonComment = /(?:$rawRequest|$yamlRequest)/ String comment = /(?#.+)/ - /(?s)(?:$comment|$nonComment)\n+/ + /(?:$comment|$nonComment)\n+/ }() /** diff --git a/qa/smoke-test-reindex-with-all-modules/bin/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.class b/qa/smoke-test-reindex-with-all-modules/bin/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.class new file mode 100644 index 0000000000000000000000000000000000000000..13f24950da115da7d8c28fc6a2afbc2196aa63a4 GIT binary patch literal 2087 zcmc&#TTc@~6h707rKJjjhzRO{s6~_Qi~3**iD@7bJ;oPJD2&+nQuQneF1=H@FWE>0tZ@odBe5IAF(#ehpL|E zKGQ~cnlt4!@;V5Qc_dJ_s|t$lav^K{7nW<)48!nxb{w?H7-W`2tkNEE{wPeGQz)S2;3r9w3U*GQbbm5S@oZ}S@U!k{Vq z2#GdTh&BTKf@ac{2B)UeYT3NO*`jq+Uei)3ay1) zQyXT$jMtzBv%ycA7~3EMGRLvRaH1Uj4zC-j2%4cXHswRYyQ~PB8;3GVWV3umx`?Tk 
zvPGu(5I|}|$|5FY(w1doM9zS_n5S$Wqt&Oi-eR5!RcDNR2`hjhRmX^pBCF*n+aaE#dhV-aS)9JZ9lx3F6hcz)^kCu&T{ zz>@Uvp%B{O6ZJ}9?clrCNx=gG^CJp21&;_UjO2U@)(A}W5{QI(EwskAUBMSa!!i(v z;}Z)I!&MTl;F!VPQ~VplIrAAN_h+ttgsHEQ%nXiJ2JoB3F&nKja0BLWmxh}#4+*%1 z>ja)Gz#^_ Date: Fri, 27 Apr 2018 10:59:30 -0400 Subject: [PATCH 08/18] Fix up irish stemmer --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 1560798d18107..a201cabe87489 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -1077,9 +1077,13 @@ PUT /irish_example "settings": { "analysis": { "filter": { + "irish_hyphenation": { + "type": "stop", + "stopwords": [ "h", "n", "t" ] + } "irish_elision": { "type": "elision", - "articles": [ "h", "n", "t" ] + "articles": [ "d", "m", "b" ] }, "irish_stop": { "type": "stop", @@ -1102,9 +1106,10 @@ PUT /irish_example "rebuilt_irish": { "tokenizer": "standard", "filter": [ - "irish_stop", + "irish_hyphenation", "irish_elision", "irish_lowercase", + "irish_stop", "irish_keywords", "irish_stemmer" ] From bd10c4ac168c5c765504bc24ece2e019f53c6bbd Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 11:01:18 -0400 Subject: [PATCH 09/18] , --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index a201cabe87489..9588c9f5c5d73 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -1080,7 +1080,7 @@ PUT /irish_example "irish_hyphenation": { "type": "stop", "stopwords": [ "h", "n", "t" ] - } + }, "irish_elision": { "type": "elision", "articles": [ 
"d", "m", "b" ] From 43b22135932cc0538cca53580540a6774bb0a030 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 11:15:28 -0400 Subject: [PATCH 10/18] Fix irish better --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 9588c9f5c5d73..62fe99cdc6c46 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -1079,11 +1079,13 @@ PUT /irish_example "filter": { "irish_hyphenation": { "type": "stop", - "stopwords": [ "h", "n", "t" ] + "stopwords": [ "h", "n", "t" ], + "ignore_case": true }, "irish_elision": { "type": "elision", - "articles": [ "d", "m", "b" ] + "articles": [ "d", "m", "b" ], + "articles_case": true }, "irish_stop": { "type": "stop", From 0e6b62fd4de4a4513f5676dda07fecf2a5df08db Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 11:39:57 -0400 Subject: [PATCH 11/18] Fix cjk --- .../analysis/analyzers/lang-analyzer.asciidoc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 62fe99cdc6c46..26104913b5e2c 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -431,7 +431,13 @@ PUT /cjk_example "filter": { "english_stop": { "type": "stop", - "stopwords": "_english_" <1> + "stopwords": [ <1> + "a", "and", "are", "as", "at", "be", "but", "by", "for" + "if", "in", "into", "is", "it", "no", "not", "of", "on", + "or", "s", "such", "t", "that", "the", "their", "then", + "there", "these", "they", "this", "to", "was", "will", + "with", "www" + ] } }, "analyzer": { @@ -453,7 +459,9 @@ PUT /cjk_example // TEST[s/"cjk_keywords",//] // 
TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` - or `stopwords_path` parameters. + or `stopwords_path` parameters. The default stop words are + *almost* the same as the `_english_` set, but not exactly + the same. [[czech-analyzer]] ===== `czech` analyzer From 2c77a9778ec7a007577ff6b115e62a6712126eb4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 11:41:34 -0400 Subject: [PATCH 12/18] . --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 26104913b5e2c..ea6d24c7160e4 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -435,7 +435,7 @@ PUT /cjk_example "a", "and", "are", "as", "at", "be", "but", "by", "for" "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "s", "such", "t", "that", "the", "their", "then", - "there", "these", "they", "this", "to", "was", "will", + "there", "these", "they", "this", "to", "was", "will" "with", "www" ] } From 5ba12d153e088ee13bef82fed3878be8337ac61e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 11:43:19 -0400 Subject: [PATCH 13/18] , --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index ea6d24c7160e4..d24376d6d65f8 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -432,7 +432,7 @@ PUT /cjk_example "english_stop": { "type": "stop", "stopwords": [ <1> - "a", "and", "are", "as", "at", "be", "but", "by", "for" + 
"a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "s", "such", "t", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will" From f4c22209165c090815943ebda809c23d3c1087bb Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 11:45:22 -0400 Subject: [PATCH 14/18] , againt --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index d24376d6d65f8..a51346c71ae02 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -435,7 +435,7 @@ PUT /cjk_example "a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "s", "such", "t", "that", "the", "their", "then", - "there", "these", "they", "this", "to", "was", "will" + "there", "these", "they", "this", "to", "was", "will", "with", "www" ] } From b58330b0afd3ba47960fe34e94e4b8cec69ddb75 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 27 Apr 2018 12:16:37 -0400 Subject: [PATCH 15/18] Sigh --- docs/reference/analysis/analyzers/lang-analyzer.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index a51346c71ae02..d718a0b2da6ff 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -103,8 +103,8 @@ PUT /arabic_example "lowercase", "decimal_digit", "arabic_stop", - "arabic_keywords", "arabic_normalization", + "arabic_keywords", "arabic_stemmer" ] } @@ -114,7 +114,7 @@ PUT /arabic_example } ---------------------------------------------------- // CONSOLE -// 
TEST[s/"arabic_normalization",//] +// TEST[s/"arabic_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/] <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. From dcf25b54401d9985491ccad2badbd495d9cc9d0a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Sat, 28 Apr 2018 09:51:27 -0400 Subject: [PATCH 16/18] Fix precommit --- .../elasticsearch/test/rest/yaml/section/ExecutableSection.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java index 0101e71cc14b6..ce5ea1c1cde06 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java @@ -37,7 +37,7 @@ public interface ExecutableSection { /** * Default list of {@link ExecutableSection}s available for tests. 
*/ - public List DEFAULT_EXECUTABLE_CONTEXTS = unmodifiableList(Arrays.asList( + List DEFAULT_EXECUTABLE_CONTEXTS = unmodifiableList(Arrays.asList( new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("do"), DoSection::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("set"), SetSection::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("match"), MatchAssertion::parse), From 3c0f070f22142c2ced8f0deb7b8462e81edfb3f8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 7 May 2018 17:52:31 -0400 Subject: [PATCH 17/18] Remove errant class file --- ...eindexWithPainlessClientYamlTestSuiteIT.class | Bin 2087 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 qa/smoke-test-reindex-with-all-modules/bin/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.class diff --git a/qa/smoke-test-reindex-with-all-modules/bin/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.class b/qa/smoke-test-reindex-with-all-modules/bin/src/test/java/org/elasticsearch/smoketest/SmokeTestReindexWithPainlessClientYamlTestSuiteIT.class deleted file mode 100644 index 13f24950da115da7d8c28fc6a2afbc2196aa63a4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2087 zcmc&#TTc@~6h707rKJjjhzRO{s6~_Qi~3**iD@7bJ;oPJD2&+nQuQneF1=H@FWE>0tZ@odBe5IAF(#ehpL|E zKGQ~cnlt4!@;V5Qc_dJ_s|t$lav^K{7nW<)48!nxb{w?H7-W`2tkNEE{wPeGQz)S2;3r9w3U*GQbbm5S@oZ}S@U!k{Vq z2#GdTh&BTKf@ac{2B)UeYT3NO*`jq+Uei)3ay1) zQyXT$jMtzBv%ycA7~3EMGRLvRaH1Uj4zC-j2%4cXHswRYyQ~PB8;3GVWV3umx`?Tk zvPGu(5I|}|$|5FY(w1doM9zS_n5S$Wqt&Oi-eR5!RcDNR2`hjhRmX^pBCF*n+aaE#dhV-aS)9JZ9lx3F6hcz)^kCu&T{ zz>@Uvp%B{O6ZJ}9?clrCNx=gG^CJp21&;_UjO2U@)(A}W5{QI(EwskAUBMSa!!i(v z;}Z)I!&MTl;F!VPQ~VplIrAAN_h+ttgsHEQ%nXiJ2JoB3F&nKja0BLWmxh}#4+*%1 z>ja)Gz#^_ Date: Mon, 7 May 2018 19:52:42 -0400 Subject: [PATCH 18/18] Add warning --- 
.../org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index ad84229f0c7da..a36df9987e7de 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -115,7 +115,7 @@ private static class CompareAnalyzers implements ExecutableSection { private static CompareAnalyzers parse(XContentParser parser) throws IOException { XContentLocation location = parser.getTokenLocation(); CompareAnalyzers section = PARSER.parse(parser, location); - assert parser.currentToken() == Token.END_OBJECT; + assert parser.currentToken() == Token.END_OBJECT : "End of object required"; parser.nextToken(); // throw out the END_OBJECT to conform with other ExecutableSections return section; }