diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index bdef0ba631b72..d5b86656f1ca6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -118,6 +118,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -408,6 +409,10 @@ protected static boolean has(ProductFeature feature) { } protected List<HttpHost> parseClusterHosts(String hostsString) { + return parseClusterHosts(hostsString, this::buildHttpHost); + } + + public static List<HttpHost> parseClusterHosts(String hostsString, BiFunction<String, Integer, HttpHost> httpHostSupplier) { String[] stringUrls = hostsString.split(","); List<HttpHost> hosts = new ArrayList<>(stringUrls.length); for (String stringUrl : stringUrls) { @@ -417,7 +422,7 @@ protected List<HttpHost> parseClusterHosts(String hostsString) { } String host = stringUrl.substring(0, portSeparator); int port = Integer.valueOf(stringUrl.substring(portSeparator + 1)); - hosts.add(buildHttpHost(host, port)); + hosts.add(httpHostSupplier.apply(host, port)); } return unmodifiableList(hosts); } diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java index 3ed8fc26ac937..432974dc256eb 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/BWCCodec.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.lucene.bwc.codecs; +import org.apache.lucene.backward_codecs.lucene86.Lucene86Codec; +import org.apache.lucene.backward_codecs.lucene87.Lucene87Codec; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FieldInfosFormat; import org.apache.lucene.codecs.FieldsConsumer; @@ -26,6 +28,8 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; +import org.elasticsearch.xpack.lucene.bwc.codecs.lucene86.BWCLucene86Codec; +import org.elasticsearch.xpack.lucene.bwc.codecs.lucene87.BWCLucene87Codec; import java.io.IOException; import java.util.ArrayList; @@ -118,7 +122,14 @@ private static FieldInfos filterFields(FieldInfos fieldInfos) { } public static SegmentInfo wrap(SegmentInfo segmentInfo) { - final Codec codec = segmentInfo.getCodec(); + // special handling for Lucene86Codec and Lucene87Codec (which still ship with Lucene's backward-codecs) + // Use BWCLucene86Codec/BWCLucene87Codec instead as those extend BWCCodec (similar to all other older codecs) + Codec codec = segmentInfo.getCodec(); + if (codec instanceof Lucene86Codec) { + codec = new BWCLucene86Codec(); + } else if (codec instanceof Lucene87Codec) { + codec = new BWCLucene87Codec(); + } final SegmentInfo segmentInfo1 = new SegmentInfo( segmentInfo.dir, // Use Version.LATEST instead of original version, otherwise SegmentCommitInfo will bark when processing (N-1 limitation) diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene86/BWCLucene86Codec.java
b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene86/BWCLucene86Codec.java new file mode 100644 index 0000000000000..4372e44f8171c --- /dev/null +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene86/BWCLucene86Codec.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.lucene.bwc.codecs.lucene86; + +import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat; +import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat; +import org.apache.lucene.backward_codecs.lucene50.Lucene50StoredFieldsFormat; +import org.apache.lucene.backward_codecs.lucene50.Lucene50TermVectorsFormat; +import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat; +import org.apache.lucene.backward_codecs.lucene80.Lucene80NormsFormat; +import org.apache.lucene.backward_codecs.lucene84.Lucene84PostingsFormat; +import org.apache.lucene.backward_codecs.lucene86.Lucene86PointsFormat; +import org.apache.lucene.backward_codecs.lucene86.Lucene86SegmentInfoFormat; +import org.apache.lucene.codecs.CompoundFormat; +import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.FieldInfosFormat; +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.LiveDocsFormat; +import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.codecs.SegmentInfoFormat; +import org.apache.lucene.codecs.StoredFieldsFormat; +import org.apache.lucene.codecs.TermVectorsFormat; +import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; +import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; +import org.elasticsearch.xpack.lucene.bwc.codecs.BWCCodec; + +import java.util.Objects; + +public class BWCLucene86Codec extends BWCCodec { + + private final TermVectorsFormat vectorsFormat = new Lucene50TermVectorsFormat(); + private final FieldInfosFormat fieldInfosFormat = wrap(new Lucene60FieldInfosFormat()); + private final SegmentInfoFormat segmentInfosFormat = wrap(new Lucene86SegmentInfoFormat()); + private final LiveDocsFormat liveDocsFormat = new Lucene50LiveDocsFormat(); + private final CompoundFormat compoundFormat = new Lucene50CompoundFormat(); + private final PointsFormat pointsFormat = new Lucene86PointsFormat(); + private final PostingsFormat defaultFormat; + + private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() { + @Override + public PostingsFormat getPostingsFormatForField(String field) { + return BWCLucene86Codec.this.getPostingsFormatForField(field); + } + }; + + private final DocValuesFormat docValuesFormat = new PerFieldDocValuesFormat() { + @Override + public DocValuesFormat getDocValuesFormatForField(String field) { + return BWCLucene86Codec.this.getDocValuesFormatForField(field); + } + }; + + private final StoredFieldsFormat storedFieldsFormat; + + /** Instantiates a new codec. 
*/ + public BWCLucene86Codec() { + super("BWCLucene86Codec"); + this.storedFieldsFormat = new Lucene50StoredFieldsFormat(Objects.requireNonNull(Lucene50StoredFieldsFormat.Mode.BEST_SPEED)); + this.defaultFormat = new Lucene84PostingsFormat(); + } + + @Override + public StoredFieldsFormat storedFieldsFormat() { + return storedFieldsFormat; + } + + @Override + public TermVectorsFormat termVectorsFormat() { + return vectorsFormat; + } + + @Override + public PostingsFormat postingsFormat() { + return postingsFormat; + } + + @Override + public final FieldInfosFormat fieldInfosFormat() { + return fieldInfosFormat; + } + + @Override + public SegmentInfoFormat segmentInfoFormat() { + return segmentInfosFormat; + } + + @Override + public final LiveDocsFormat liveDocsFormat() { + return liveDocsFormat; + } + + @Override + public CompoundFormat compoundFormat() { + return compoundFormat; + } + + @Override + public PointsFormat pointsFormat() { + return pointsFormat; + } + + @Override + public final KnnVectorsFormat knnVectorsFormat() { + return KnnVectorsFormat.EMPTY; + } + + /** + * Returns the postings format that should be used for writing new segments of field. + * + *
<p>The default implementation always returns "Lucene84". + * + *
<p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility: + * future versions of Lucene are only guaranteed to be able to read the default implementation. + */ + public PostingsFormat getPostingsFormatForField(String field) { + return defaultFormat; + } + + /** + * Returns the docvalues format that should be used for writing new segments of <code>field</code>. + * + *
<p>The default implementation always returns "Lucene80". + * + *
<p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility: + * future versions of Lucene are only guaranteed to be able to read the default implementation. + */ + public DocValuesFormat getDocValuesFormatForField(String field) { + return defaultDVFormat; + } + + @Override + public final DocValuesFormat docValuesFormat() { + return docValuesFormat; + } + + private final DocValuesFormat defaultDVFormat = DocValuesFormat.forName("Lucene80"); + + private final NormsFormat normsFormat = new Lucene80NormsFormat(); + + @Override + public NormsFormat normsFormat() { + return normsFormat; + } + +} diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene87/BWCLucene87Codec.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene87/BWCLucene87Codec.java new file mode 100644 index 0000000000000..7d36811a30626 --- /dev/null +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/codecs/lucene87/BWCLucene87Codec.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.lucene.bwc.codecs.lucene87; + +import org.apache.lucene.backward_codecs.lucene50.Lucene50CompoundFormat; +import org.apache.lucene.backward_codecs.lucene50.Lucene50LiveDocsFormat; +import org.apache.lucene.backward_codecs.lucene50.Lucene50TermVectorsFormat; +import org.apache.lucene.backward_codecs.lucene60.Lucene60FieldInfosFormat; +import org.apache.lucene.backward_codecs.lucene80.Lucene80DocValuesFormat; +import org.apache.lucene.backward_codecs.lucene80.Lucene80NormsFormat; +import org.apache.lucene.backward_codecs.lucene84.Lucene84PostingsFormat; +import org.apache.lucene.backward_codecs.lucene86.Lucene86PointsFormat; +import org.apache.lucene.backward_codecs.lucene86.Lucene86SegmentInfoFormat; +import org.apache.lucene.backward_codecs.lucene87.Lucene87StoredFieldsFormat; +import org.apache.lucene.codecs.CompoundFormat; +import org.apache.lucene.codecs.DocValuesFormat; +import org.apache.lucene.codecs.FieldInfosFormat; +import org.apache.lucene.codecs.KnnVectorsFormat; +import org.apache.lucene.codecs.LiveDocsFormat; +import org.apache.lucene.codecs.NormsFormat; +import org.apache.lucene.codecs.PointsFormat; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.codecs.SegmentInfoFormat; +import org.apache.lucene.codecs.StoredFieldsFormat; +import org.apache.lucene.codecs.TermVectorsFormat; +import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; +import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; +import org.elasticsearch.xpack.lucene.bwc.codecs.BWCCodec; + +public class BWCLucene87Codec extends BWCCodec { + + private final TermVectorsFormat vectorsFormat = new Lucene50TermVectorsFormat(); + private final FieldInfosFormat fieldInfosFormat = wrap(new Lucene60FieldInfosFormat()); + private final SegmentInfoFormat segmentInfosFormat = wrap(new Lucene86SegmentInfoFormat()); + private final LiveDocsFormat liveDocsFormat = new Lucene50LiveDocsFormat(); + private final CompoundFormat compoundFormat = new Lucene50CompoundFormat(); + private final PointsFormat pointsFormat = new Lucene86PointsFormat(); + private final PostingsFormat defaultFormat; + + private final PostingsFormat
postingsFormat = new PerFieldPostingsFormat() { + @Override + public PostingsFormat getPostingsFormatForField(String field) { + return BWCLucene87Codec.this.getPostingsFormatForField(field); + } + }; + + private final DocValuesFormat docValuesFormat = new PerFieldDocValuesFormat() { + @Override + public DocValuesFormat getDocValuesFormatForField(String field) { + return BWCLucene87Codec.this.getDocValuesFormatForField(field); + } + }; + + private final StoredFieldsFormat storedFieldsFormat; + + /** Instantiates a new codec. */ + public BWCLucene87Codec() { + super("BWCLucene87Codec"); + this.storedFieldsFormat = new Lucene87StoredFieldsFormat(Lucene87StoredFieldsFormat.Mode.BEST_COMPRESSION); + this.defaultFormat = new Lucene84PostingsFormat(); + this.defaultDVFormat = new Lucene80DocValuesFormat(Lucene80DocValuesFormat.Mode.BEST_COMPRESSION); + } + + @Override + public StoredFieldsFormat storedFieldsFormat() { + return storedFieldsFormat; + } + + @Override + public TermVectorsFormat termVectorsFormat() { + return vectorsFormat; + } + + @Override + public PostingsFormat postingsFormat() { + return postingsFormat; + } + + @Override + public final FieldInfosFormat fieldInfosFormat() { + return fieldInfosFormat; + } + + @Override + public SegmentInfoFormat segmentInfoFormat() { + return segmentInfosFormat; + } + + @Override + public final LiveDocsFormat liveDocsFormat() { + return liveDocsFormat; + } + + @Override + public CompoundFormat compoundFormat() { + return compoundFormat; + } + + @Override + public PointsFormat pointsFormat() { + return pointsFormat; + } + + @Override + public final KnnVectorsFormat knnVectorsFormat() { + return KnnVectorsFormat.EMPTY; + } + + /** + * Returns the postings format that should be used for writing new segments of field. + * + *
<p>The default implementation always returns "Lucene84". + * + *
<p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility: + * future versions of Lucene are only guaranteed to be able to read the default implementation. + */ + public PostingsFormat getPostingsFormatForField(String field) { + return defaultFormat; + } + + /** + * Returns the docvalues format that should be used for writing new segments of <code>field</code>. + * + *
<p>The default implementation always returns "Lucene80". + * + *
<p><b>WARNING:</b> if you subclass, you are responsible for index backwards compatibility: + * future versions of Lucene are only guaranteed to be able to read the default implementation. + */ + public DocValuesFormat getDocValuesFormatForField(String field) { + return defaultDVFormat; + } + + @Override + public final DocValuesFormat docValuesFormat() { + return docValuesFormat; + } + + private final DocValuesFormat defaultDVFormat; + + private final NormsFormat normsFormat = new Lucene80NormsFormat(); + + @Override + public NormsFormat normsFormat() { + return normsFormat; + } + +} diff --git a/x-pack/plugin/old-lucene-versions/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/x-pack/plugin/old-lucene-versions/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index 0215e9f7ca4ab..7830dbffe4adf 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/x-pack/plugin/old-lucene-versions/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -5,6 +5,8 @@ # 2.0. # +org.elasticsearch.xpack.lucene.bwc.codecs.lucene87.BWCLucene87Codec +org.elasticsearch.xpack.lucene.bwc.codecs.lucene86.BWCLucene86Codec org.elasticsearch.xpack.lucene.bwc.codecs.lucene70.BWCLucene70Codec org.elasticsearch.xpack.lucene.bwc.codecs.lucene70.Lucene70Codec org.elasticsearch.xpack.lucene.bwc.codecs.lucene62.Lucene62Codec diff --git a/x-pack/qa/repository-old-versions-7x/build.gradle b/x-pack/qa/repository-old-versions-7x/build.gradle new file mode 100644 index 0000000000000..e072f37bf04bf --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/build.gradle @@ -0,0 +1,55 @@ +import org.elasticsearch.gradle.internal.test.RestIntegTestTask +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask +import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask +import org.elasticsearch.gradle.internal.test.rest.RestResourcesPlugin + +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.rest-resources' + +restResources { + restApi { + include '_common', 'search' + } + restTests { + includeCore 'search/390_doc_values_search.yml' + } +} + +// Register rest resources with source set +sourceSets.javaRestTest.getOutput() + .dir( + project.getTasks() + .withType(CopyRestApiTask.class) + .named(RestResourcesPlugin.COPY_REST_API_SPECS_TASK) + .flatMap(CopyRestApiTask::getOutputResourceDir) + ); + +sourceSets.javaRestTest.getOutput() + .dir( + project.getTasks() + .withType(CopyRestTestsTask.class) + .named(RestResourcesPlugin.COPY_YAML_TESTS_TASK) + .flatMap(CopyRestTestsTask::getOutputResourceDir) + ); + +tasks.named("javaRestTest") { + enabled = false +} + +['7.9.0', '7.17.25'].each { versionString -> + String versionNoDots = versionString.replace('.', '_') + + tasks.register("javaRestTest#${versionNoDots}", StandaloneRestIntegTestTask) { + systemProperty 'tests.old_cluster_version', versionString + usesDefaultDistribution() + testClassesDirs = sourceSets.javaRestTest.output.classesDirs + classpath = sourceSets.javaRestTest.runtimeClasspath + usesBwcDistribution(Version.fromString(versionString)) + } + + tasks.named("check").configure { + dependsOn "javaRestTest#${versionNoDots}" + } +} diff --git a/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/DocValueOnlyFieldsIT.java
b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/DocValueOnlyFieldsIT.java new file mode 100644 index 0000000000000..e5bee956b3679 --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/DocValueOnlyFieldsIT.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.oldrepos7x; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.http.HttpHost; +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.List; +import java.util.function.Supplier; + +/** + * Tests doc-value-based searches against indices imported from clusters older than N-1. + * We reuse the YAML tests in search/390_doc_values_search.yml, but the setup has to be done + * manually here: it runs against the old cluster, which we can only reach through the + * low-level REST client, as the YAML test runner only knows how to talk to newer ES versions. + * + * The setup mimics the one in search/390_doc_values_search.yml, adapted to work + * against older version clusters.
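+ * + * Note that the old cluster is only used for this one-time setup; the YAML search tests themselves + * run against the current cluster (see getTestRestCluster()).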
+ */ +public class DocValueOnlyFieldsIT extends ESClientYamlSuiteTestCase { + + private static final Version oldVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + private static boolean repoRestored = false; + private static final TemporaryFolder repoDirectory = new TemporaryFolder(); + public static ElasticsearchCluster currentCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .build(); + + public static ElasticsearchCluster oldCluster = ElasticsearchCluster.local() + .version(org.elasticsearch.test.cluster.util.Version.fromString(System.getProperty("tests.old_cluster_version"))) + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(oldCluster).around(currentCluster); + private static final String REPO_NAME = "doc_values_repo"; + private static final String INDEX_NAME = "test"; + private static final String snapshotName = "snap"; + private static final Supplier<String> repoLocation = () -> repoDirectory.getRoot().getPath(); + + public DocValueOnlyFieldsIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable<Object[]> parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + @Override + protected boolean skipSetupSections() { + // setup in the YAML file is replaced by the method below + return true; + } + + @BeforeClass + public static void setupSnapshot() throws IOException { + String[] basicTypes = new String[] { + "byte", + "double", + "float", + "half_float", + "integer", + "long", + "short", + "boolean", + "keyword", + "ip", + "geo_point" }; // date is manually added as it needs further configuration + List<HttpHost> oldClusterHosts = parseClusterHosts(oldCluster.getHttpAddresses(), (host, port) -> new HttpHost(host, port)); + try (RestClient oldEs = RestClient.builder(oldClusterHosts.toArray(new HttpHost[oldClusterHosts.size()])).build()) { + Request createIndex = new Request("PUT", "/" + INDEX_NAME); + int numberOfShards = randomIntBetween(1, 3); + + XContentBuilder settingsBuilder = XContentFactory.jsonBuilder() + .startObject() + .startObject("settings") + .field("index.number_of_shards", numberOfShards) + .endObject() + .startObject("mappings"); + settingsBuilder.field("dynamic", false).startObject("properties"); + for (String type : basicTypes) { + settingsBuilder.startObject(type).field("type", type).endObject(); + } + settingsBuilder.startObject("date").field("type", "date").field("format", "yyyy/MM/dd").endObject(); + settingsBuilder.endObject().endObject().endObject(); + + createIndex.setJsonEntity(Strings.toString(settingsBuilder)); + assertOK(oldEs.performRequest(createIndex)); + + Request doc1 = new Request("PUT", "/" + INDEX_NAME + "/" + "_doc" + "/" + "1"); + doc1.addParameter("refresh", "true"); + XContentBuilder bodyDoc1 = XContentFactory.jsonBuilder() + .startObject() + .field("byte", 1) + .field("double", 1.0)
+ .field("float", 1.0) + .field("half_float", 1.0) + .field("integer", 1) + .field("long", 1) + .field("short", 1) + .field("date", "2017/01/01") + .field("keyword", "key1") + .field("boolean", false) + .field("ip", "192.168.0.1") + .array("geo_point", 13.5, 34.89) + .endObject(); + doc1.setJsonEntity(Strings.toString(bodyDoc1)); + assertOK(oldEs.performRequest(doc1)); + + Request doc2 = new Request("PUT", "/" + INDEX_NAME + "/" + "_doc" + "/" + "2"); + doc2.addParameter("refresh", "true"); + XContentBuilder bodyDoc2 = XContentFactory.jsonBuilder() + .startObject() + .field("byte", 2) + .field("double", 2.0) + .field("float", 2.0) + .field("half_float", 2.0) + .field("integer", 2) + .field("long", 2) + .field("short", 2) + .field("date", "2017/01/02") + .field("keyword", "key2") + .field("boolean", true) + .field("ip", "192.168.0.2") + .array("geo_point", -63.24, 31.0) + .endObject(); + doc2.setJsonEntity(Strings.toString(bodyDoc2)); + assertOK(oldEs.performRequest(doc2)); + + // register repo on old ES and take snapshot + Request createRepoRequest = new Request("PUT", "/_snapshot/" + REPO_NAME); + createRepoRequest.setJsonEntity(Strings.format(""" + {"type":"fs","settings":{"location":"%s"}} + """, repoLocation.get())); + assertOK(oldEs.performRequest(createRepoRequest)); + + Request createSnapshotRequest = new Request("PUT", "/_snapshot/" + REPO_NAME + "/" + snapshotName); + createSnapshotRequest.addParameter("wait_for_completion", "true"); + createSnapshotRequest.setJsonEntity("{\"indices\":\"" + INDEX_NAME + "\"}"); + assertOK(oldEs.performRequest(createSnapshotRequest)); + } + } + + @Before + public void registerAndRestoreRepo() throws IOException { + // Ideally we could restore the repo on the "current" cluster in @BeforeClass, but that + // does not work because the client() is not initialized then. + // To restore only once we guard this operation by a flag. + if (repoRestored == false) { + // register repo on new ES and restore snapshot + Request createRepoRequest2 = new Request("PUT", "/_snapshot/" + REPO_NAME); + createRepoRequest2.setJsonEntity(Strings.format(""" + {"type":"fs","settings":{"location":"%s"}} + """, repoLocation.get())); + assertOK(client().performRequest(createRepoRequest2)); + + final Request createRestoreRequest = new Request("POST", "/_snapshot/" + REPO_NAME + "/" + snapshotName + "/_restore"); + createRestoreRequest.addParameter("wait_for_completion", "true"); + createRestoreRequest.setJsonEntity("{\"indices\":\"" + INDEX_NAME + "\"}"); + assertOK(client().performRequest(createRestoreRequest)); + + repoRestored = true; + } + logger.info("Repo [" + REPO_NAME + "] restored."); + } + + @Override + protected String getTestRestCluster() { + return currentCluster.getHttpAddresses(); + } +} diff --git a/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/OldMappingsIT.java b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/OldMappingsIT.java new file mode 100644 index 0000000000000..f4ae61052db8c --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/OldMappingsIT.java @@ -0,0 +1,377 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.oldrepos7x; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; + +public class OldMappingsIT extends ESRestTestCase { + + private static final List<String> indices = Arrays.asList("filebeat", "custom", "nested"); + + public static TemporaryFolder repoDirectory = new TemporaryFolder(); + + public static ElasticsearchCluster currentCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .build(); + + public static ElasticsearchCluster oldCluster = ElasticsearchCluster.local() + .version(Version.fromString(System.getProperty("tests.old_cluster_version"))) + .distribution(DistributionType.DEFAULT) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(oldCluster).around(currentCluster); + + private static boolean repoRestored = false; + private static final Supplier<String> repoLocation = () -> repoDirectory.getRoot().getPath(); + private static final String repoName = "old_mappings_repo"; + private static final String snapshotName = "snap"; + + @Override + protected String getTestRestCluster() { + return currentCluster.getHttpAddresses(); + } + + private static Request createIndex(String indexName, String file) throws IOException { + Request createIndex = new Request("PUT", "/" + indexName); + int numberOfShards = randomIntBetween(1, 3); + + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("settings") + .field("index.number_of_shards", numberOfShards) + .field("index.number_of_replicas", 0) + .endObject() + .startObject("mappings"); + builder.rawValue(OldMappingsIT.class.getResourceAsStream(file), XContentType.JSON); + builder.endObject().endObject(); + + createIndex.setJsonEntity(Strings.toString(builder)); + return createIndex; + } +
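+ /** One-time setup: creates the legacy indices on the old-version cluster, indexes the sample documents, and snapshots them into the repository directory shared with the current cluster. */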
@BeforeClass + public static void setupOldRepo() throws IOException { + List<HttpHost> oldClusterHosts = parseClusterHosts(oldCluster.getHttpAddresses(), (host, port) -> new HttpHost(host, port)); + try (RestClient oldEsClient = RestClient.builder(oldClusterHosts.toArray(new HttpHost[oldClusterHosts.size()])).build()) { + assertOK(oldEsClient.performRequest(createIndex("filebeat", "filebeat.json"))); + assertOK(oldEsClient.performRequest(createIndex("custom", "custom.json"))); + assertOK(oldEsClient.performRequest(createIndex("nested", "nested.json"))); + + Request doc1 = new Request("PUT", "/" + "custom" + "/" + "_doc" + "/" + "1"); + doc1.addParameter("refresh", "true"); + XContentBuilder bodyDoc1 = XContentFactory.jsonBuilder() + .startObject() + .startObject("apache2") + .startObject("access") + .field("url", "myurl1") + .field("agent", "agent1") + .endObject() + .endObject() + .endObject(); + doc1.setJsonEntity(Strings.toString(bodyDoc1)); + assertOK(oldEsClient.performRequest(doc1)); + + Request doc2 = new Request("PUT", "/" + "custom" + "/" + "_doc" + "/" + "2"); + doc2.addParameter("refresh", "true"); + XContentBuilder bodyDoc2 = XContentFactory.jsonBuilder() + .startObject() + .startObject("apache2") + .startObject("access") + .field("url", "myurl2") + .field("agent", "agent2 agent2") + .endObject() + .endObject() + .field("completion", "some_value") + .endObject(); + doc2.setJsonEntity(Strings.toString(bodyDoc2)); + assertOK(oldEsClient.performRequest(doc2)); + + Request doc3 = new Request("PUT", "/" + "nested" + "/" + "_doc" + "/" + "1"); + doc3.addParameter("refresh", "true"); + XContentBuilder bodyDoc3 = XContentFactory.jsonBuilder() + .startObject() + .field("group", "fans") + .startArray("user") + .startObject() + .field("first", "John") + .field("last", "Smith") + .endObject() + .startObject() + .field("first", "Alice") + .field("last", "White") + .endObject() + .endArray() + .endObject(); + doc3.setJsonEntity(Strings.toString(bodyDoc3)); + assertOK(oldEsClient.performRequest(doc3)); + + Request getSettingsRequest = new Request("GET", "/_cluster/settings?include_defaults=true"); + Map<String, Object> response = entityAsMap(oldEsClient.performRequest(getSettingsRequest)); + assertEquals(repoLocation.get(), ((List<?>) (XContentMapValues.extractValue("defaults.path.repo", response))).get(0)); + + // register repo on old ES and take snapshot + Request createRepoRequest = new Request("PUT", "/_snapshot/" + repoName); + createRepoRequest.setJsonEntity(Strings.format(""" + {"type":"fs","settings":{"location":"%s"}} + """, repoLocation.get())); + assertOK(oldEsClient.performRequest(createRepoRequest)); + + Request createSnapshotRequest = new Request("PUT", "/_snapshot/" + repoName + "/" + snapshotName); + createSnapshotRequest.addParameter("wait_for_completion", "true"); + createSnapshotRequest.setJsonEntity("{\"indices\":\"" + indices.stream().collect(Collectors.joining(",")) + "\"}"); + assertOK(oldEsClient.performRequest(createSnapshotRequest)); + + } + } + + @Before + public void registerAndRestoreRepo() throws IOException { + // this would ideally also happen in @BeforeClass and just once, but we don't have the current cluster client() + // there yet. So we do it before tests here and make sure to only restore the repo once. + // Goes together with the empty "wipeSnapshots()" override in this test.
+ if (repoRestored == false) { + // register repo on new ES and restore snapshot + Request createRepoRequest2 = new Request("PUT", "/_snapshot/" + repoName); + createRepoRequest2.setJsonEntity(Strings.format(""" + {"type":"fs","settings":{"location":"%s"}} + """, repoLocation.get())); + assertOK(client().performRequest(createRepoRequest2)); + + final Request createRestoreRequest = new Request("POST", "/_snapshot/" + repoName + "/" + snapshotName + "/_restore"); + createRestoreRequest.addParameter("wait_for_completion", "true"); + createRestoreRequest.setJsonEntity("{\"indices\":\"" + indices.stream().collect(Collectors.joining(",")) + "\"}"); + createRestoreRequest.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); + Response response = client().performRequest(createRestoreRequest); + // check deprecation warning for "_field_names" disabling + assertTrue(response.getWarnings().stream().filter(s -> s.contains("Disabling _field_names is not necessary")).count() > 0); + assertOK(response); + + repoRestored = true; + } + } + + public void testFileBeatApache2MappingOk() throws IOException { + Request mappingRequest = new Request("GET", "/" + "filebeat" + "/_mapping"); + Map<String, Object> mapping = entityAsMap(client().performRequest(mappingRequest)); + assertNotNull(XContentMapValues.extractValue(mapping, "filebeat", "mappings", "properties", "apache2")); + } + + public void testSearchKeyword() throws IOException { + Request search = new Request("POST", "/" + "custom" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("match") + .startObject("apache2.access.url") + .field("query", "myurl2") + .endObject() + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map<String, Object> response = entityAsMap(client().performRequest(search)); + List<?> hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + } + + public void testSearchOnPlaceHolderField() throws IOException { + Request search = new Request("POST", "/" + "custom" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("match") + .startObject("completion") + .field("query", "some-agent") + .endObject() + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + ResponseException re = expectThrows(ResponseException.class, () -> entityAsMap(client().performRequest(search))); + assertThat( + re.getMessage(), + containsString("Field [completion] of type [completion] in legacy index does not support match queries") + ); + } + + public void testAggregationOnPlaceholderField() throws IOException { + Request search = new Request("POST", "/" + "custom" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("aggs") + .startObject("agents") + .startObject("terms") + .field("field", "completion") + .endObject() + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + ResponseException re = expectThrows(ResponseException.class, () -> entityAsMap(client().performRequest(search))); + assertThat(re.getMessage(), containsString("can't run aggregation or sorts on field type completion of legacy index")); + } + + public void testConstantScoringOnTextField() throws IOException { + Request search = new Request("POST", "/"
+ "custom" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("match") + .startObject("apache2.access.agent") + .field("query", "agent2") + .endObject() + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map<String, Object> response = entityAsMap(client().performRequest(search)); + List<?> hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + @SuppressWarnings("unchecked") + Map<String, Object> hit = (Map<String, Object>) hits.get(0); + assertThat(hit, hasKey("_score")); + assertEquals(1.0d, (double) hit.get("_score"), 0.01d); + } + + public void testFieldsExistQueryOnTextField() throws IOException { + Request search = new Request("POST", "/" + "custom" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("exists") + .field("field", "apache2.access.agent") + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map<String, Object> response = entityAsMap(client().performRequest(search)); + List<?> hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(2)); + } + + public void testSearchFieldsOnPlaceholderField() throws IOException { + Request search = new Request("POST", "/" + "custom" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("match") + .startObject("apache2.access.url") + .field("query", "myurl2") + .endObject() + .endObject() + .endObject() + .startArray("fields") + .value("completion") + .endArray() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map<String, Object> response = entityAsMap(client().performRequest(search)); + List<?> hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + logger.info(hits); + Map<String, Object> fields = (Map<String, Object>) (XContentMapValues.extractValue("fields", (Map<String, Object>) hits.get(0))); + assertEquals(List.of("some_value"), fields.get("completion")); + } + + public void testNestedDocuments() throws IOException { + Request search = new Request("POST", "/" + "nested" + "/_search"); + Map<String, Object> response = entityAsMap(client().performRequest(search)); + logger.info(response); + List<?> hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + Map<String, Object> source = (Map<String, Object>) (XContentMapValues.extractValue("_source", (Map<String, Object>) hits.get(0))); + assertEquals("fans", source.get("group")); + + search = new Request("POST", "/" + "nested" + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("query") + .startObject("nested") + .field("path", "user") + .startObject("query") + .startObject("bool") + .startArray("must") + .startObject() + .startObject("match") + .field("user.first", "Alice") + .endObject() + .endObject() + .startObject() + .startObject("match") + .field("user.last", "White") + .endObject() + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + search.setJsonEntity(Strings.toString(query)); + response = entityAsMap(client().performRequest(search)); + logger.info(response); + hits = (List<?>) (XContentMapValues.extractValue("hits.hits", response)); + assertThat(hits, hasSize(1)); + source = (Map<String, Object>) (XContentMapValues.extractValue("_source", (Map<String, Object>) hits.get(0))); + assertEquals("fans",
source.get("group")); + } + + protected boolean resetFeatureStates() { + return false; + } + + protected boolean preserveIndicesUponCompletion() { + return true; + } + + protected void wipeSnapshots() throws IOException { + // we want to keep snapshots between individual tests + } +} diff --git a/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/OldRepositoryAccessIT.java new file mode 100644 index 0000000000000..4613f0d17bc07 --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/java/org/elasticsearch/oldrepos7x/OldRepositoryAccessIT.java @@ -0,0 +1,532 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.oldrepos7x; + +import org.apache.http.HttpHost; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortBuilders; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; + 
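+/** + * Covers end-to-end access to snapshots taken on the old-version cluster: listing them, restoring them, + * and mounting them as full-copy and shared-cache searchable snapshots, for both regular fs repositories + * and source-only repositories. + */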
+public class OldRepositoryAccessIT extends ESRestTestCase { + + public static TemporaryFolder repoDirectory = new TemporaryFolder(); + + public static ElasticsearchCluster currentCluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB") + .build(); + + public static ElasticsearchCluster oldCluster = ElasticsearchCluster.local() + .version(org.elasticsearch.test.cluster.util.Version.fromString(System.getProperty("tests.old_cluster_version"))) + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.ml.enabled", "false") + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(oldCluster).around(currentCluster); + + private static final String REPO_LOCATION_BASE = "source_only_"; + + private static final Map<String, Set<String>> expectedTestIds = new HashMap<>(); + + private static final Map<String, Integer> expectedNumberOfShards = new HashMap<>(); + + private static final int numDocs = 10; + private static final int extraDocs = 1; + + /** + * We set up the data in the old version cluster and take a snapshot to a file system directory. + * We only need to do this once before all other tests because they can re-mount the data from that + * directory. + */ + @BeforeClass + public static void setupOldRepo() throws IOException { + String repoLocationBase = repoDirectory.getRoot().getPath(); + List<HttpHost> oldClusterHosts = parseClusterHosts(oldCluster.getHttpAddresses(), (host, port) -> new HttpHost(host, port)); + Version oldVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + try (RestClient oldEs = RestClient.builder(oldClusterHosts.toArray(new HttpHost[oldClusterHosts.size()])).build()) { + checkClusterVersion(oldEs, oldVersion); + for (boolean sourceOnlyRepository : new boolean[] { true, false }) { + String repoLocation = PathUtils.get(repoLocationBase).resolve(REPO_LOCATION_BASE + sourceOnlyRepository).toString(); + assumeTrue( + "source only repositories only supported since ES 6.5.0", + sourceOnlyRepository == false || oldVersion.onOrAfter(Version.fromString("6.5.0")) + ); + + assertThat("Index version should be added to archive tests", oldVersion, lessThan(Version.V_8_10_0)); + IndexVersion indexVersion = IndexVersion.fromId(oldVersion.id); + String indexName; + if (sourceOnlyRepository) { + indexName = "source_only_test_index"; + } else { + indexName = "test_index"; + } + + String repoName = "repo_" + indexName; + String snapshotName = "snap_" + indexName; + Request createIndex = new Request("PUT", "/" + indexName); + int numShards = randomIntBetween(1, 3); + expectedNumberOfShards.put(repoLocation, numShards); + + XContentBuilder settingsBuilder = XContentFactory.jsonBuilder().startObject().startObject("settings"); + settingsBuilder.field("index.number_of_shards", numShards); + + // 6.5.0 started using soft-deletes, but it was only enabled by default on 7.0 + if (oldVersion.onOrAfter(Version.fromString("6.5.0")) + && oldVersion.before(Version.fromString("7.0.0")) + && randomBoolean()) {
settingsBuilder.field("index.soft_deletes.enabled", true); + } + + settingsBuilder.endObject().endObject(); + + createIndex.setJsonEntity(Strings.toString(settingsBuilder)); + assertOK(oldEs.performRequest(createIndex)); + + // TODO maybe go back to using multiple types for ES versions < 6.0.0 if we migrate them to this qa project + String type = "_doc"; + Set<String> expectedIds = new HashSet<>(); + for (int i = 0; i < numDocs + extraDocs; i++) { + String id = "testdoc" + i; + expectedIds.add(id); + Request doc = new Request("PUT", "/" + indexName + "/" + type + "/" + id); + doc.addParameter("refresh", "true"); + doc.setJsonEntity(sourceForDoc(i)); + assertOK(oldEs.performRequest(doc)); + } + + for (int i = 0; i < extraDocs; i++) { + String id = randomFrom(expectedIds); + expectedIds.remove(id); + Request doc = new Request("DELETE", "/" + indexName + "/" + type + "/" + id); + doc.addParameter("refresh", "true"); + oldEs.performRequest(doc); + } + expectedTestIds.put(repoLocation, expectedIds); + + // register repo on old ES and take snapshot + Request createRepoRequest = new Request("PUT", "/_snapshot/" + repoName); + createRepoRequest.setJsonEntity(sourceOnlyRepository ? Strings.format(""" + {"type":"source","settings":{"location":"%s","delegate_type":"fs"}} + """, repoLocation) : Strings.format(""" + {"type":"fs","settings":{"location":"%s"}} + """, repoLocation)); + assertOK(oldEs.performRequest(createRepoRequest)); + + Request createSnapshotRequest = new Request("PUT", "/_snapshot/" + repoName + "/" + snapshotName); + createSnapshotRequest.addParameter("wait_for_completion", "true"); + createSnapshotRequest.setJsonEntity("{\"indices\":\"" + indexName + "\"}"); + assertOK(oldEs.performRequest(createSnapshotRequest)); + } + } + + private static void checkClusterVersion(RestClient client, Version version) throws IOException { + // check expected cluster version + Request infoRequest = new Request("GET", "/"); + Response response = assertOK(client.performRequest(infoRequest)); + assertEquals( + version.toString(), + ((String) ObjectPath.createFromResponse(response).evaluate("version.number")).replace("-SNAPSHOT", "") + ); + } + + @Override + protected String getTestRestCluster() { + return currentCluster.getHttpAddresses(); + } + + public void testOldRepoAccess() throws IOException { + runTest(false); + } + + public void testOldSourceOnlyRepoAccess() throws IOException { + runTest(true); + } + + public void runTest(boolean sourceOnlyRepository) throws IOException { + checkClusterVersion(client(), Version.CURRENT); + String repoLocation = repoDirectory.getRoot().getPath(); + repoLocation = PathUtils.get(repoLocation).resolve(REPO_LOCATION_BASE + sourceOnlyRepository).toString(); + Version oldVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + assertThat("Index version should be added to archive tests", oldVersion, lessThan(Version.V_8_10_0)); + IndexVersion indexVersion = IndexVersion.fromId(oldVersion.id); + + String indexName; + if (sourceOnlyRepository) { + indexName = "source_only_test_index"; + } else { + indexName = "test_index"; + } + String repoName = "repo_" + indexName; + String snapshotName = "snap_" + indexName; + Set<String> expectedIds = expectedTestIds.get(repoLocation); + int numberOfShards = expectedNumberOfShards.get(repoLocation); + + // register repo on new ES + Settings.Builder repoSettingsBuilder = Settings.builder().put("location", repoLocation); + if (sourceOnlyRepository) { + repoSettingsBuilder.put("delegate_type", "fs"); + } + Request createRepo
= new Request("PUT", "/_snapshot/" + repoName); + createRepo.setJsonEntity( + Strings.toString( + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).type(sourceOnlyRepository ? "source" : "fs") + .settings(repoSettingsBuilder.build()) + ) + ); + assertAcknowledged(client().performRequest(createRepo)); + + // list snapshots on new ES + Request getSnaps = new Request("GET", "/_snapshot/" + repoName + "/_all"); + Response getResponse = client().performRequest(getSnaps); + ObjectPath getResp = ObjectPath.createFromResponse(getResponse); + assertThat(getResp.evaluate("total"), equalTo(1)); + assertThat(getResp.evaluate("snapshots.0.snapshot"), equalTo(snapshotName)); + assertThat(getResp.evaluate("snapshots.0.repository"), equalTo(repoName)); + assertThat(getResp.evaluate("snapshots.0.indices"), contains(indexName)); + assertThat(getResp.evaluate("snapshots.0.state"), equalTo(SnapshotState.SUCCESS.toString())); + assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.successful")); + assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.total")); + assertEquals(0, (int) getResp.evaluate("snapshots.0.shards.failed")); + assertEquals(indexVersion.toReleaseVersion(), getResp.evaluate("snapshots.0.version")); + + // list specific snapshot on new ES + getSnaps = new Request("GET", "/_snapshot/" + repoName + "/" + snapshotName); + getResponse = client().performRequest(getSnaps); + getResp = ObjectPath.createFromResponse(getResponse); + assertThat(getResp.evaluate("total"), equalTo(1)); + assertThat(getResp.evaluate("snapshots.0.snapshot"), equalTo(snapshotName)); + assertThat(getResp.evaluate("snapshots.0.repository"), equalTo(repoName)); + assertThat(getResp.evaluate("snapshots.0.indices"), contains(indexName)); + assertThat(getResp.evaluate("snapshots.0.state"), equalTo(SnapshotState.SUCCESS.toString())); + assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.successful")); + assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards.total")); + assertEquals(0, (int) getResp.evaluate("snapshots.0.shards.failed")); + assertEquals(indexVersion.toReleaseVersion(), getResp.evaluate("snapshots.0.version")); + + // list advanced snapshot info on new ES + getSnaps = new Request("GET", "/_snapshot/" + repoName + "/" + snapshotName + "/_status"); + getResponse = client().performRequest(getSnaps); + getResp = ObjectPath.createFromResponse(getResponse); + assertThat(((List) getResp.evaluate("snapshots")).size(), equalTo(1)); + assertThat(getResp.evaluate("snapshots.0.snapshot"), equalTo(snapshotName)); + assertThat(getResp.evaluate("snapshots.0.repository"), equalTo(repoName)); + assertThat(((Map) getResp.evaluate("snapshots.0.indices")).keySet(), contains(indexName)); + assertThat(getResp.evaluate("snapshots.0.state"), equalTo(SnapshotState.SUCCESS.toString())); + assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards_stats.done")); + assertEquals(numberOfShards, (int) getResp.evaluate("snapshots.0.shards_stats.total")); + assertEquals(0, (int) getResp.evaluate("snapshots.0.shards_stats.failed")); + assertThat(getResp.evaluate("snapshots.0.stats.total.size_in_bytes"), greaterThan(0)); + assertThat(getResp.evaluate("snapshots.0.stats.total.file_count"), greaterThan(0)); + + // restore / mount and check whether searches work + restoreMountAndVerify(numDocs, expectedIds, numberOfShards, sourceOnlyRepository, indexName, repoName, snapshotName); + + // close indices + closeIndex(client(), "restored_" + 
indexName); + closeIndex(client(), "mounted_full_copy_" + indexName); + closeIndex(client(), "mounted_shared_cache_" + indexName); + + // restore / mount again + restoreMountAndVerify(numDocs, expectedIds, numberOfShards, sourceOnlyRepository, indexName, repoName, snapshotName); + + currentCluster.stop(false); + currentCluster.start(); + + // we need to replace the nodes so the clients can connect to the restarted cluster + List<HttpHost> hosts = parseClusterHosts(currentCluster.getHttpAddresses(), (host, port) -> new HttpHost(host, port)); + List<Node> nodes = hosts.stream().map(Node::new).collect(Collectors.toList()); + client().setNodes(nodes); + adminClient().setNodes(nodes); + + ensureGreen("restored_" + indexName); + ensureGreen("mounted_full_copy_" + indexName); + ensureGreen("mounted_shared_cache_" + indexName); + } + + private static String sourceForDoc(int i) { + return "{\"test\":\"test" + i + "\",\"val\":" + i + ",\"create_date\":\"2020-01-" + Strings.format("%02d", i + 1) + "\"}"; + } + + private void restoreMountAndVerify( + int numDocs, + Set<String> expectedIds, + int numberOfShards, + boolean sourceOnlyRepository, + String indexName, + String repoName, + String snapshotName + ) throws IOException { + // restore index + Request restoreRequest = new Request("POST", "/_snapshot/" + repoName + "/" + snapshotName + "/_restore"); + restoreRequest.setJsonEntity( + Strings.toString( + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).indices(indexName).renamePattern("(.+)").renameReplacement("restored_$1") + ) + ); + restoreRequest.addParameter("wait_for_completion", "true"); + Response restoreResponse = client().performRequest(restoreRequest); + ObjectPath restore = ObjectPath.createFromResponse(restoreResponse); + assertEquals(numberOfShards, (int) restore.evaluate("snapshot.shards.total")); + assertEquals(numberOfShards, (int) restore.evaluate("snapshot.shards.successful")); + + ensureGreen("restored_" + indexName); + + String restoredIndex = "restored_" + indexName; + var response = responseAsMap(client().performRequest(new Request("GET", "/" + restoredIndex + "/_mapping"))); + Map<String, Object> mapping = ObjectPath.evaluate(response, restoredIndex + ".mappings"); + logger.info("mapping for {}: {}", restoredIndex, mapping); + assertThat(mapping, hasKey("_meta")); + assertThat(mapping.get("_meta"), instanceOf(Map.class)); + @SuppressWarnings("unchecked") + Map<String, Object> meta = (Map<String, Object>) mapping.get("_meta"); + assertThat(meta, hasKey("legacy_mappings")); + assertThat(meta.get("legacy_mappings"), instanceOf(Map.class)); + @SuppressWarnings("unchecked") + Map<String, Object> legacyMappings = (Map<String, Object>) meta.get("legacy_mappings"); + assertThat(legacyMappings.keySet(), not(empty())); + for (Map.Entry<String, Object> entry : legacyMappings.entrySet()) { + String type = entry.getKey(); + assertThat(type, startsWith("_doc")); + assertThat(entry.getValue(), instanceOf(Map.class)); + @SuppressWarnings("unchecked") + Map<String, Object> legacyMapping = (Map<String, Object>) entry.getValue(); + assertThat(legacyMapping, hasKey("properties")); + assertThat(legacyMapping.get("properties"), instanceOf(Map.class)); + @SuppressWarnings("unchecked") + Map<String, Object> propertiesMapping = (Map<String, Object>) legacyMapping.get("properties"); + assertThat(propertiesMapping, hasKey("val")); + assertThat(propertiesMapping.get("val"), instanceOf(Map.class)); + @SuppressWarnings("unchecked") + Map<String, Object> valMapping = (Map<String, Object>) propertiesMapping.get("val"); + assertThat(valMapping, hasKey("type")); + assertEquals("long", valMapping.get("type")); + } + + // run a search against the index + assertDocs("restored_" + indexName, numDocs, expectedIds,
+
+ // run a search against the index
+ assertDocs("restored_" + indexName, numDocs, expectedIds, sourceOnlyRepository, numberOfShards);
+
+ // mount as full copy searchable snapshot
+ Request mountRequest = new Request("POST", "/_snapshot/" + repoName + "/" + snapshotName + "/_mount");
+ mountRequest.setJsonEntity(
+ "{\"index\": \""
+ + indexName
+ + "\",\"renamed_index\": \"mounted_full_copy_"
+ + indexName
+ + "\",\"index_settings\": {\"index.number_of_replicas\": 1}}"
+ );
+ mountRequest.addParameter("wait_for_completion", "true");
+ ObjectPath mountResponse = ObjectPath.createFromResponse(client().performRequest(mountRequest));
+ assertNotNull(mountResponse.evaluate("snapshot"));
+ assertEquals(numberOfShards, (int) mountResponse.evaluate("snapshot.shards.total"));
+ assertEquals(numberOfShards, (int) mountResponse.evaluate("snapshot.shards.successful"));
+
+ ensureGreen("mounted_full_copy_" + indexName);
+
+ // run a search against the index
+ assertDocs("mounted_full_copy_" + indexName, numDocs, expectedIds, sourceOnlyRepository, numberOfShards);
+
+ // mount as shared cache searchable snapshot
+ mountRequest = new Request("POST", "/_snapshot/" + repoName + "/" + snapshotName + "/_mount");
+ mountRequest.setJsonEntity("{\"index\": \"" + indexName + "\",\"renamed_index\": \"mounted_shared_cache_" + indexName + "\"}");
+ mountRequest.addParameter("wait_for_completion", "true");
+ mountRequest.addParameter("storage", "shared_cache");
+ mountResponse = ObjectPath.createFromResponse(client().performRequest(mountRequest));
+ assertNotNull(mountResponse.evaluate("snapshot"));
+ logger.info("mount response: {}", Strings.toString(mountResponse.toXContentBuilder(XContentType.JSON.xContent())));
+ assertEquals(numberOfShards, (int) mountResponse.evaluate("snapshot.shards.total"));
+ assertEquals(numberOfShards, (int) mountResponse.evaluate("snapshot.shards.successful"));
+
+ // run a search against the index
+ assertDocs("mounted_shared_cache_" + indexName, numDocs, expectedIds, sourceOnlyRepository, numberOfShards);
+ }
+
+ private void assertDocs(String index, int numDocs, Set<String> expectedIds, boolean sourceOnlyRepository, int numberOfShards)
+ throws IOException {
+ RequestOptions requestOptions = RequestOptions.DEFAULT;
+
+ // run a search against the index
+ SearchResponse searchResponse = search(index, null, requestOptions);
+ try {
+ logger.info(searchResponse);
+ // check hit count
+ assertEquals(numDocs, searchResponse.getHits().getTotalHits().value());
+ // check that _index is properly set
+ assertTrue(Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getIndex).allMatch(index::equals));
+ // check that all _ids are there
+ assertEquals(expectedIds, Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).collect(Collectors.toSet()));
+ // check that _source is present
+ assertTrue(Arrays.stream(searchResponse.getHits().getHits()).allMatch(SearchHit::hasSource));
+ // check that the correct _source is present for each document
+ for (SearchHit h : searchResponse.getHits().getHits()) {
+ assertEquals(sourceForDoc(getIdAsNumeric(h.getId())), h.getSourceAsString());
+ }
+ } finally {
+ searchResponse.decRef();
+ }
+
+ String id = randomFrom(expectedIds);
+ int num = getIdAsNumeric(id);
+ // run a search using runtime fields against the index
+ searchResponse = search(
+ index,
+ SearchSourceBuilder.searchSource()
+ .query(QueryBuilders.matchQuery("val", num))
+ .runtimeMappings(Map.of("val", Map.of("type", "long"))),
+ requestOptions
+ );
+ try {
+ logger.info(searchResponse);
+ assertEquals(1, searchResponse.getHits().getTotalHits().value());
+ assertEquals(id, searchResponse.getHits().getHits()[0].getId());
+ assertEquals(sourceForDoc(num), searchResponse.getHits().getHits()[0].getSourceAsString());
+ } finally {
+ searchResponse.decRef();
+ }
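+
+ // For illustration, the runtime-fields search above sends a request body along these lines;
+ // the runtime mapping makes "val" queryable as a long even where it is not indexed
+ // (e.g. for source-only snapshots):
+ // {
+ //   "query": { "match": { "val": <num> } },
+ //   "runtime_mappings": { "val": { "type": "long" } }
+ // }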
+
+ if (sourceOnlyRepository == false) {
+ // search using reverse sort on val
+ searchResponse = search(
+ index,
+ SearchSourceBuilder.searchSource()
+ .query(QueryBuilders.matchAllQuery())
+ .sort(SortBuilders.fieldSort("val").order(SortOrder.DESC)),
+ requestOptions
+ );
+ try {
+ logger.info(searchResponse);
+ // check sort order
+ assertEquals(
+ expectedIds.stream().sorted(Comparator.comparingInt(this::getIdAsNumeric).reversed()).toList(),
+ Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).toList()
+ );
+ } finally {
+ searchResponse.decRef();
+ }
+
+ // look up postings
+ searchResponse = search(
+ index,
+ SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("test", "test" + num)),
+ requestOptions
+ );
+ try {
+ logger.info(searchResponse);
+ // check match
+ ElasticsearchAssertions.assertSearchHits(searchResponse, id);
+ } finally {
+ searchResponse.decRef();
+ }
+
+ assertThat(
+ expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", "/" + index + "/_doc/" + id)))
+ .getMessage(),
+ containsString("get operations not allowed on a legacy index")
+ );
+
+ // check that shards are skipped based on non-matching date
+ searchResponse = search(
+ index,
+ SearchSourceBuilder.searchSource().query(QueryBuilders.rangeQuery("create_date").from("2020-02-01")),
+ requestOptions
+ );
+ try {
+ logger.info(searchResponse);
+ assertEquals(0, searchResponse.getHits().getTotalHits().value());
+ assertEquals(numberOfShards, searchResponse.getSuccessfulShards());
+ // a single-shard search is executed directly, without a pre-filter (can_match) phase,
+ // so nothing is reported as skipped; with multiple shards, all of them can be skipped
+ // because the create_date range matches none of the documents
+ int expectedSkips = numberOfShards == 1 ? 
0 : numberOfShards; + assertEquals(expectedSkips, searchResponse.getSkippedShards()); + } finally { + searchResponse.decRef(); + } + } + } + + private static SearchResponse search(String index, @Nullable SearchSourceBuilder builder, RequestOptions options) throws IOException { + Request request = new Request("POST", "/" + index + "/_search"); + if (builder != null) { + request.setJsonEntity(builder.toString()); + } + request.setOptions(options); + return SearchResponseUtils.parseSearchResponse(responseAsParser(client().performRequest(request))); + } + + private int getIdAsNumeric(String id) { + return Integer.parseInt(id.substring("testdoc".length())); + } + + private static void closeIndex(RestClient client, String index) throws IOException { + Request request = new Request("POST", "/" + index + "/_close"); + ObjectPath doc = ObjectPath.createFromResponse(client.performRequest(request)); + assertTrue(doc.evaluate("shards_acknowledged")); + } +} diff --git a/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/custom.json b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/custom.json new file mode 100644 index 0000000000000..eb2027e09e22f --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/custom.json @@ -0,0 +1,24 @@ +"_field_names": { + "enabled": false +}, +"properties": { + "apache2": { + "properties": { + "access": { + "properties": { + "agent": { + "type": "text" + }, + "url": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "completion": { + "type": "completion" + } +} + diff --git a/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/filebeat.json b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/filebeat.json new file mode 100644 index 0000000000000..ff2a12c7a44a0 --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/filebeat.json @@ -0,0 +1,679 @@ +"_meta": { + "version": "5.6.17" +}, +"date_detection": false, +"dynamic_templates": [ + { + "strings_as_keyword": { + "mapping": { + "ignore_above": 1024, + "type": "keyword" + }, + "match_mapping_type": "string" + } + } +], +"properties": { + "@timestamp": { + "type": "date" + }, + "apache2": { + "properties": { + "access": { + "properties": { + "agent": { + "norms": false, + "type": "text" + }, + "body_sent": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "geoip": { + "properties": { + "city_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "continent_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "country_iso_code": { + "ignore_above": 1024, + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "region_name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "http_version": { + "ignore_above": 1024, + "type": "keyword" + }, + "method": { + "ignore_above": 1024, + "type": "keyword" + }, + "referrer": { + "ignore_above": 1024, + "type": "keyword" + }, + "remote_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "response_code": { + "type": "long" + }, + "url": { + "ignore_above": 1024, + "type": "keyword" + }, + "user_agent": { + "properties": { + "device": { + "ignore_above": 1024, + "type": "keyword" + }, + "major": { + "type": "long" + }, + "minor": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "os": { + 
"ignore_above": 1024, + "type": "keyword" + }, + "os_major": { + "type": "long" + }, + "os_minor": { + "type": "long" + }, + "os_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "patch": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "user_name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "error": { + "properties": { + "client": { + "ignore_above": 1024, + "type": "keyword" + }, + "level": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "module": { + "ignore_above": 1024, + "type": "keyword" + }, + "pid": { + "type": "long" + }, + "tid": { + "type": "long" + } + } + } + } + }, + "auditd": { + "properties": { + "log": { + "properties": { + "a0": { + "ignore_above": 1024, + "type": "keyword" + }, + "acct": { + "ignore_above": 1024, + "type": "keyword" + }, + "geoip": { + "properties": { + "city_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "continent_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "country_iso_code": { + "ignore_above": 1024, + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "region_name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "item": { + "ignore_above": 1024, + "type": "keyword" + }, + "items": { + "ignore_above": 1024, + "type": "keyword" + }, + "new_auid": { + "ignore_above": 1024, + "type": "keyword" + }, + "new_ses": { + "ignore_above": 1024, + "type": "keyword" + }, + "old_auid": { + "ignore_above": 1024, + "type": "keyword" + }, + "old_ses": { + "ignore_above": 1024, + "type": "keyword" + }, + "pid": { + "ignore_above": 1024, + "type": "keyword" + }, + "ppid": { + "ignore_above": 1024, + "type": "keyword" + }, + "record_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "res": { + "ignore_above": 1024, + "type": "keyword" + }, + "sequence": { + "type": "long" + } + } + } + } + }, + "beat": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "version": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "error": { + "ignore_above": 1024, + "type": "keyword" + }, + "fileset": { + "properties": { + "module": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "input_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "meta": { + "properties": { + "cloud": { + "properties": { + "availability_zone": { + "ignore_above": 1024, + "type": "keyword" + }, + "instance_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "machine_type": { + "ignore_above": 1024, + "type": "keyword" + }, + "project_id": { + "ignore_above": 1024, + "type": "keyword" + }, + "provider": { + "ignore_above": 1024, + "type": "keyword" + }, + "region": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "mysql": { + "properties": { + "error": { + "properties": { + "level": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "thread_id": { + "type": "long" + }, + "timestamp": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "slowlog": { + "properties": { + "host": { + "ignore_above": 1024, + "type": "keyword" + }, + "id": { + "type": "long" + }, + "ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "lock_time": { + "properties": { + "sec": { + "type": "float" + } + } + }, + "query": { + 
"ignore_above": 1024, + "type": "keyword" + }, + "query_time": { + "properties": { + "sec": { + "type": "float" + } + } + }, + "rows_examined": { + "type": "long" + }, + "rows_sent": { + "type": "long" + }, + "timestamp": { + "type": "long" + }, + "user": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "nginx": { + "properties": { + "access": { + "properties": { + "agent": { + "norms": false, + "type": "text" + }, + "body_sent": { + "properties": { + "bytes": { + "type": "long" + } + } + }, + "geoip": { + "properties": { + "city_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "continent_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "country_iso_code": { + "ignore_above": 1024, + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "region_name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "http_version": { + "ignore_above": 1024, + "type": "keyword" + }, + "method": { + "ignore_above": 1024, + "type": "keyword" + }, + "referrer": { + "ignore_above": 1024, + "type": "keyword" + }, + "remote_ip": { + "ignore_above": 1024, + "type": "keyword" + }, + "response_code": { + "type": "long" + }, + "url": { + "ignore_above": 1024, + "type": "keyword" + }, + "user_agent": { + "properties": { + "device": { + "ignore_above": 1024, + "type": "keyword" + }, + "major": { + "type": "long" + }, + "minor": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "os": { + "ignore_above": 1024, + "type": "keyword" + }, + "os_major": { + "type": "long" + }, + "os_minor": { + "type": "long" + }, + "os_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "patch": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "user_name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "error": { + "properties": { + "connection_id": { + "type": "long" + }, + "level": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "norms": false, + "type": "text" + }, + "pid": { + "type": "long" + }, + "tid": { + "type": "long" + } + } + } + } + }, + "offset": { + "type": "long" + }, + "read_timestamp": { + "ignore_above": 1024, + "type": "keyword" + }, + "source": { + "ignore_above": 1024, + "type": "keyword" + }, + "system": { + "properties": { + "auth": { + "properties": { + "groupadd": { + "properties": { + "gid": { + "type": "long" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "ignore_above": 1024, + "type": "keyword" + }, + "pid": { + "type": "long" + }, + "program": { + "ignore_above": 1024, + "type": "keyword" + }, + "ssh": { + "properties": { + "dropped_ip": { + "type": "ip" + }, + "event": { + "ignore_above": 1024, + "type": "keyword" + }, + "geoip": { + "properties": { + "city_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "continent_name": { + "ignore_above": 1024, + "type": "keyword" + }, + "country_iso_code": { + "ignore_above": 1024, + "type": "keyword" + }, + "location": { + "type": "geo_point" + }, + "region_name": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "ip": { + "type": "ip" + }, + "method": { + "ignore_above": 1024, + "type": "keyword" + }, + "port": { + "type": "long" + }, + "signature": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "sudo": { + "properties": { + "command": { + "ignore_above": 1024, + "type": "keyword" + }, + "error": { + "ignore_above": 1024, + "type": "keyword" + }, + "pwd": { + 
"ignore_above": 1024, + "type": "keyword" + }, + "tty": { + "ignore_above": 1024, + "type": "keyword" + }, + "user": { + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "timestamp": { + "ignore_above": 1024, + "type": "keyword" + }, + "user": { + "ignore_above": 1024, + "type": "keyword" + }, + "useradd": { + "properties": { + "gid": { + "type": "long" + }, + "home": { + "ignore_above": 1024, + "type": "keyword" + }, + "name": { + "ignore_above": 1024, + "type": "keyword" + }, + "shell": { + "ignore_above": 1024, + "type": "keyword" + }, + "uid": { + "type": "long" + } + } + } + } + }, + "syslog": { + "properties": { + "hostname": { + "ignore_above": 1024, + "type": "keyword" + }, + "message": { + "ignore_above": 1024, + "type": "keyword" + }, + "pid": { + "ignore_above": 1024, + "type": "keyword" + }, + "program": { + "ignore_above": 1024, + "type": "keyword" + }, + "timestamp": { + "ignore_above": 1024, + "type": "keyword" + } + } + } + } + }, + "tags": { + "ignore_above": 1024, + "type": "keyword" + }, + "type": { + "ignore_above": 1024, + "type": "keyword" + } +} diff --git a/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/nested.json b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/nested.json new file mode 100644 index 0000000000000..e8fd42f1d7c58 --- /dev/null +++ b/x-pack/qa/repository-old-versions-7x/src/javaRestTest/resources/org/elasticsearch/oldrepos7x/nested.json @@ -0,0 +1,5 @@ +"properties": { + "user": { + "type": "nested" + } +}