From fbf8c3ee83bddd134a16891e905ea61c4eaefc0a Mon Sep 17 00:00:00 2001
From: David Turner
Date: Fri, 3 Nov 2017 08:48:48 +0000
Subject: [PATCH 01/14] =?UTF-8?q?Reinstate=20recommendation=20for=20?=
 =?UTF-8?q?=E2=89=A5=203=20master-eligible=20nodes.=20(#27204)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In the docs for 1.7 ([doc][doc-1.7], [src][src-1.7]) there was a
recommendation for at least 3 master-eligible nodes "in critical clusters"
but this was lost when that page was updated in 2.0 ([doc][doc-2.0],
[src][src-2.0]). I'd like to reinstate this.

[doc-1.7]: https://www.elastic.co/guide/en/elasticsearch/reference/1.7/modules-node.html
[src-1.7]: https://github.com/elastic/elasticsearch/blob/2cbaccb2f2a495923bc64447fe3396e0fc58b3d3/docs/reference/modules/node.asciidoc
[doc-2.0]: https://www.elastic.co/guide/en/elasticsearch/reference/2.0/modules-node.html#split-brain
[src-2.0]: https://github.com/elastic/elasticsearch/blob/4799009ad7ea8f885b6aedc6f62ad61d69e7a40d/docs/reference/modules/node.asciidoc
---
 docs/reference/modules/node.asciidoc | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index 7b891c579beaa..1e2f7a6e3dd97 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -156,6 +156,13 @@ discovery.zen.minimum_master_nodes: 2 <1>
 ----------------------------
 <1> Defaults to `1`.

+To be able to remain available when one of the master-eligible nodes fails,
+clusters should have at least three master-eligible nodes, with
+`minimum_master_nodes` set accordingly. A <>,
+performed without any downtime, also requires at least three master-eligible
+nodes to avoid the possibility of data loss if a network split occurs while the
+upgrade is in progress.
+
 This setting can also be changed dynamically on a live cluster with the
 <>:

From 28b4d95cf5aea2632fa3c57f9714bad93784df9f Mon Sep 17 00:00:00 2001
From: Colin Goodheart-Smithe
Date: Fri, 3 Nov 2017 08:51:40 +0000
Subject: [PATCH 02/14] Uses norms for exists query if enabled (#27237)

* Uses norms for exists query if enabled

This change means that for indexes created from 6.1.0, if norms are enabled
we will not write the field name to the `_field_names` field, and for an
exists query we will instead use the NormsFieldExistsQuery which was added
in Lucene 7.1.0. If norms are not enabled or if the index was created before
6.1.0, `_field_names` will be used as before.
* Fixes tests --- .../elasticsearch/index/mapper/TextFieldMapper.java | 12 +++++++----- .../index/mapper/FieldNamesFieldMapperTests.java | 3 ++- .../index/query/ExistsQueryBuilderTests.java | 5 +++++ .../index/query/RangeQueryBuilderTests.java | 4 ++++ 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 24c3443658aa8..ae99f743fe57f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -23,7 +23,7 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; -import org.apache.lucene.search.DocValuesFieldExistsQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.settings.Settings; @@ -280,10 +280,10 @@ public String typeName() { @Override public Query existsQuery(QueryShardContext context) { - if (hasDocValues()) { - return new DocValuesFieldExistsQuery(name()); - } else { + if (omitNorms()) { return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); } } @@ -345,7 +345,9 @@ protected void parseCreateField(ParseContext context, List field if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) { Field field = new Field(fieldType().name(), value, fieldType()); fields.add(field); - createFieldNamesField(context, fields); + if (fieldType().omitNorms()) { + createFieldNamesField(context, fields); + } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index 70022dc632619..3655f04fcbba1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import java.util.Arrays; +import java.util.Collections; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; @@ -87,7 +88,7 @@ public void testInjectIntoDocDuringParsing() throws Exception { .bytes(), XContentType.JSON)); - assertFieldNames(set("a"), doc); + assertFieldNames(Collections.emptySet(), doc); } public void testExplicitEnabled() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index 40d3f90dd4efe..d4547eee26f89 100644 --- a/core/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.Version; @@ -113,6 +114,10 @@ protected void doAssertLuceneQuery(ExistsQueryBuilder queryBuilder, Query query, assertThat(constantScoreQuery.getQuery(), instanceOf(DocValuesFieldExistsQuery.class)); 
DocValuesFieldExistsQuery dvExistsQuery = (DocValuesFieldExistsQuery) constantScoreQuery.getQuery(); assertEquals(field, dvExistsQuery.getField()); + } else if (context.getQueryShardContext().getMapperService().fullName(field).omitNorms() == false) { + assertThat(constantScoreQuery.getQuery(), instanceOf(NormsFieldExistsQuery.class)); + NormsFieldExistsQuery normsExistsQuery = (NormsFieldExistsQuery) constantScoreQuery.getQuery(); + assertEquals(field, normsExistsQuery.getField()); } else { assertThat(constantScoreQuery.getQuery(), instanceOf(TermQuery.class)); TermQuery termQuery = (TermQuery) constantScoreQuery.getQuery(); diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index ad3946a675560..2230436b18ef4 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -129,6 +130,9 @@ protected void doAssertLuceneQuery(RangeQueryBuilder queryBuilder, Query query, if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) && context.mapperService().fullName(queryBuilder.fieldName()).hasDocValues()) { expectedQuery = new ConstantScoreQuery(new DocValuesFieldExistsQuery(queryBuilder.fieldName())); + } else if (context.mapperService().getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) + && context.mapperService().fullName(queryBuilder.fieldName()).omitNorms() == false) { + expectedQuery = new ConstantScoreQuery(new NormsFieldExistsQuery(queryBuilder.fieldName())); } else { expectedQuery = new ConstantScoreQuery(new TermQuery(new Term(FieldNamesFieldMapper.NAME, queryBuilder.fieldName()))); } From d5037826995c04e97a752c44e117efab1aac9e6e Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 3 Nov 2017 11:24:45 +0100 Subject: [PATCH 03/14] [Test] Fix QueryStringQueryBuilderTests.testExistsFieldQuery Adapt the test to check for the new NormsFieldExistsQuery. 
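For readers who have not yet met the Lucene 7.1 query these two patches adopt,
here is a minimal standalone sketch (illustrative only, not part of either
patch; the class and field names are invented) showing that
NormsFieldExistsQuery matches exactly the documents that indexed norms for a
field, with no `_field_names` lookup involved:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.NormsFieldExistsQuery;
    import org.apache.lucene.store.RAMDirectory;

    public class NormsExistsSketch {
        public static void main(String[] args) throws Exception {
            RAMDirectory dir = new RAMDirectory();
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Document withBody = new Document();
                // TextField indexes norms by default, so "body" is visible to NormsFieldExistsQuery
                withBody.add(new TextField("body", "hello world", Field.Store.NO));
                writer.addDocument(withBody);
                writer.addDocument(new Document()); // a document without a "body" field
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                // counts only the first document; the norms act as the existence marker
                System.out.println(searcher.count(new NormsFieldExistsQuery("body"))); // prints 1
            }
        }
    }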
Closes #27246
---
 .../index/query/QueryStringQueryBuilderTests.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
index cf71bc0872aa0..c3ea1c0c9426f 100644
--- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
@@ -31,6 +31,7 @@
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.NormsFieldExistsQuery;
 import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
@@ -805,7 +806,7 @@ public void testExistsFieldQuery() throws Exception {
         Query query = queryBuilder.toQuery(context);
         Query expected;
         if (getCurrentTypes().length > 0) {
-            expected = new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME)));
+            expected = new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME));
         } else {
             expected = new MatchNoDocsQuery();
         }

From 9abc26ee92a504dbaec9aa89469603f2113e5432 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Fri, 3 Nov 2017 11:17:08 +0100
Subject: [PATCH 04/14] [Test] Fix InternalStatsTests

After recent changes in InternalStats#doXContentBody the corresponding
xContent output of the parsed aggregation needed to be changed in a
similar way.
---
 .../search/aggregations/metrics/stats/ParsedStats.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java
index 239548ecdebc6..4c676cf227838 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/ParsedStats.java
@@ -109,7 +109,7 @@ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params)
         builder.nullField(Fields.MIN);
         builder.nullField(Fields.MAX);
         builder.nullField(Fields.AVG);
-        builder.nullField(Fields.SUM);
+        builder.field(Fields.SUM, 0.0d);
     }
     otherStatsToXContent(builder, params);
     return builder;

From 0f21262b3692d02c0524026846879e92bf61518a Mon Sep 17 00:00:00 2001
From: kel
Date: Fri, 3 Nov 2017 07:10:50 -0500
Subject: [PATCH 05/14] Do not create directories if repository is readonly (#26909)

For FsBlobStore and HdfsBlobStore, if the repository is read only, the blob
store should be aware of the readonly setting and should not create
directories if they don't exist.
Closes #21495 --- .../common/blobstore/fs/FsBlobStore.java | 11 +++- .../common/blobstore/FsBlobStoreTests.java | 37 +++++++++++ .../repositories/hdfs/HdfsBlobStore.java | 28 +++++---- .../repositories/hdfs/HdfsRepository.java | 2 +- .../hdfs/HdfsBlobStoreContainerTests.java | 63 +++++++++++++++---- .../ESBlobStoreContainerTestCase.java | 2 +- .../repositories/ESBlobStoreTestCase.java | 2 +- 7 files changed, 118 insertions(+), 27 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index ce696678896f8..60055130fbe1d 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -39,10 +39,15 @@ public class FsBlobStore extends AbstractComponent implements BlobStore { private final int bufferSizeInBytes; + private final boolean readOnly; + public FsBlobStore(Settings settings, Path path) throws IOException { super(settings); this.path = path; - Files.createDirectories(path); + this.readOnly = settings.getAsBoolean("readonly", false); + if (!this.readOnly) { + Files.createDirectories(path); + } this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); } @@ -80,7 +85,9 @@ public void close() { private synchronized Path buildAndCreate(BlobPath path) throws IOException { Path f = buildPath(path); - Files.createDirectories(f); + if (!readOnly) { + Files.createDirectories(f); + } return f; } diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java index 7d4ac1acc0798..8b9021cae9370 100644 --- a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java +++ b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java @@ -20,12 +20,14 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreTestCase; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; @LuceneTestCase.SuppressFileSystems("ExtrasFS") @@ -35,4 +37,39 @@ protected BlobStore newBlobStore() throws IOException { Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); return new FsBlobStore(settings, tempDir); } + + public void testReadOnly() throws Exception { + Settings settings = Settings.builder().put("readonly", true).build(); + Path tempDir = createTempDir(); + Path path = tempDir.resolve("bar"); + + try (FsBlobStore store = new FsBlobStore(settings, path)) { + assertFalse(Files.exists(path)); + BlobPath blobPath = BlobPath.cleanPath().add("foo"); + store.blobContainer(blobPath); + Path storePath = store.path(); + for (String d : blobPath) { + storePath = storePath.resolve(d); + } + assertFalse(Files.exists(storePath)); + } + + settings = randomBoolean() ? 
Settings.EMPTY : Settings.builder().put("readonly", false).build(); + try (FsBlobStore store = new FsBlobStore(settings, path)) { + assertTrue(Files.exists(path)); + BlobPath blobPath = BlobPath.cleanPath().add("foo"); + BlobContainer container = store.blobContainer(blobPath); + Path storePath = store.path(); + for (String d : blobPath) { + storePath = storePath.resolve(d); + } + assertTrue(Files.exists(storePath)); + assertTrue(Files.isDirectory(storePath)); + + byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16))); + writeBlob(container, "test", new BytesArray(data)); + assertArrayEquals(readBlobFully(container, "test", data.length), data); + assertTrue(container.blobExists("test")); + } + } } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java index fb26bd4675428..fc6922d81f441 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -39,17 +39,21 @@ final class HdfsBlobStore implements BlobStore { private final FileContext fileContext; private final HdfsSecurityContext securityContext; private final int bufferSize; + private final boolean readOnly; private volatile boolean closed; - HdfsBlobStore(FileContext fileContext, String path, int bufferSize) throws IOException { + HdfsBlobStore(FileContext fileContext, String path, int bufferSize, boolean readOnly) throws IOException { this.fileContext = fileContext; this.securityContext = new HdfsSecurityContext(fileContext.getUgi()); this.bufferSize = bufferSize; this.root = execute(fileContext1 -> fileContext1.makeQualified(new Path(path))); - try { - mkdirs(root); - } catch (FileAlreadyExistsException ok) { - // behaves like Files.createDirectories + this.readOnly = readOnly; + if (!readOnly) { + try { + mkdirs(root); + } catch (FileAlreadyExistsException ok) { + // behaves like Files.createDirectories + } } } @@ -80,12 +84,14 @@ public BlobContainer blobContainer(BlobPath path) { private Path buildHdfsPath(BlobPath blobPath) { final Path path = translateToHdfsPath(blobPath); - try { - mkdirs(path); - } catch (FileAlreadyExistsException ok) { - // behaves like Files.createDirectories - } catch (IOException ex) { - throw new ElasticsearchException("failed to create blob container", ex); + if (!readOnly) { + try { + mkdirs(path); + } catch (FileAlreadyExistsException ok) { + // behaves like Files.createDirectories + } catch (IOException ex) { + throw new ElasticsearchException("failed to create blob container", ex); + } } return path; } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index aa57cc1128f08..1bf2e47e9650b 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -106,7 +106,7 @@ protected void doStart() { SpecialPermission.check(); FileContext fileContext = AccessController.doPrivileged((PrivilegedAction) () -> createContext(uri, getMetadata().settings())); - blobStore = new HdfsBlobStore(fileContext, pathSetting, bufferSize); + blobStore = new HdfsBlobStore(fileContext, pathSetting, bufferSize, 
isReadOnly()); logger.debug("Using file-system [{}] for URI [{}], path [{}]", fileContext.getDefaultFileSystem(), fileContext.getDefaultFileSystem().getUri(), pathSetting); } catch (IOException e) { throw new UncheckedIOException(String.format(Locale.ROOT, "Cannot create HDFS repository for uri [%s]", uri), e); diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 195dea9810224..a5d68331db78e 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -19,6 +19,20 @@ package org.elasticsearch.repositories.hdfs; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.AbstractFileSystem; +import org.apache.hadoop.fs.FileContext; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; + +import javax.security.auth.Subject; import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; @@ -29,22 +43,20 @@ import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.util.Collections; -import javax.security.auth.Subject; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.AbstractFileSystem; -import org.apache.hadoop.fs.FileContext; -import org.apache.hadoop.fs.UnsupportedFileSystemException; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.readBlobFully; + @ThreadLeakFilters(filters = {HdfsClientThreadLeakFilter.class}) public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override protected BlobStore newBlobStore() throws IOException { + return new HdfsBlobStore(createTestContext(), "temp", 1024, false); + } + + private FileContext createTestContext() { FileContext fileContext; try { fileContext = AccessController.doPrivileged((PrivilegedExceptionAction) @@ -52,7 +64,7 @@ protected BlobStore newBlobStore() throws IOException { } catch (PrivilegedActionException e) { throw new RuntimeException(e.getCause()); } - return new HdfsBlobStore(fileContext, "temp", 1024); + return fileContext; } @SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)") @@ -69,7 +81,7 @@ private FileContext createContext(URI uri) { Class clazz = Class.forName("org.apache.hadoop.security.User"); ctor = clazz.getConstructor(String.class); ctor.setAccessible(true); - } catch (ClassNotFoundException | NoSuchMethodException e) { + } catch (ClassNotFoundException | 
NoSuchMethodException e) {
            throw new RuntimeException(e);
        }

@@ -98,4 +110,33 @@ private FileContext createContext(URI uri) {
             }
         });
     }
+
+    public void testReadOnly() throws Exception {
+        FileContext fileContext = createTestContext();
+        // Constructor will not create dir if read only
+        HdfsBlobStore hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, true);
+        FileContext.Util util = fileContext.util();
+        Path root = fileContext.makeQualified(new Path("dir"));
+        assertFalse(util.exists(root));
+        BlobPath blobPath = BlobPath.cleanPath().add("path");
+
+        // blobContainer() will not create path if read only
+        hdfsBlobStore.blobContainer(blobPath);
+        Path hdfsPath = root;
+        for (String p : blobPath) {
+            hdfsPath = new Path(hdfsPath, p);
+        }
+        assertFalse(util.exists(hdfsPath));
+
+        // if not read only, directory will be created
+        hdfsBlobStore = new HdfsBlobStore(fileContext, "dir", 1024, false);
+        assertTrue(util.exists(root));
+        BlobContainer container = hdfsBlobStore.blobContainer(blobPath);
+        assertTrue(util.exists(hdfsPath));
+
+        byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+        writeBlob(container, "foo", new BytesArray(data));
+        assertArrayEquals(readBlobFully(container, "foo", data.length), data);
+        assertTrue(container.blobExists("foo"));
+    }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
index 42029b053a4b0..18be4e9437770 100644
--- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java
@@ -142,7 +142,7 @@ public void testVerifyOverwriteFails() throws IOException {
         }
     }

-    private void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException {
+    protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException {
         try (InputStream stream = bytesArray.streamInput()) {
             container.writeBlob(blobName, stream, bytesArray.length());
         }
diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java
index e7f8edb1fa208..35a17c2a8dd83 100644
--- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java
@@ -78,7 +78,7 @@ public static byte[] randomBytes(int length) {
         return data;
     }

-    private static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
+    protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
         try (InputStream stream = bytesArray.streamInput()) {
             container.writeBlob(blobName, stream, bytesArray.length());
         }

From 8b4a92fbb7e5facd103c9597fb7a0dbc948cde25 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Fri, 3 Nov 2017 08:51:03 -0400
Subject: [PATCH 06/14] Adjust assertions for sequence numbers BWC tests

This commit adjusts the assertions for the sequence number BWC tests to
account for the fact that sometimes these tests are run in mixed-clusters
with 5.6 nodes (that do not understand sequence numbers), and sometimes
these tests are run in mixed-clusters with 6.0+ nodes (that all understand
sequence numbers). Relates #27251 --- .../java/org/elasticsearch/backwards/IndexingIT.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index c6200417e39d8..f744b3029b125 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -192,8 +192,8 @@ public void testSeqNoCheckpoints() throws Exception { int numDocs = 0; final int numberOfInitialDocs = 1 + randomInt(5); logger.info("indexing [{}] docs initially", numberOfInitialDocs); - numDocs += indexDocs(index, numDocs, numberOfInitialDocs); - assertSeqNoOnShards(index, nodes, numDocs, newNodeClient); + numDocs += indexDocs(index, 0, numberOfInitialDocs); + assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : 0, newNodeClient); logger.info("allowing shards on all nodes"); updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(); @@ -204,7 +204,7 @@ public void testSeqNoCheckpoints() throws Exception { final int numberOfDocsAfterAllowingShardsOnAllNodes = 1 + randomInt(5); logger.info("indexing [{}] docs after allowing shards on all nodes", numberOfDocsAfterAllowingShardsOnAllNodes); numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes); - assertSeqNoOnShards(index, nodes, numDocs, newNodeClient); + assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : 0, newNodeClient); Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName()); updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName())); @@ -214,7 +214,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("indexing [{}] docs after moving primary", numberOfDocsAfterMovingPrimary); numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterMovingPrimary); numDocs += numberOfDocsAfterMovingPrimary; - assertSeqNoOnShards(index, nodes, numDocs, newNodeClient); + assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : numDocsOnNewPrimary, newNodeClient); /* * Dropping the number of replicas to zero, and then increasing it to one triggers a recovery thus exercising any BWC-logic in * the recovery code. @@ -233,7 +233,7 @@ public void testSeqNoCheckpoints() throws Exception { for (Shard shard : buildShards(index, nodes, newNodeClient)) { assertCount(index, "_only_nodes:" + shard.node.nodeName, numDocs); } - assertSeqNoOnShards(index, nodes, numDocs, newNodeClient); + assertSeqNoOnShards(index, nodes, nodes.getBWCVersion().major >= 6 ? numDocs : numDocsOnNewPrimary, newNodeClient); } } From 262422375e8dfea8b0edcbded51d160ce99858e3 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 3 Nov 2017 14:17:11 +0100 Subject: [PATCH 07/14] [Test] Fix QueryStringQueryBuilderTests.testExistsFieldQuery BWC Handle BWC version in this test. 
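In other words, the expected query has to be gated on the version the index
was created with. A condensed sketch of the idiom (the helper method is
hypothetical and not standalone; it is meant to be read alongside the test
class, and the authoritative code is in the diff below):

    // Hypothetical helper condensing the BWC branching used in this fix:
    // indexes created on 6.1+ expect the norms-based query, older ones the
    // _field_names term lookup.
    private Query expectedExistsQuery(QueryShardContext context, String field) {
        if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0)) {
            return new ConstantScoreQuery(new NormsFieldExistsQuery(field));
        }
        return new ConstantScoreQuery(new TermQuery(new Term("_field_names", field)));
    }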
Closes #27246
---
 .../index/query/QueryStringQueryBuilderTests.java | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
index c3ea1c0c9426f..9c5238f55f61c 100644
--- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java
@@ -46,6 +46,7 @@
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -804,17 +805,19 @@ public void testExistsFieldQuery() throws Exception {
         QueryShardContext context = createShardContext();
         QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder(STRING_FIELD_NAME + ":*");
         Query query = queryBuilder.toQuery(context);
-        Query expected;
         if (getCurrentTypes().length > 0) {
-            expected = new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME));
+            if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0)) {
+                assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME))));
+            } else {
+                assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME)))));
+            }
         } else {
-            expected = new MatchNoDocsQuery();
+            assertThat(query, equalTo(new MatchNoDocsQuery()));
         }
-        assertThat(query, equalTo(expected));

         queryBuilder = new QueryStringQueryBuilder("*:*");
         query = queryBuilder.toQuery(context);
-        expected = new MatchAllDocsQuery();
+        Query expected = new MatchAllDocsQuery();
         assertThat(query, equalTo(expected));

         queryBuilder = new QueryStringQueryBuilder("*");

From 9e67cca98747144092ca9cdb5bcc9083b3a77fb7 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Fri, 3 Nov 2017 11:52:23 +0100
Subject: [PATCH 08/14] build: Fix setting the incorrect bwc version in mixed
 cluster qa module

Prior to this change if the `bwcTest` task is run then it would create a
task for each version, but each task in reality would use the
wireCompatVersions - 1 ES version. So we were not actually testing against
5.6.x versions in the 6.x and 6.0 branches.
---
 qa/mixed-cluster/build.gradle | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle
index 66185325931d8..781a69684e5d4 100644
--- a/qa/mixed-cluster/build.gradle
+++ b/qa/mixed-cluster/build.gradle
@@ -37,14 +37,14 @@ for (Version version : wireCompatVersions) {
     includePackaged = true
   }

-  /* This project runs the core REST tests against a 2 node cluster where one of
+  /* This project runs the core REST tests against a 4 node cluster where two of
 the nodes has a different minor.
*/ Object extension = extensions.findByName("${baseName}#mixedClusterTestCluster") - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { + configure(extension) { distribution = 'zip' numNodes = 4 numBwcNodes = 2 - bwcVersion = project.wireCompatVersions[-1] + bwcVersion = version } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { From 20e8005859a800d6d08e011c43dd74c64eb7c4be Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Fri, 3 Nov 2017 13:21:44 +0000 Subject: [PATCH 09/14] Fixes QueryStringQueryBuilderTests Closes #27246 --- .../index/query/QueryStringQueryBuilderTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 9c5238f55f61c..5aa375672822a 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -806,7 +806,8 @@ public void testExistsFieldQuery() throws Exception { QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder(STRING_FIELD_NAME + ":*"); Query query = queryBuilder.toQuery(context); if (getCurrentTypes().length > 0) { - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0)) { + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_1_0) + && (context.fieldMapper(STRING_FIELD_NAME).omitNorms() == false)) { assertThat(query, equalTo(new ConstantScoreQuery(new NormsFieldExistsQuery(STRING_FIELD_NAME)))); } else { assertThat(query, equalTo(new ConstantScoreQuery(new TermQuery(new Term("_field_names", STRING_FIELD_NAME))))); From 0635778c90b94bf156b3b802cd4c2bfffc471958 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Fri, 3 Nov 2017 14:36:59 +0100 Subject: [PATCH 10/14] Add support for Gradle 4.3 (#27249) Closes #26840 Related to #27087 --- .../groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 67ebfd5c9bb2c..159f84258df28 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -131,9 +131,10 @@ class BuildPlugin implements Plugin { throw new GradleException("${minGradle} or above is required to build elasticsearch") } - final GradleVersion maxGradle = GradleVersion.version('4.2') - if (currentGradleVersion >= maxGradle) { - throw new GradleException("${maxGradle} or above is not compatible with the elasticsearch build") + final GradleVersion gradle42 = GradleVersion.version('4.2') + final GradleVersion gradle43 = GradleVersion.version('4.3') + if (currentGradleVersion >= gradle42 && currentGradleVersion < gradle43) { + throw new GradleException("${currentGradleVersion} is not compatible with the elasticsearch build") } // enforce Java version From 8f0f02450782f334dbe5e715f4f6f93dd7695f06 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 3 Nov 2017 20:23:07 +0100 Subject: [PATCH 11/14] #27189 Fixed rounding of bounds in scaled float comparison (#27207) * #27189 Fixed rounding of bounds in scaled float comparison * #27189 more assertions from CR --- .../index/mapper/ScaledFloatFieldMapper.java | 8 ++--- 
.../mapper/ScaledFloatFieldTypeTests.java | 36 +++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index 0ff3acdea05fa..96ec29e2aa695 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -256,19 +256,19 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower failIfNotIndexed(); Long lo = null; if (lowerTerm != null) { - double dValue = parse(lowerTerm); + double dValue = parse(lowerTerm) * scalingFactor; if (includeLower == false) { dValue = Math.nextUp(dValue); } - lo = Math.round(Math.ceil(dValue * scalingFactor)); + lo = Math.round(Math.ceil(dValue)); } Long hi = null; if (upperTerm != null) { - double dValue = parse(upperTerm); + double dValue = parse(upperTerm) * scalingFactor; if (includeUpper == false) { dValue = Math.nextDown(dValue); } - hi = Math.round(Math.floor(dValue * scalingFactor)); + hi = Math.round(Math.floor(dValue)); } Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues()); if (boost() != 1f) { diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java index 811bac82bbce9..83039ebd88319 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -124,6 +124,42 @@ public void testRangeQuery() throws IOException { IOUtils.close(reader, dir); } + public void testRoundsUpperBoundCorrectly() { + ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType(); + ft.setName("scaled_float"); + ft.setScalingFactor(100.0); + Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, null); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, null); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, null); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, null); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, null); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, null); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + } + + public void testRoundsLowerBoundCorrectly() { + ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType(); + ft.setName("scaled_float"); + ft.setScalingFactor(100.0); + Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, null); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, null); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", 
scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, null);
+        assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, null);
+        assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, null);
+        assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
+        scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, null);
+        assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
+    }
+
     public void testValueForSearch() {
         ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
         ft.setName("scaled_float");

From 3deba0ed1fe43faa9d22a0e924a1c6f8e60df3ce Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Fri, 3 Nov 2017 20:34:48 +0100
Subject: [PATCH 12/14] #26260 Allow ip_range to accept CIDR notation (#27192)

* #26260 Allow ip_range to accept CIDR notation

* #26260 added non-byte-aligned cidr test cases
---
 .../index/mapper/RangeFieldMapper.java | 24 ++++-
 .../index/mapper/IpRangeFieldMapperTests.java | 87 +++++++++++++++++++
 2 files changed, 110 insertions(+), 1 deletion(-)
 create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java

diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
index 0ff6283f38108..46d553a472973 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java
@@ -42,6 +42,7 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.common.joda.DateMathParser;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
@@ -57,6 +58,7 @@
 import java.io.IOException;
 import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -353,7 +355,8 @@ protected void parseCreateField(ParseContext context, List field
             range = context.parseExternalValue(Range.class);
         } else {
             XContentParser parser = context.parser();
-            if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+            final XContentParser.Token start = parser.currentToken();
+            if (start == XContentParser.Token.START_OBJECT) {
                 RangeFieldType fieldType = fieldType();
                 RangeType rangeType = fieldType.rangeType;
                 String fieldName = null;
@@ -393,6 +396,8 @@ protected void parseCreateField(ParseContext context, List field
                     }
                 }
                 range = new Range(rangeType, from, to, includeFrom, includeTo);
+            } else if (fieldType().rangeType == RangeType.IP && start == XContentParser.Token.VALUE_STRING) {
+                range = parseIpRangeFromCidr(parser);
             } else {
                 throw new MapperParsingException("error parsing field [" +
                     name() + "], expected an object but got " + parser.currentName());
@@ -435,6 +440,23 @@ && fieldType().dateTimeFormatter().locale() != Locale.ROOT))) {
         }
     }

+    private static Range parseIpRangeFromCidr(final XContentParser parser) throws IOException {
+        final Tuple cidr = InetAddresses.parseCidr(parser.text());
+        // create the lower value by zeroing out the host
portion, upper value by filling it with all ones. + byte[] lower = cidr.v1().getAddress(); + byte[] upper = lower.clone(); + for (int i = cidr.v2(); i < 8 * lower.length; i++) { + int m = 1 << 7 - (i & 7); + lower[i >> 3] &= ~m; + upper[i >> 3] |= m; + } + try { + return new Range(RangeType.IP, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper), true, true); + } catch (UnknownHostException bogus) { + throw new AssertionError(bogus); + } + } + /** Enum defining the type of range */ public enum RangeType { IP("ip_range") { diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java new file mode 100644 index 0000000000000..63ebe7a6cb3c0 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/IpRangeFieldMapperTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.mapper; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import static org.hamcrest.Matchers.containsString; + +public class IpRangeFieldMapperTests extends ESSingleNodeTestCase { + + private IndexService indexService; + private DocumentMapperParser parser; + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + public void testStoreCidr() throws Exception { + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "ip_range") + .field("store", true); + mapping = mapping.endObject().endObject().endObject().endObject(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string())); + assertEquals(mapping.string(), mapper.mappingSource().toString()); + final Map cases = new HashMap<>(); + cases.put("192.168.0.0/15", "192.169.255.255"); + cases.put("192.168.0.0/16", "192.168.255.255"); + cases.put("192.168.0.0/17", "192.168.127.255"); + for (final Map.Entry entry 
: cases.entrySet()) {
+            ParsedDocument doc =
+                mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("field", entry.getKey())
+                    .endObject().bytes(),
+                    XContentType.JSON
+                ));
+            IndexableField[] fields = doc.rootDoc().getFields("field");
+            assertEquals(3, fields.length);
+            IndexableField dvField = fields[0];
+            assertEquals(DocValuesType.BINARY, dvField.fieldType().docValuesType());
+            IndexableField pointField = fields[1];
+            assertEquals(2, pointField.fieldType().pointDimensionCount());
+            IndexableField storedField = fields[2];
+            assertTrue(storedField.fieldType().stored());
+            String strVal =
+                InetAddresses.toAddrString(InetAddresses.forString("192.168.0.0")) + " : " +
+                InetAddresses.toAddrString(InetAddresses.forString(entry.getValue()));
+            assertThat(storedField.stringValue(), containsString(strVal));
+        }
+    }
+}

From 67e677f443e90792f03fa900f09eedbeeb755456 Mon Sep 17 00:00:00 2001
From: Loek van Gool
Date: Fri, 3 Nov 2017 23:20:58 +0100
Subject: [PATCH 13/14] Add an example of dynamic field names (#27255)

---
 docs/reference/ingest/ingest-node.asciidoc | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index bdcbf2a9166fe..74cfabbff47a1 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -563,6 +563,20 @@ to set the index that the document will be indexed into:
 --------------------------------------------------
 // NOTCONSOLE

+Dynamic field names are also supported. This example sets the field named after the
+value of `service` to the value of the field `code`:
+
+[source,js]
+--------------------------------------------------
+{
+  "set": {
+    "field": "{{service}}",
+    "value": "{{code}}"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
 [[handling-failure-in-pipelines]]
 == Handling Failures in Pipelines

From 117f0f3a445ea8f43cb479c42759314e9a9dbd2f Mon Sep 17 00:00:00 2001
From: Igor Motov
Date: Fri, 3 Nov 2017 19:36:08 -0400
Subject: [PATCH 14/14] Fix snapshot getting stuck in INIT state (#27214)

If the master disconnects from the cluster after initiating a snapshot, but
just before the snapshot switches from INIT to STARTED state, the snapshot
can get indefinitely stuck in the INIT state. This error is specific to
v5.x+ and was triggered by keeping the master node that stepped down in the
node list; the cleanup logic in snapshot/restore assumed that if the master
steps down it is always removed from the node list. This commit changes the
logic to trigger cleanup even if no nodes left the cluster.
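A condensed view of the new trigger condition (an illustrative fragment, not
standalone code; the authoritative change is in the SnapshotsService diff
below):

    // Cleanup used to run only when nodes were removed from the cluster state.
    // Since a stepped-down master can stay in the node list, the check must also
    // fire when this node has just become master (a "master flip"):
    boolean becameMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false;
    if (event.nodesRemoved() || becameMaster) {
        processSnapshotsOnRemovedNodes(event);
    }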
Closes #27180 --- .../common/blobstore/fs/FsBlobContainer.java | 4 +- .../snapshots/SnapshotsService.java | 38 +++- .../discovery/SnapshotDisruptionIT.java | 173 ++++++++++++++++++ 3 files changed, 209 insertions(+), 6 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 757cce7d8379a..1e384109aebce 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -140,7 +140,9 @@ public void move(String source, String target) throws IOException { Path targetPath = path.resolve(target); // If the target file exists then Files.move() behaviour is implementation specific // the existing file might be replaced or this method fails by throwing an IOException. - assert !Files.exists(targetPath); + if (Files.exists(targetPath)) { + throw new FileAlreadyExistsException("blob [" + targetPath + "] already exists, cannot overwrite"); + } Files.move(sourcePath, targetPath, StandardCopyOption.ATOMIC_MOVE); IOUtils.fsync(path, true); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 037db4d5caf66..0804e69e46e23 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -425,6 +425,15 @@ public void onFailure(String source, Exception e) { removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); } + @Override + public void onNoLongerMaster(String source) { + // We are not longer a master - we shouldn't try to do any cleanup + // The new master will take care of it + logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId()); + userCreateSnapshotListener.onFailure( + new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization")); + } + @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted @@ -473,6 +482,10 @@ public void onFailure(Exception e) { cleanupAfterError(e); } + public void onNoLongerMaster(String source) { + userCreateSnapshotListener.onFailure(e); + } + private void cleanupAfterError(Exception exception) { if(snapshotCreated) { try { @@ -628,7 +641,8 @@ private SnapshotShardFailure findShardFailure(List shardFa public void applyClusterState(ClusterChangedEvent event) { try { if (event.localNodeMaster()) { - if (event.nodesRemoved()) { + // We don't remove old master when master flips anymore. 
So, we need to check for change in master + if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) { processSnapshotsOnRemovedNodes(event); } if (event.routingTableChanged()) { @@ -981,7 +995,7 @@ private void removeSnapshotFromClusterState(final Snapshot snapshot, final Snaps * @param listener listener to notify when snapshot information is removed from the cluster state */ private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure, - @Nullable ActionListener listener) { + @Nullable CleanupAfterErrorListener listener) { clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() { @Override @@ -1013,6 +1027,13 @@ public void onFailure(String source, Exception e) { } } + @Override + public void onNoLongerMaster(String source) { + if (listener != null) { + listener.onNoLongerMaster(source); + } + } + @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { for (SnapshotCompletionListener listener : snapshotCompletionListeners) { @@ -1183,9 +1204,16 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh if (completedSnapshot.equals(snapshot)) { logger.debug("deleted snapshot completed - deleting files"); removeListener(this); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> - deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), - listener, true) + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(), + listener, true); + + } catch (Exception ex) { + logger.warn((Supplier) () -> + new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); + } + } ); } } diff --git a/core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java new file mode 100644 index 0000000000000..3458cca0cf78e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.NetworkDisruption; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.instanceOf; + +/** + * Tests snapshot operations during disruptions. + */ +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0, autoMinMasterNodes = false) +@TestLogging("org.elasticsearch.snapshot:TRACE") +public class SnapshotDisruptionIT extends AbstractDisruptionTestCase { + + public void testDisruptionOnSnapshotInitialization() throws Exception { + final Settings settings = Settings.builder() + .put(DEFAULT_SETTINGS) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "30s") // wait till cluster state is committed + .build(); + final String idxName = "test"; + configureCluster(settings, 4, null, 2); + final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3); + final String dataNode = internalCluster().startDataOnlyNode(); + ensureStableCluster(4); + + createRandomIndex(idxName); + + logger.info("--> creating repository"); + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + // Writing incompatible snapshot can cause this test to fail due to a race condition in repo initialization + // by the current master and the former master. It is not causing any issues in real life scenario, but + // might make this test to fail. We are going to complete initialization of the snapshot to prevent this failures. 
+ logger.info("--> initializing the repository"); + assertEquals(SnapshotState.SUCCESS, client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1") + .setWaitForCompletion(true).setIncludeGlobalState(true).setIndices().get().getSnapshotInfo().state()); + + final String masterNode1 = internalCluster().getMasterName(); + Set otherNodes = new HashSet<>(); + otherNodes.addAll(allMasterEligibleNodes); + otherNodes.remove(masterNode1); + otherNodes.add(dataNode); + + NetworkDisruption networkDisruption = + new NetworkDisruption(new NetworkDisruption.TwoPartitions(Collections.singleton(masterNode1), otherNodes), + new NetworkDisruption.NetworkUnresponsive()); + internalCluster().setDisruptionScheme(networkDisruption); + + ClusterService clusterService = internalCluster().clusterService(masterNode1); + CountDownLatch disruptionStarted = new CountDownLatch(1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + SnapshotsInProgress snapshots = event.state().custom(SnapshotsInProgress.TYPE); + if (snapshots != null && snapshots.entries().size() > 0) { + if (snapshots.entries().get(0).state() == SnapshotsInProgress.State.INIT) { + // The snapshot started, we can start disruption so the INIT state will arrive to another master node + logger.info("--> starting disruption"); + networkDisruption.startDisrupting(); + clusterService.removeListener(this); + disruptionStarted.countDown(); + } + } + } + }); + + logger.info("--> starting snapshot"); + ActionFuture future = client(masterNode1).admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(false).setIndices(idxName).execute(); + + logger.info("--> waiting for disruption to start"); + assertTrue(disruptionStarted.await(1, TimeUnit.MINUTES)); + + logger.info("--> wait until the snapshot is done"); + assertBusy(() -> { + SnapshotsInProgress snapshots = dataNodeClient().admin().cluster().prepareState().setLocal(true).get().getState() + .custom(SnapshotsInProgress.TYPE); + if (snapshots != null && snapshots.entries().size() > 0) { + logger.info("Current snapshot state [{}]", snapshots.entries().get(0).state()); + fail("Snapshot is still running"); + } else { + logger.info("Snapshot is no longer in the cluster state"); + } + }, 1, TimeUnit.MINUTES); + + logger.info("--> verify that snapshot was successful or no longer exist"); + assertBusy(() -> { + try { + GetSnapshotsResponse snapshotsStatusResponse = dataNodeClient().admin().cluster().prepareGetSnapshots("test-repo") + .setSnapshots("test-snap-2").get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + logger.info("--> done verifying"); + } catch (SnapshotMissingException exception) { + logger.info("--> snapshot doesn't exist"); + } + }, 1, TimeUnit.MINUTES); + + logger.info("--> stopping disrupting"); + networkDisruption.stopDisrupting(); + ensureStableCluster(4, masterNode1); + logger.info("--> done"); + + try { + future.get(); + } catch (Exception ex) { + logger.info("--> got exception from hanged master", ex); + Throwable cause = ex.getCause(); + assertThat(cause, instanceOf(MasterNotDiscoveredException.class)); + cause = cause.getCause(); + assertThat(cause, instanceOf(Discovery.FailedToCommitClusterStateException.class)); + } + } + + private void 
createRandomIndex(String idxName) throws ExecutionException, InterruptedException { + assertAcked(prepareCreate(idxName, 0, Settings.builder().put("number_of_shards", between(1, 20)) + .put("number_of_replicas", 0))); + logger.info("--> indexing some data"); + final int numdocs = randomIntBetween(10, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; + for (int i = 0; i < builders.length; i++) { + builders[i] = client().prepareIndex(idxName, "type1", Integer.toString(i)).setSource("field1", "bar " + i); + } + indexRandom(true, builders); + } +}