Skip to content

Commit

Permalink
Merge remote-tracking branch 'es/master' into ccr
Browse files Browse the repository at this point in the history
* es/master:
  Fix snapshot getting stuck in INIT state (#27214)
  Add an example of dynamic field names (#27255)
  #26260 Allow ip_range to accept CIDR notation (#27192)
  #27189 Fixed rounding of bounds in scaled float comparison (#27207)
  Add support for Gradle 4.3 (#27249)
  Fixes QueryStringQueryBuilderTests
  build: Fix setting the incorrect bwc version in mixed cluster qa module
  [Test] Fix QueryStringQueryBuilderTests.testExistsFieldQuery BWC
  Adjust assertions for sequence numbers BWC tests
  Do not create directories if repository is readonly (#26909)
  [Test] Fix InternalStatsTests
  [Test] Fix QueryStringQueryBuilderTests.testExistsFieldQuery
  Uses norms for exists query if enabled (#27237)
  Reinstate recommendation for ≥ 3 master-eligible nodes. (#27204)
  • Loading branch information
martijnvg committed Nov 4, 2017
2 parents 0289330 + 117f0f3 commit e61e4b8
Show file tree
Hide file tree
Showing 25 changed files with 539 additions and 61 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -131,9 +131,10 @@ class BuildPlugin implements Plugin<Project> {
throw new GradleException("${minGradle} or above is required to build elasticsearch")
}

final GradleVersion maxGradle = GradleVersion.version('4.2')
if (currentGradleVersion >= maxGradle) {
throw new GradleException("${maxGradle} or above is not compatible with the elasticsearch build")
final GradleVersion gradle42 = GradleVersion.version('4.2')
final GradleVersion gradle43 = GradleVersion.version('4.3')
if (currentGradleVersion >= gradle42 && currentGradleVersion < gradle43) {
throw new GradleException("${currentGradleVersion} is not compatible with the elasticsearch build")
}

// enforce Java version
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,9 @@ public void move(String source, String target) throws IOException {
Path targetPath = path.resolve(target);
// If the target file exists then Files.move() behaviour is implementation specific
// the existing file might be replaced or this method fails by throwing an IOException.
assert !Files.exists(targetPath);
if (Files.exists(targetPath)) {
throw new FileAlreadyExistsException("blob [" + targetPath + "] already exists, cannot overwrite");
}
Files.move(sourcePath, targetPath, StandardCopyOption.ATOMIC_MOVE);
IOUtils.fsync(path, true);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,15 @@ public class FsBlobStore extends AbstractComponent implements BlobStore {

private final int bufferSizeInBytes;

private final boolean readOnly;

/**
 * Creates a filesystem-backed blob store rooted at {@code path}.
 * <p>
 * Honors the boolean {@code readonly} setting (default {@code false}): a
 * read-only store must not modify the filesystem, so the root directory is
 * only created when the store is writable.
 *
 * @param settings repository settings; reads {@code readonly} and
 *                 {@code repositories.fs.buffer_size} (default 100kb)
 * @param path     root directory for blobs
 * @throws IOException if the root directory cannot be created
 */
public FsBlobStore(Settings settings, Path path) throws IOException {
    super(settings);
    this.path = path;
    this.readOnly = settings.getAsBoolean("readonly", false);
    // Only a writable store may touch the filesystem; the stale unconditional
    // createDirectories() call that preceded this guard defeated the readonly
    // contract and has been removed.
    if (!this.readOnly) {
        Files.createDirectories(path);
    }
    this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes();
}

Expand Down Expand Up @@ -80,7 +85,9 @@ public void close() {

/**
 * Resolves the given blob path against the store root and, when the store is
 * writable, creates the corresponding directory hierarchy.
 *
 * @param path the blob path to resolve
 * @return the resolved filesystem path (which may not exist for a read-only store)
 * @throws IOException if directory creation fails
 */
private synchronized Path buildAndCreate(BlobPath path) throws IOException {
    Path f = buildPath(path);
    // A read-only repository must never write to disk; the stale unconditional
    // createDirectories() call that preceded this guard made the readOnly
    // check dead code and has been removed.
    if (!readOnly) {
        Files.createDirectories(f);
    }
    return f;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.NormsFieldExistsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.settings.Settings;
Expand Down Expand Up @@ -280,10 +280,10 @@ public String typeName() {

@Override
public Query existsQuery(QueryShardContext context) {
if (hasDocValues()) {
return new DocValuesFieldExistsQuery(name());
} else {
if (omitNorms()) {
return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name()));
} else {
return new NormsFieldExistsQuery(name());
}
}

Expand Down Expand Up @@ -345,7 +345,9 @@ protected void parseCreateField(ParseContext context, List<IndexableField> field
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
Field field = new Field(fieldType().name(), value, fieldType());
fields.add(field);
createFieldNamesField(context, fields);
if (fieldType().omitNorms()) {
createFieldNamesField(context, fields);
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ protected XContentBuilder doXContentBody(XContentBuilder builder, Params params)
builder.nullField(Fields.MIN);
builder.nullField(Fields.MAX);
builder.nullField(Fields.AVG);
builder.nullField(Fields.SUM);
builder.field(Fields.SUM, 0.0d);
}
otherStatsToXContent(builder, params);
return builder;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,15 @@ public void onFailure(String source, Exception e) {
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
}

@Override
public void onNoLongerMaster(String source) {
// We are no longer the master, so don't attempt any cleanup here;
// the newly elected master is responsible for it.
logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId());
// Fail the user's create-snapshot request with the reason for the abort.
userCreateSnapshotListener.onFailure(
new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization"));
}

@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
// The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
Expand Down Expand Up @@ -473,6 +482,10 @@ public void onFailure(Exception e) {
cleanupAfterError(e);
}

// Invoked when this node stops being the master while error cleanup is
// pending: propagate the failure to the user's create-snapshot listener.
// NOTE(review): `e` is not declared in this method — presumably the exception
// captured by the enclosing listener instance; confirm against the enclosing class.
public void onNoLongerMaster(String source) {
userCreateSnapshotListener.onFailure(e);
}

private void cleanupAfterError(Exception exception) {
if(snapshotCreated) {
try {
Expand Down Expand Up @@ -628,7 +641,8 @@ private SnapshotShardFailure findShardFailure(List<SnapshotShardFailure> shardFa
public void applyClusterState(ClusterChangedEvent event) {
try {
if (event.localNodeMaster()) {
if (event.nodesRemoved()) {
// We don't remove old master when master flips anymore. So, we need to check for change in master
if (event.nodesRemoved() || event.previousState().nodes().isLocalNodeElectedMaster() == false) {
processSnapshotsOnRemovedNodes(event);
}
if (event.routingTableChanged()) {
Expand Down Expand Up @@ -981,7 +995,7 @@ private void removeSnapshotFromClusterState(final Snapshot snapshot, final Snaps
* @param listener listener to notify when snapshot information is removed from the cluster state
*/
private void removeSnapshotFromClusterState(final Snapshot snapshot, final SnapshotInfo snapshotInfo, final Exception failure,
@Nullable ActionListener<SnapshotInfo> listener) {
@Nullable CleanupAfterErrorListener listener) {
clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() {

@Override
Expand Down Expand Up @@ -1013,6 +1027,13 @@ public void onFailure(String source, Exception e) {
}
}

@Override
public void onNoLongerMaster(String source) {
// The cleanup listener is optional; forward the master-change
// notification only when one was supplied.
if (listener != null) {
listener.onNoLongerMaster(source);
}
}

@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
for (SnapshotCompletionListener listener : snapshotCompletionListeners) {
Expand Down Expand Up @@ -1183,9 +1204,16 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh
if (completedSnapshot.equals(snapshot)) {
logger.debug("deleted snapshot completed - deleting files");
removeListener(this);
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() ->
deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(),
listener, true)
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
try {
deleteSnapshot(completedSnapshot.getRepository(), completedSnapshot.getSnapshotId().getName(),
listener, true);

} catch (Exception ex) {
logger.warn((Supplier<?>) () ->
new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
}
}
);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,14 @@

import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.blobstore.fs.FsBlobStore;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.repositories.ESBlobStoreTestCase;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

@LuceneTestCase.SuppressFileSystems("ExtrasFS")
Expand All @@ -35,4 +37,39 @@ protected BlobStore newBlobStore() throws IOException {
Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
return new FsBlobStore(settings, tempDir);
}

/**
 * Verifies the {@code readonly} repository setting: a read-only blob store
 * must never touch the filesystem (no root or container directories created),
 * while a writable store (explicit {@code readonly: false} or the default)
 * creates its directories and round-trips blob data.
 */
public void testReadOnly() throws Exception {
    Path root = createTempDir().resolve("bar");

    // Read-only store: neither the root nor the container path may appear on disk.
    Settings readOnlySettings = Settings.builder().put("readonly", true).build();
    try (FsBlobStore store = new FsBlobStore(readOnlySettings, root)) {
        assertFalse(Files.exists(root));
        BlobPath containerId = BlobPath.cleanPath().add("foo");
        store.blobContainer(containerId);
        assertFalse(Files.exists(resolveContainerPath(store.path(), containerId)));
    }

    // Writable store: directories exist and a written blob reads back identically.
    Settings writableSettings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("readonly", false).build();
    try (FsBlobStore store = new FsBlobStore(writableSettings, root)) {
        assertTrue(Files.exists(root));
        BlobPath containerId = BlobPath.cleanPath().add("foo");
        BlobContainer container = store.blobContainer(containerId);
        Path containerPath = resolveContainerPath(store.path(), containerId);
        assertTrue(Files.exists(containerPath));
        assertTrue(Files.isDirectory(containerPath));

        byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
        writeBlob(container, "test", new BytesArray(data));
        assertArrayEquals(readBlobFully(container, "test", data.length), data);
        assertTrue(container.blobExists("test"));
    }
}

/** Resolves every element of {@code blobPath} in order under {@code base}. */
private static Path resolveContainerPath(Path base, BlobPath blobPath) {
    Path resolved = base;
    for (String segment : blobPath) {
        resolved = resolved.resolve(segment);
    }
    return resolved;
}
}
Loading

0 comments on commit e61e4b8

Please sign in to comment.