diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7e34dec9e4bf8..3cdbf099ec1d0 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,6 +1,6 @@ # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy elasticsearch = 7.0.0-alpha1 -lucene = 7.1.0-snapshot-f33ed4ba12a +lucene = 7.1.0 # optional dependencies spatial4j = 0.6 diff --git a/core/licenses/lucene-analyzers-common-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-analyzers-common-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 6863607ab7906..0000000000000 --- a/core/licenses/lucene-analyzers-common-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a59ac3bdd17becc848f319fb77994060661c2c71 \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-7.1.0.jar.sha1 b/core/licenses/lucene-analyzers-common-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..880d261cb89d2 --- /dev/null +++ b/core/licenses/lucene-analyzers-common-7.1.0.jar.sha1 @@ -0,0 +1 @@ +a508bf6b580471ee568dab7d2acfedfa5aadce70 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-backward-codecs-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 2bc2287cf2d57..0000000000000 --- a/core/licenses/lucene-backward-codecs-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47f560086db8683b5be26911fae3721d8b0da465 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-7.1.0.jar.sha1 b/core/licenses/lucene-backward-codecs-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..ec597be207dd5 --- /dev/null +++ b/core/licenses/lucene-backward-codecs-7.1.0.jar.sha1 @@ -0,0 +1 @@ +804a7ce82bba3d085733486bfde4846ecb77ce01 \ No newline at end of file diff --git a/core/licenses/lucene-core-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-core-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index a1079a6df66eb..0000000000000 --- a/core/licenses/lucene-core-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -17bd8e886ac2e763c27a507e697f78e43103afd3 \ No newline at end of file diff --git a/core/licenses/lucene-core-7.1.0.jar.sha1 b/core/licenses/lucene-core-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..9e8112996604d --- /dev/null +++ b/core/licenses/lucene-core-7.1.0.jar.sha1 @@ -0,0 +1 @@ +dd291b7ebf4845483895724d2562214dc7f40049 \ No newline at end of file diff --git a/core/licenses/lucene-grouping-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-grouping-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 036e7d5b1563d..0000000000000 --- a/core/licenses/lucene-grouping-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb7d5f5f6dd0bada3991828b8687a35c90de76ca \ No newline at end of file diff --git a/core/licenses/lucene-grouping-7.1.0.jar.sha1 b/core/licenses/lucene-grouping-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..3c4963f4460e9 --- /dev/null +++ b/core/licenses/lucene-grouping-7.1.0.jar.sha1 @@ -0,0 +1 @@ +0732d16c16421fca058a2a07ca4081ec7696365b \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-highlighter-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 28e24b897f416..0000000000000 --- 
a/core/licenses/lucene-highlighter-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f024368b33bfb7c1589aaf424992e474c4e3be38 \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-7.1.0.jar.sha1 b/core/licenses/lucene-highlighter-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..87f841e14677b --- /dev/null +++ b/core/licenses/lucene-highlighter-7.1.0.jar.sha1 @@ -0,0 +1 @@ +596550daabae765ad685112e0fe7c4f0fdfccb3f \ No newline at end of file diff --git a/core/licenses/lucene-join-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-join-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 684763a1fa6a7..0000000000000 --- a/core/licenses/lucene-join-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7b525cb2e2c8403543fefc09b972c78b86d2f0da \ No newline at end of file diff --git a/core/licenses/lucene-join-7.1.0.jar.sha1 b/core/licenses/lucene-join-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..774ec13c61451 --- /dev/null +++ b/core/licenses/lucene-join-7.1.0.jar.sha1 @@ -0,0 +1 @@ +5f26dd64c195258a81175772ef7fe105e7d60a26 \ No newline at end of file diff --git a/core/licenses/lucene-memory-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-memory-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 47d82587ef0e1..0000000000000 --- a/core/licenses/lucene-memory-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -61cc3ced15fa80d8f97affe0c8df9818eeb8af49 \ No newline at end of file diff --git a/core/licenses/lucene-memory-7.1.0.jar.sha1 b/core/licenses/lucene-memory-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..17264d5e43df8 --- /dev/null +++ b/core/licenses/lucene-memory-7.1.0.jar.sha1 @@ -0,0 +1 @@ +3ef64c58d0c09ca40d848efa96b585b7476271f2 \ No newline at end of file diff --git a/core/licenses/lucene-misc-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-misc-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index adfb48c3cbd46..0000000000000 --- a/core/licenses/lucene-misc-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03a71b5875d25576c9f8992822db65fb181f4328 \ No newline at end of file diff --git a/core/licenses/lucene-misc-7.1.0.jar.sha1 b/core/licenses/lucene-misc-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..6fb92dee458e0 --- /dev/null +++ b/core/licenses/lucene-misc-7.1.0.jar.sha1 @@ -0,0 +1 @@ +1496ee5fa62206ee5ddf51042a340d6a9ee3b5de \ No newline at end of file diff --git a/core/licenses/lucene-queries-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-queries-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index c1f9c01c22123..0000000000000 --- a/core/licenses/lucene-queries-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c07c15b2c6f8bd3d75e0f53fff5631f012bff98 \ No newline at end of file diff --git a/core/licenses/lucene-queries-7.1.0.jar.sha1 b/core/licenses/lucene-queries-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..a4028cc2149cb --- /dev/null +++ b/core/licenses/lucene-queries-7.1.0.jar.sha1 @@ -0,0 +1 @@ +1554920ab207a3245fa408d022a5c90ad3a1fea3 \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-queryparser-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 380eb78880477..0000000000000 --- a/core/licenses/lucene-queryparser-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e0a7815981d096d96e7dc41b1c063cd78c91132d \ No newline at end of 
file diff --git a/core/licenses/lucene-queryparser-7.1.0.jar.sha1 b/core/licenses/lucene-queryparser-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..85c745ea911b7 --- /dev/null +++ b/core/licenses/lucene-queryparser-7.1.0.jar.sha1 @@ -0,0 +1 @@ +5767c15c5ee97926829fd8a4337e434fa95f3c08 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-sandbox-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 5348255939923..0000000000000 --- a/core/licenses/lucene-sandbox-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ea14867a6bc545fb2e09dd1f31b48523cdbc040 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-7.1.0.jar.sha1 b/core/licenses/lucene-sandbox-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..4fedc42d2f10d --- /dev/null +++ b/core/licenses/lucene-sandbox-7.1.0.jar.sha1 @@ -0,0 +1 @@ +691f7b9ac05f3ad2ac7e80733ef70247904bd3ae \ No newline at end of file diff --git a/core/licenses/lucene-spatial-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-spatial-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 18afbd6349384..0000000000000 --- a/core/licenses/lucene-spatial-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58ce824ebc6126e37ff232c96a561a659377a873 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-7.1.0.jar.sha1 b/core/licenses/lucene-spatial-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..3cc891f4b4d85 --- /dev/null +++ b/core/licenses/lucene-spatial-7.1.0.jar.sha1 @@ -0,0 +1 @@ +6c64c04d802badb800516a8a574cb993929c3805 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-spatial-extras-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index b07b6ca6d9c3e..0000000000000 --- a/core/licenses/lucene-spatial-extras-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fcd89a8cda5ee2049c189b06b5e30258b1aa198 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-7.1.0.jar.sha1 b/core/licenses/lucene-spatial-extras-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..066098d5571f6 --- /dev/null +++ b/core/licenses/lucene-spatial-extras-7.1.0.jar.sha1 @@ -0,0 +1 @@ +3f1bc1aada8f06b176b782da24b9d7ad9641c41a \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-spatial3d-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index a3be4c237dd8e..0000000000000 --- a/core/licenses/lucene-spatial3d-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d1ada8fbb1b2bbbc88e9f29e28802a7b44a6665 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-7.1.0.jar.sha1 b/core/licenses/lucene-spatial3d-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..32277c393c94e --- /dev/null +++ b/core/licenses/lucene-spatial3d-7.1.0.jar.sha1 @@ -0,0 +1 @@ +8ded650aed23efb775f17be496e3e3870214e23b \ No newline at end of file diff --git a/core/licenses/lucene-suggest-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/core/licenses/lucene-suggest-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index e7534fc3cf7c0..0000000000000 --- a/core/licenses/lucene-suggest-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb7f18e6a81899e3ac95760b56bea21ebf143cf9 \ No newline at end of file diff --git a/core/licenses/lucene-suggest-7.1.0.jar.sha1 
b/core/licenses/lucene-suggest-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..1d2d0585c63c1 --- /dev/null +++ b/core/licenses/lucene-suggest-7.1.0.jar.sha1 @@ -0,0 +1 @@ +8d0ed1589ebdccf34e888c6efc0134a13a238c85 \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 3a13915b3aaea..c4369a30586d0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -155,7 +155,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re String error = null; ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(), request.nowInMillis(), request.filteringAliases()); - SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null); + SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT); try { ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query()); searchContext.parsedQuery(parsedQuery); diff --git a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 02195c5a32c33..c30dfd360a08b 100644 --- a/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/core/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -90,7 +90,7 @@ protected void resolveRequest(ClusterState state, InternalRequest request) { protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException { ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId, new String[]{request.type()}, request.nowInMillis, request.filteringAlias()); - SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null); + SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT); Engine.GetResult result = null; try { Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id()); diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index 4d42ad334a9f0..560379a6ce2f6 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -21,12 +21,14 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -112,11 +114,14 @@ public Exception getFailure() { 
private Item[] items; + private long tookInMillis; + MultiSearchResponse() { } - public MultiSearchResponse(Item[] items) { + public MultiSearchResponse(Item[] items, long tookInMillis) { this.items = items; + this.tookInMillis = tookInMillis; } @Override @@ -131,6 +136,13 @@ public Item[] getResponses() { return this.items; } + /** + * How long the msearch took. + */ + public TimeValue getTook() { + return new TimeValue(tookInMillis); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -138,6 +150,9 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = Item.readItem(in); } + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + tookInMillis = in.readVLong(); + } } @Override @@ -147,11 +162,15 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeVLong(tookInMillis); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + builder.field("took", tookInMillis); builder.startArray(Fields.RESPONSES); for (Item item : items) { builder.startObject(); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index b65cd4d55516a..9dec3be5c1b11 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -34,16 +34,18 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.List; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.LongSupplier; public class TransportMultiSearchAction extends HandledTransportAction { private final int availableProcessors; private final ClusterService clusterService; private final TransportAction searchAction; + private final LongSupplier relativeTimeProvider; @Inject public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, @@ -53,19 +55,23 @@ public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, Tran this.clusterService = clusterService; this.searchAction = searchAction; this.availableProcessors = EsExecutors.numberOfProcessors(settings); + this.relativeTimeProvider = System::nanoTime; } TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService, ClusterService clusterService, TransportAction searchAction, - IndexNameExpressionResolver resolver, int availableProcessors) { + IndexNameExpressionResolver resolver, int availableProcessors, LongSupplier relativeTimeProvider) { super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, resolver, MultiSearchRequest::new); this.clusterService = clusterService; this.searchAction = searchAction; this.availableProcessors = availableProcessors; + this.relativeTimeProvider = relativeTimeProvider; } @Override protected void doExecute(MultiSearchRequest request, ActionListener listener) { + final long relativeStartTime = relativeTimeProvider.getAsLong(); + ClusterState clusterState = clusterService.state(); 
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); @@ -85,7 +91,7 @@ protected void doExecute(MultiSearchRequest request, ActionListener requests, final AtomicArray responses, final AtomicInteger responseCounter, - final ActionListener listener) { + final ActionListener listener, + final long relativeStartTime) { SearchRequestSlot request = requests.poll(); if (request == null) { /* @@ -155,16 +162,25 @@ private void handleResponse(final int responseSlot, final MultiSearchResponse.It } else { if (thread == Thread.currentThread()) { // we are on the same thread, we need to fork to another thread to avoid recursive stack overflow on a single thread - threadPool.generic().execute(() -> executeSearch(requests, responses, responseCounter, listener)); + threadPool.generic() + .execute(() -> executeSearch(requests, responses, responseCounter, listener, relativeStartTime)); } else { // we are on a different thread (we went asynchronous), it's safe to recurse - executeSearch(requests, responses, responseCounter, listener); + executeSearch(requests, responses, responseCounter, listener, relativeStartTime); } } } private void finish() { - listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()]))); + listener.onResponse(new MultiSearchResponse(responses.toArray(new MultiSearchResponse.Item[responses.length()]), + buildTookInMillis())); + } + + /** + * Builds how long it took to execute the msearch. + */ + private long buildTookInMillis() { + return TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - relativeStartTime); } }); } @@ -178,7 +194,5 @@ static final class SearchRequestSlot { this.request = request; this.responseSlot = responseSlot; } - } - } diff --git a/core/src/main/java/org/elasticsearch/index/engine/Engine.java b/core/src/main/java/org/elasticsearch/index/engine/Engine.java index 4299fa0cb6ea3..42805a19b340f 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -187,6 +187,9 @@ public MergeStats getMergeStats() { /** returns the history uuid for the engine */ public abstract String getHistoryUUID(); + /** Returns how many bytes we are currently moving from heap to disk */ + public abstract long getWritingBytes(); + /** * A throttling class that can be activated, causing the * {@code acquireThrottle} method to block on a lock when throttling @@ -707,7 +710,7 @@ protected void writerSegmentStats(SegmentsStats stats) { } /** How much heap is used that would be freed by a refresh. Note that this may throw {@link AlreadyClosedException}. 
*/ - public abstract long getIndexBufferRAMBytesUsed(); + public abstract long getIndexBufferRAMBytesUsed(); protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) { ensureOpen(); diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 98273fa3894f0..9dd81d3a28165 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -140,6 +140,12 @@ public class InternalEngine extends Engine { private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1); private final CounterMetric numVersionLookups = new CounterMetric(); private final CounterMetric numIndexVersionsLookups = new CounterMetric(); + /** + * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this + * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents + * being indexed/deleted. + */ + private final AtomicLong writingBytes = new AtomicLong(); @Nullable private final String historyUUID; @@ -409,6 +415,12 @@ public String getHistoryUUID() { return historyUUID; } + /** Returns how many bytes we are currently moving from indexing buffer to segments on disk */ + @Override + public long getWritingBytes() { + return writingBytes.get(); + } + /** * Reads the current stored translog ID from the IW commit data. If the id is not found, recommits the current * translog id into lucene and returns null. @@ -1217,21 +1229,26 @@ public void refresh(String source) throws EngineException { } final void refresh(String source, SearcherScope scope) throws EngineException { + long bytes = 0; // we obtain a read lock here, since we don't want a flush to happen while we are refreshing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) try (ReleasableLock lock = readLock.acquire()) { ensureOpen(); + bytes = indexWriter.ramBytesUsed(); switch (scope) { case EXTERNAL: // even though we maintain 2 managers we really do the heavy-lifting only once. // the second refresh will only do the extra work we have to do for warming caches etc. + writingBytes.addAndGet(bytes); externalSearcherManager.maybeRefreshBlocking(); // the break here is intentional we never refresh both internal / external together break; case INTERNAL: + final long versionMapBytes = versionMap.ramBytesUsedForRefresh(); + bytes += versionMapBytes; + writingBytes.addAndGet(bytes); internalSearcherManager.maybeRefreshBlocking(); break; - default: throw new IllegalArgumentException("unknown scope: " + scope); } @@ -1245,6 +1262,8 @@ final void refresh(String source, SearcherScope scope) throws EngineException { e.addSuppressed(inner); } throw new RefreshFailedEngineException(shardId, e); + } finally { + writingBytes.addAndGet(-bytes); } // TODO: maybe we should just put a scheduled job in threadPool? 
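For context on the refresh hunk above: InternalEngine now owns the writing-bytes accounting that previously lived in IndexShard. The counter is charged with the index-writer bytes (plus, for internal refreshes, the version-map bytes) before the blocking refresh, and released in a finally block so IndexingMemoryController always observes a consistent value even when the refresh fails. A minimal sketch of that pattern, using hypothetical names rather than the actual Elasticsearch types:

```java
import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of the accounting pattern in InternalEngine#refresh above.
// The real code charges indexWriter.ramBytesUsed() plus, for internal refreshes,
// versionMap.ramBytesUsedForRefresh(); all names here are hypothetical.
class WritingBytesSketch {
    private final AtomicLong writingBytes = new AtomicLong();

    /** Polled by a memory controller to see how much heap is currently moving to disk. */
    long getWritingBytes() {
        return writingBytes.get();
    }

    void refresh(long bytesAboutToBeWritten, Runnable blockingRefresh) {
        writingBytes.addAndGet(bytesAboutToBeWritten); // charge before refreshing
        try {
            blockingRefresh.run(); // e.g. a searcher manager's maybeRefreshBlocking()
        } finally {
            writingBytes.addAndGet(-bytesAboutToBeWritten); // always release, even on failure
        }
    }
}
```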
@@ -1258,24 +1277,7 @@ final void refresh(String source, SearcherScope scope) throws EngineException { public void writeIndexingBuffer() throws EngineException { // we obtain a read lock here, since we don't want a flush to happen while we are writing // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - final long versionMapBytes = versionMap.ramBytesUsedForRefresh(); - final long indexingBufferBytes = indexWriter.ramBytesUsed(); - logger.debug("use refresh to write indexing buffer (heap size=[{}]), to also clear version map (heap size=[{}])", - new ByteSizeValue(indexingBufferBytes), new ByteSizeValue(versionMapBytes)); - refresh("write indexing buffer", SearcherScope.INTERNAL); - } catch (AlreadyClosedException e) { - failOnTragicEvent(e); - throw e; - } catch (Exception e) { - try { - failEngine("writeIndexingBuffer failed", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new RefreshFailedEngineException(shardId, e); - } + refresh("write indexing buffer", SearcherScope.INTERNAL); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index 14b3291f441a9..11804c2e88e1d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.Field; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -35,7 +34,6 @@ public class ParsedDocument { private final Field version; private final String id, type; - private final BytesRef uid; private final SeqNoFieldMapper.SequenceIDFields seqID; private final String routing; @@ -62,7 +60,6 @@ public ParsedDocument(Field version, this.seqID = seqID; this.id = id; this.type = type; - this.uid = Uid.createUidAsBytes(type, id); this.routing = routing; this.documents = documents; this.source = source; @@ -140,9 +137,7 @@ public void addDynamicMappingsUpdate(Mapping update) { @Override public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Document ").append("uid[").append(uid).append("] doc [").append(documents).append("]"); - return sb.toString(); + return "Document uid[" + Uid.createUidAsBytes(type, id) + "] doc [" + documents + ']'; } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index c6d6e1c5aa60e..d1cc9306e13f0 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -182,12 +182,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl private final QueryCachingPolicy cachingPolicy; private final Supplier indexSortSupplier; - /** - * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this - * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents - * being indexed/deleted. 
- */ - private final AtomicLong writingBytes = new AtomicLong(); private final SearchOperationListener searchOperationListener; protected volatile ShardRouting shardRouting; @@ -323,12 +317,6 @@ public Store store() { public Sort getIndexSort() { return indexSortSupplier.get(); } - /** - * returns true if this shard supports indexing (i.e., write) operations. - */ - public boolean canIndex() { - return true; - } public ShardGetService getService() { return this.getService; @@ -839,34 +827,21 @@ public Engine.GetResult get(Engine.Get get) { */ public void refresh(String source) { verifyNotClosed(); - - if (canIndex()) { - long bytes = getEngine().getIndexBufferRAMBytesUsed(); - writingBytes.addAndGet(bytes); - try { - if (logger.isTraceEnabled()) { - logger.trace("refresh with source [{}] indexBufferRAMBytesUsed [{}]", source, new ByteSizeValue(bytes)); - } - getEngine().refresh(source); - } finally { - if (logger.isTraceEnabled()) { - logger.trace("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId()); - } - writingBytes.addAndGet(-bytes); - } - } else { - if (logger.isTraceEnabled()) { - logger.trace("refresh with source [{}]", source); - } - getEngine().refresh(source); + if (logger.isTraceEnabled()) { + logger.trace("refresh with source [{}]", source); } + getEngine().refresh(source); } /** * Returns how many bytes we are currently moving from heap to disk */ public long getWritingBytes() { - return writingBytes.get(); + Engine engine = getEngineOrNull(); + if (engine == null) { + return 0; + } + return engine.getWritingBytes(); } public RefreshStats refreshStats() { @@ -1671,24 +1646,9 @@ private void handleRefreshException(Exception e) { * Called when our shard is using too much heap and should move buffered indexed/deleted documents to disk. 
*/ public void writeIndexingBuffer() { - if (canIndex() == false) { - throw new UnsupportedOperationException(); - } try { Engine engine = getEngine(); - long bytes = engine.getIndexBufferRAMBytesUsed(); - - // NOTE: this can be an overestimate by up to 20%, if engine uses IW.flush not refresh, because version map - // memory is low enough, but this is fine because after the writes finish, IMC will poll again and see that - // there's still up to the 20% being used and continue writing if necessary: - logger.debug("add [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId()); - writingBytes.addAndGet(bytes); - try { - engine.writeIndexingBuffer(); - } finally { - writingBytes.addAndGet(-bytes); - logger.debug("remove [{}] writing bytes for shard [{}]", new ByteSizeValue(bytes), shardId()); - } + engine.writeIndexingBuffer(); } catch (Exception e) { handleRefreshException(e); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index e1c88fff3bee3..73ba9342175d4 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/core/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -152,8 +152,7 @@ ByteSizeValue indexingBufferSize() { protected List availableShards() { List availableShards = new ArrayList<>(); for (IndexShard shard : indexShards) { - // shadow replica doesn't have an indexing buffer - if (shard.canIndex() && CAN_WRITE_INDEX_BUFFER_STATES.contains(shard.state())) { + if (CAN_WRITE_INDEX_BUFFER_STATES.contains(shard.state())) { availableShards.add(shard); } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 8277f2733a46a..49ab665295793 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -30,7 +30,6 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; @@ -82,7 +81,6 @@ import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhase; -import org.elasticsearch.search.query.QueryPhaseExecutionException; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.ScrollQuerySearchResult; @@ -526,7 +524,7 @@ private SearchContext findContext(long id, TransportRequest request) throws Sear } final SearchContext createAndPutContext(ShardSearchRequest request) throws IOException { - SearchContext context = createContext(request, null); + SearchContext context = createContext(request); boolean success = false; try { putContext(context); @@ -543,8 +541,8 @@ final SearchContext createAndPutContext(ShardSearchRequest request) throws IOExc } } - final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException { - final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher); + final SearchContext createContext(ShardSearchRequest request) 
throws IOException { + final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout); try { if (request.scroll() != null) { context.scrollContext(new ScrollContext()); @@ -580,18 +578,18 @@ final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.S return context; } - public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) + public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout) throws IOException { - return createSearchContext(request, timeout, searcher, true); + return createSearchContext(request, timeout, true); } - private DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher, + private DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, boolean assertAsyncActions) throws IOException { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().getId()); SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId(), request.getClusterAlias(), OriginalIndices.NONE); - Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher; + Engine.Searcher engineSearcher = indexShard.acquireSearcher("search"); final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, bigArrays, threadPool.estimatedTimeInMillisCounter(), timeout, fetchPhase, @@ -947,7 +945,7 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, String... 
*/ public boolean canMatch(ShardSearchRequest request) throws IOException { assert request.searchType() == SearchType.QUERY_THEN_FETCH : "unexpected search type: " + request.searchType(); - try (DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, null, false)) { + try (DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, false)) { SearchSourceBuilder source = context.request().source(); if (canRewriteToMatchNone(source)) { QueryBuilder queryBuilder = source.query(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java index 172e3691127d1..6d7ae0cddc0df 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java @@ -192,7 +192,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th builder.nullField(Fields.MIN); builder.nullField(Fields.MAX); builder.nullField(Fields.AVG); - builder.nullField(Fields.SUM); + builder.field(Fields.SUM, 0.0d); } otherStatsToXContent(builder, params); return builder; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index deb9a88e4fd01..7268e0f72380b 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
-grant codeBase "${codebase.lucene-core-7.1.0-snapshot-f33ed4ba12a.jar}" { +grant codeBase "${codebase.lucene-core-7.1.0.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-7.1.0-snapshot-f33ed4ba12a.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-7.1.0-snapshot-f33ed4ba12a.jar}" { +grant codeBase "${codebase.lucene-misc-7.1.0.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index c5a9a2b00d312..453621b138e0a 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-7.1.0-snapshot-f33ed4ba12a.jar}" { +grant codeBase "${codebase.lucene-test-framework-7.1.0.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS diff --git a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index b84dafb4f6d5b..0951380fcf4aa 100644 --- a/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -101,7 +101,8 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL mSearchResponses.add(new MultiSearchResponse.Item(response, null)); } - listener.onResponse(new MultiSearchResponse(mSearchResponses.toArray(new MultiSearchResponse.Item[0]))); + listener.onResponse( + new MultiSearchResponse(mSearchResponses.toArray(new MultiSearchResponse.Item[0]), randomIntBetween(1, 10000))); } }; @@ -153,10 +154,11 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits, null, null, null, false, null, 1); SearchResponse response = mockSearchPhaseContext.buildSearchResponse(internalSearchResponse, null); - listener.onResponse(new MultiSearchResponse(new MultiSearchResponse.Item[]{ - new MultiSearchResponse.Item(null, new RuntimeException("boom")), - new MultiSearchResponse.Item(response, null) - })); + listener.onResponse(new MultiSearchResponse( + new MultiSearchResponse.Item[]{ + new MultiSearchResponse.Item(null, new RuntimeException("boom")), + new MultiSearchResponse.Item(response, null) + }, randomIntBetween(1, 10000))); } }; diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java new file mode 100644 index 0000000000000..73743230d1a14 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -0,0 +1,199 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.List; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongSupplier; + +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * MultiSearch took time tests + */ +public class MultiSearchActionTookTests extends ESTestCase { + + private ThreadPool threadPool; + private ClusterService clusterService; + + @BeforeClass + public static void beforeClass() { + } + + @AfterClass + public static void afterClass() { + } + + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("MultiSearchActionTookTests"); + clusterService = createClusterService(threadPool); + } + + @After + public void tearDown() throws Exception { + clusterService.close(); + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + super.tearDown(); + } + + // test unit conversion using a controlled clock + public void testTookWithControlledClock() throws Exception { + runTestTook(true); + } + + // test using System#nanoTime + public void testTookWithRealClock() throws Exception { + runTestTook(false); + } + + private void runTestTook(boolean controlledClock) throws Exception { + MultiSearchRequest
multiSearchRequest = new MultiSearchRequest().add(new SearchRequest()); + AtomicLong expected = new AtomicLong(); + + TransportMultiSearchAction action = createTransportMultiSearchAction(controlledClock, expected); + + action.doExecute(multiSearchRequest, new ActionListener() { + @Override + public void onResponse(MultiSearchResponse multiSearchResponse) { + if (controlledClock) { + assertThat(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS), + equalTo(multiSearchResponse.getTook().getMillis())); + } else { + assertThat(multiSearchResponse.getTook().getMillis(), + greaterThanOrEqualTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS))); + } + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }); + } + + private TransportMultiSearchAction createTransportMultiSearchAction(boolean controlledClock, AtomicLong expected) { + Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); + TaskManager taskManager = mock(TaskManager.class); + TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null) { + @Override + public TaskManager getTaskManager() { + return taskManager; + } + }; + ActionFilters actionFilters = new ActionFilters(new HashSet<>()); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build()); + IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY); + + final int availableProcessors = Runtime.getRuntime().availableProcessors(); + AtomicInteger counter = new AtomicInteger(); + final List threadPoolNames = Arrays.asList(ThreadPool.Names.GENERIC, ThreadPool.Names.SAME); + Randomness.shuffle(threadPoolNames); + final ExecutorService commonExecutor = threadPool.executor(threadPoolNames.get(0)); + final Set requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>())); + + TransportAction searchAction = new TransportAction(Settings.EMPTY, + "action", threadPool, actionFilters, resolver, taskManager) { + @Override + protected void doExecute(SearchRequest request, ActionListener listener) { + requests.add(request); + commonExecutor.execute(() -> { + counter.decrementAndGet(); + listener.onResponse(new SearchResponse()); + }); + } + }; + + if (controlledClock) { + return new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, + availableProcessors, expected::get) { + @Override + void executeSearch(final Queue requests, final AtomicArray responses, + final AtomicInteger responseCounter, final ActionListener listener, long startTimeInNanos) { + expected.set(1000000); + super.executeSearch(requests, responses, responseCounter, listener, startTimeInNanos); + } + }; + } else { + return new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, + availableProcessors, System::nanoTime) { + + @Override + void executeSearch(final Queue requests, final AtomicArray responses, + final AtomicInteger responseCounter, final ActionListener listener, long startTimeInNanos) { + long elapsed = spinForAtLeastNMilliseconds(randomIntBetween(0, 10)); + expected.set(elapsed); + super.executeSearch(requests, responses, responseCounter, 
listener, startTimeInNanos); + } + }; + } + } + + static class Resolver extends IndexNameExpressionResolver { + + Resolver(Settings settings) { + super(settings); + } + + @Override + public String[] concreteIndexNames(ClusterState state, IndicesRequest request) { + return request.indices(); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index 3a162f302bc3b..e6de1d859d867 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -146,13 +146,16 @@ public void testSimpleAdd4() throws Exception { } public void testResponseErrorToXContent() throws IOException { + long tookInMillis = randomIntBetween(1, 1000); MultiSearchResponse response = new MultiSearchResponse( - new MultiSearchResponse.Item[]{ - new MultiSearchResponse.Item(null, new IllegalStateException("foobar")), - new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz")) - }); - - assertEquals("{\"responses\":[" + new MultiSearchResponse.Item[] { + new MultiSearchResponse.Item(null, new IllegalStateException("foobar")), + new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz")) + }, tookInMillis); + + assertEquals("{\"took\":" + + tookInMillis + + ",\"responses\":[" + "{" + "\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"}]," + "\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"},\"status\":500" diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index e811da82c47a8..4410507eef92e 100644 --- a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -102,8 +102,10 @@ protected void doExecute(SearchRequest request, ActionListener l }); } }; - TransportMultiSearchAction action = - new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, 10); + + TransportMultiSearchAction action = + new TransportMultiSearchAction(threadPool, actionFilters, transportService, clusterService, searchAction, resolver, 10, + System::nanoTime); // Execute the multi search api and fail if we find an error after executing: try { diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 5d166aaa628ba..92f018f282a43 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -248,8 +248,8 @@ public void testTimeout() throws IOException { new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f), - null); + 1.0f) + ); try { // the search context should inherit the default timeout assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); @@ -268,8 +268,8 @@ public void testTimeout() throws IOException { new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f), - null); + 1.0f) + ); try { // the search context should inherit the query timeout assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); @@ -296,12 +296,12 @@ public 
void testMaxDocvalueFieldsSearch() throws IOException { searchSourceBuilder.docValueField("field" + i); } try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)) { + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f))) { assertNotNull(context); searchSourceBuilder.docValueField("one_field_too_much"); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)); + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f))); assertEquals( "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. " + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", @@ -327,13 +327,13 @@ public void testMaxScriptFieldsSearch() throws IOException { new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); } try (SearchContext context = service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)) { + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f))) { assertNotNull(context); searchSourceBuilder.scriptField("anotherScriptField", new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> service.createContext(new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, - searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), null)); + searchSourceBuilder, new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f))); assertEquals( "Trying to retrieve too many script_fields. 
Must be less than or equal to: [" + maxScriptFields + "] but was [" + (maxScriptFields + 1) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java index 2e3437d2093e5..4ce29e4e0ed83 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalStatsTests.java @@ -19,6 +19,9 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.metrics.stats.InternalStats; @@ -26,6 +29,8 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalAggregationTestCase; +import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -80,7 +85,7 @@ static void assertStats(InternalStats aggregation, ParsedStats parsed) { long count = aggregation.getCount(); assertEquals(count, parsed.getCount()); // for count == 0, fields are rendered as `null`, so we test that we parse to default values used also in the reduce phase - assertEquals(count > 0 ? aggregation.getMin() : Double.POSITIVE_INFINITY , parsed.getMin(), 0); + assertEquals(count > 0 ? aggregation.getMin() : Double.POSITIVE_INFINITY, parsed.getMin(), 0); assertEquals(count > 0 ? aggregation.getMax() : Double.NEGATIVE_INFINITY, parsed.getMax(), 0); assertEquals(count > 0 ? aggregation.getSum() : 0, parsed.getSum(), 0); assertEquals(count > 0 ? 
aggregation.getAvg() : 0, parsed.getAvg(), 0); @@ -153,5 +158,55 @@ protected InternalStats mutateInstance(InternalStats instance) { } return new InternalStats(name, count, sum, min, max, formatter, pipelineAggregators, metaData); } + + public void testDoXContentBody() throws IOException { + // count is greater than zero + double min = randomDoubleBetween(-1000000, 1000000, true); + double max = randomDoubleBetween(-1000000, 1000000, true); + double sum = randomDoubleBetween(-1000000, 1000000, true); + int count = randomIntBetween(1, 10); + DocValueFormat format = randomNumericDocValueFormat(); + InternalStats internalStats = createInstance("stats", count, sum, min, max, format, Collections.emptyList(), null); + XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); + builder.startObject(); + internalStats.doXContentBody(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String expected = "{\n" + + " \"count\" : " + count + ",\n" + + " \"min\" : " + min + ",\n" + + " \"max\" : " + max + ",\n" + + " \"avg\" : " + internalStats.getAvg() + ",\n" + + " \"sum\" : " + sum; + if (format != DocValueFormat.RAW) { + expected += ",\n"+ + " \"min_as_string\" : \"" + format.format(internalStats.getMin()) + "\",\n" + + " \"max_as_string\" : \"" + format.format(internalStats.getMax()) + "\",\n" + + " \"avg_as_string\" : \"" + format.format(internalStats.getAvg()) + "\",\n" + + " \"sum_as_string\" : \"" + format.format(internalStats.getSum()) + "\""; + } + expected += "\n}"; + assertEquals(expected, builder.string()); + + // count is zero + format = randomNumericDocValueFormat(); + min = 0.0; + max = 0.0; + sum = 0.0; + count = 0; + internalStats = createInstance("stats", count, sum, min, max, format, Collections.emptyList(), null); + builder = JsonXContent.contentBuilder().prettyPrint(); + builder.startObject(); + internalStats.doXContentBody(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + assertEquals("{\n" + + " \"count\" : 0,\n" + + " \"min\" : null,\n" + + " \"max\" : null,\n" + + " \"avg\" : null,\n" + + " \"sum\" : 0.0\n" + + "}", builder.string()); + } } diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 6127d599acfee..65c9e55a365c1 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,6 +1,6 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 7.1.0-SNAPSHOT +:lucene_version: 7.1.0 :lucene_version_path: 7_1_0 :branch: master :jdk: 1.8.0_131 diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc index cbe8ad13dd2ed..e68350515eabf 100644 --- a/docs/reference/modules/discovery/zen.asciidoc +++ b/docs/reference/modules/discovery/zen.asciidoc @@ -56,9 +56,15 @@ The unicast discovery uses the <> module to perform As part of the ping process a master of the cluster is either elected or joined to. This is done automatically. The -`discovery.zen.ping_timeout` (which defaults to `3s`) allows for the -tweaking of election time to handle cases of slow or congested networks -(higher values assure less chance of failure). Once a node joins, it +`discovery.zen.ping_timeout` (which defaults to `3s`) determines how long the node +will wait before deciding whether to start an election or to join an existing cluster. +Three pings will be sent over this timeout interval. If no decision has been +reached by the end of the timeout, the pinging process restarts.
+In slow or congested networks, three seconds might not be enough for a node to become +aware of the other nodes in its environment before making an election decision. +In that case, increasing the timeout should be done with care, as it will slow down the +election process. +Once a node decides to join an existing, formed cluster, it will send a join request to the master (`discovery.zen.join_timeout`), with a timeout defaulting to 20 times the ping timeout. diff --git a/modules/lang-expression/licenses/lucene-expressions-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 3c02bb4144b29..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d20e7c480ff31a6e0a74c57770a1cfdfd56cf0c \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.1.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..29689e4e74f00 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.1.0.jar.sha1 @@ -0,0 +1 @@ +714927eb1d1db641bff9aa658e7e112c368f3e6d \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index b31c412920aa1..6da0f5433bae6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -64,7 +64,15 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except Unpooled.copiedBuffer(request.content()), request.headers(), request.trailingHeaders()); - final Netty4HttpRequest httpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); + final Netty4HttpRequest httpRequest; + try { + httpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); + } catch (Exception ex) { + if (pipelinedRequest != null) { + pipelinedRequest.release(); + } + throw ex; + } final Netty4HttpChannel channel = new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index ba04076220499..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0677be4595aecb88e8052e309dfd2e5909107a24 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.1.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..d1619f9fc1952 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.1.0.jar.sha1 @@ -0,0 +1 @@ +d9a640081289c9c50da08479ff198b579df71c26 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 979b8fc979573..0000000000000 --- 
a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fdf628e764891f3753d6aa6a4bd81a60543b249 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.1.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..89ca7249ffb5b --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.1.0.jar.sha1 @@ -0,0 +1 @@ +a2ca81efc31d857fa2ade104dcdb3fed20c95ea0 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 8d33e380b0691..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c23328881752f6590a463cb2a06acb1d952214d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.1.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..512ecb59fc5bd --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.1.0.jar.sha1 @@ -0,0 +1 @@ +42058220ada77c4c5340e8383f62a4398e10a8ce \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index fca544111a52e..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1e5db24f38cb9fff03942123dcbdf4e46fc3760e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.1.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..7e68fa106c108 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.1.0.jar.sha1 @@ -0,0 +1 @@ +2769d7f7330c78aea1edf4d8cd2eb111564c6800 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index 3d558870f95d8..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f3b84a76adf611b942c3e1ebce35686ce0e077d \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.1.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..3e59d9c24c4a1 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.1.0.jar.sha1 @@ -0,0 +1 @@ +2bec616dc5bb33df9d0beddf6a9565ef14a227ff \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.1.0-snapshot-f33ed4ba12a.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.1.0-snapshot-f33ed4ba12a.jar.sha1 deleted file mode 100644 index f6f61a2f32b64..0000000000000 --- 
a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.1.0-snapshot-f33ed4ba12a.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c33e9c9a609ac7e6cb5d3695b42d4fddded47ae0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.1.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.1.0.jar.sha1 new file mode 100644 index 0000000000000..55f36cb5f8eee --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.1.0.jar.sha1 @@ -0,0 +1 @@ +0e78e3e59b7bdf6e1aa24ff8289cc1246248f642 \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml index a34b110e910b4..f3380f513966d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query.yml @@ -584,7 +584,7 @@ setup: --- "Test exists query on _uid field": - skip: - version: " - 6.1.0" + version: " - 6.0.99" reason: exists on _uid not supported prior to 6.1.0 - do: search: @@ -599,7 +599,7 @@ setup: --- "Test exists query on _index field": - skip: - version: " - 6.1.0" + version: " - 6.0.99" reason: exists on _index not supported prior to 6.1.0 - do: search: @@ -614,7 +614,7 @@ setup: --- "Test exists query on _type field": - skip: - version: " - 6.1.0" + version: " - 6.0.99" reason: exists on _type not supported prior to 6.1.0 - do: search: @@ -653,7 +653,7 @@ setup: --- "Test exists query on _source field": - skip: - version: " - 6.1.0" + version: " - 6.0.99" reason: exists on _source not supported prior to 6.1.0 - do: catch: /query_shard_exception/
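For context on the `skip` changes in 160_exists_query.yml: version ranges in a REST test `skip` section are inclusive at both ends, so a range ending at `6.1.0` would also skip the very release that introduced `exists` support on these meta fields; ending the range at `6.0.99` keeps the tests running on 6.1.0 and later. Reassembled from the flattened hunk above (shown for the `_uid` case; the `_index`, `_type`, and `_source` sections follow the same pattern, and the exact indentation is reconstructed, not part of the diff text):

    ---
    "Test exists query on _uid field":
      - skip:
          version: " - 6.0.99"
          reason:  exists on _uid not supported prior to 6.1.0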