Skip to content

Commit

Permalink
Only aggregations require at least one shard request (elastic#115314)
Browse files Browse the repository at this point in the history
* unskipping shards only when aggs

* Update docs/changelog/115314.yaml

* fixed more tests

* null check for searchRequest.source()
  • Loading branch information
piergm authored Oct 25, 2024
1 parent bbd887a commit 7f573c6
Show file tree
Hide file tree
Showing 16 changed files with 70 additions and 56 deletions.
5 changes: 5 additions & 0 deletions docs/changelog/115314.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 115314
summary: Only aggregations require at least one shard request
area: Search
type: enhancement
issues: []
Original file line number Diff line number Diff line change
Expand Up @@ -412,7 +412,7 @@ public void testSkippingShards() throws Exception {
assertResponse(client().search(searchRequest), searchResponse -> {
ElasticsearchAssertions.assertNoSearchHits(searchResponse);
assertThat(searchResponse.getTotalShards(), equalTo(2));
assertThat(searchResponse.getSkippedShards(), equalTo(1));
assertThat(searchResponse.getSkippedShards(), equalTo(2));
assertThat(searchResponse.getSuccessfulShards(), equalTo(2));
});
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
Expand Down Expand Up @@ -580,13 +581,14 @@ public void testSortByField() throws Exception {

public void testSortByFieldOneClusterHasNoResults() throws Exception {
assumeMultiClusterSetup();
// set to a value greater than the number of shards to avoid differences due to the skipping of shards
// add an aggregation to avoid result differences caused by shard skipping when the query matches no documents
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
boolean onlyRemote = randomBoolean();
sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME));
sourceBuilder.sort("type.keyword", SortOrder.ASC);
sourceBuilder.sort("creationDate", SortOrder.DESC);
sourceBuilder.sort("user.keyword", SortOrder.ASC);
sourceBuilder.aggregation(AggregationBuilders.max("max").field("creationDate"));
CheckedConsumer<ObjectPath, IOException> responseChecker = response -> {
assertHits(response);
int size = response.evaluateArraySize("hits.hits");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -166,8 +166,7 @@
- match: { hits.total.value: 0 }
- match: { _shards.total: 2 }
- match: { _shards.successful: 2 }
# When all shards are skipped current logic returns 1 to produce a valid search result
- match: { _shards.skipped : 1}
- match: { _shards.skipped : 2}
- match: { _shards.failed: 0 }

# check that skipped when we don't match the alias with a terms query
Expand All @@ -183,8 +182,7 @@
- match: { hits.total.value: 0 }
- match: { _shards.total: 2 }
- match: { _shards.successful: 2 }
# When all shards are skipped current logic returns 1 to produce a valid search result
- match: { _shards.skipped : 1}
- match: { _shards.skipped : 2}
- match: { _shards.failed: 0 }

# check that skipped when we don't match the alias with a prefix query
Expand All @@ -200,8 +198,7 @@
- match: { hits.total.value: 0 }
- match: { _shards.total: 2 }
- match: { _shards.successful: 2 }
# When all shards are skipped current logic returns 1 to produce a valid search result
- match: { _shards.skipped : 1}
- match: { _shards.skipped : 2}
- match: { _shards.failed: 0 }

# check that skipped when we don't match the alias with a wildcard query
Expand All @@ -217,7 +214,6 @@
- match: { hits.total.value: 0 }
- match: { _shards.total: 2 }
- match: { _shards.successful: 2 }
# When all shards are skipped current logic returns 1 to produce a valid search result
- match: { _shards.skipped : 1}
- match: { _shards.skipped : 2}
- match: { _shards.failed: 0 }

Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ teardown:
- match: { hits.total.value: 0 }
- match: { _shards.total: 2 }
- match: { _shards.successful: 2 }
- match: { _shards.skipped : 1}
- match: { _shards.skipped : 2}
- match: { _shards.failed: 0 }

- do:
Expand All @@ -98,5 +98,5 @@ teardown:
- match: { hits.total.value: 0 }
- match: { _shards.total: 2 }
- match: { _shards.successful: 2 }
- match: { _shards.skipped : 1}
- match: { _shards.skipped : 2}
- match: { _shards.failed: 0 }
Original file line number Diff line number Diff line change
Expand Up @@ -214,7 +214,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
// with DFS_QUERY_THEN_FETCH, the local shards are never skipped
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
} else {
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1));
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards));
}
assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
Expand All @@ -224,7 +224,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
if (clusters.isCcsMinimizeRoundtrips()) {
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1));
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
} else {
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,11 @@ public void testProfileQuery() throws Exception {
prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH),
response -> {
assertNotNull("Profile response element should not be null", response.getProfileResults());
assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0));
if (response.getSkippedShards() == response.getSuccessfulShards()) {
assertEquals(0, response.getProfileResults().size());
} else {
assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0));
}
for (Map.Entry<String, SearchProfileShardResult> shard : response.getProfileResults().entrySet()) {
for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,11 +158,15 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio

assertTrue(stats.hasField("date_field"));
assertEquals(Set.of(UsageContext.POINTS), stats.get("date_field").keySet());
// can_match does not enter search stats
// there is a special case though where we have no hit but we need to get at least one search response in order
// to produce a valid search result with all the aggs etc., so we hit one of the two shards

long expectedShards = 2L * numShards;
if (numShards == 1) {
// with 1 shard and setPreFilterShardSize(1) we don't perform can_match phase but instead directly query the shard
expectedShards += 1;
}

assertEquals(
(2 * numShards) + 1,
expectedShards,
indicesAdmin().prepareStats("test")
.clear()
.setSearch(true)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1458,6 +1458,8 @@ public SearchPhase newSearchPhase(
SearchResponse.Clusters clusters
) {
if (preFilter) {
// only aggregations require contacting at least one shard even when no documents match
boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null;
return new CanMatchPreFilterSearchPhase(
logger,
searchTransportService,
Expand All @@ -1469,7 +1471,7 @@ public SearchPhase newSearchPhase(
shardIterators,
timeProvider,
task,
true,
requireAtLeastOneMatch,
searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis),
listener.delegateFailureAndWrap(
(l, iters) -> newSearchPhase(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -274,6 +274,8 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
boolean dfs = randomBoolean();
if (dfs) {
request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH);
} else {
request.getSearchRequest().searchType(SearchType.QUERY_THEN_FETCH);
}
RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp").from(100).to(2000);
request.getSearchRequest().source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10));
Expand All @@ -288,20 +290,30 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
assertTrue(response.isRunning());
SearchResponse.Clusters clusters = response.getSearchResponse().getClusters();
assertThat(clusters.getTotal(), equalTo(2));
assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults());

if (dfs) {
assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults());
} else {
assertFalse(
"search cluster results should not be marked as partial as all shards are skipped",
clusters.hasPartialResults()
);
}
SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
assertNotNull(localClusterSearchInfo);
assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING));
if (dfs) {
assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING));
} else {
assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL));
}

SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER);
assertNotNull(remoteClusterSearchInfo);
assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING));
} finally {
response.decRef();
}

SearchListenerPlugin.waitSearchStarted();
if (dfs) {
SearchListenerPlugin.waitSearchStarted();
}
SearchListenerPlugin.allowQueryPhase();

waitForSearchTasksToFinish();
Expand Down Expand Up @@ -331,7 +343,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
// no skipped shards locally when DFS_QUERY_THEN_FETCH is used
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
} else {
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1));
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards));
}
assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
Expand All @@ -341,7 +353,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
if (minimizeRoundtrips) {
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1));
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
} else {
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
}
Expand Down Expand Up @@ -377,7 +389,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
// no skipped shards locally when DFS_QUERY_THEN_FETCH is used
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0));
} else {
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1));
assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards));
}
assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0));
assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0));
Expand All @@ -387,7 +399,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except
assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards));
assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards));
if (minimizeRoundtrips) {
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1));
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
} else {
assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,6 @@

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;

public class SearchIdleTests extends ESSingleNodeTestCase {
Expand Down Expand Up @@ -133,8 +132,7 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept
// WHEN
assertResponse(search("test*", "constant_keyword", randomAlphaOfLength(5), 5), searchResponse -> {
assertEquals(RestStatus.OK, searchResponse.status());
// NOTE: we need an empty result from at least one shard
assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards());
assertEquals(idleIndexShardsCount + activeIndexShardsCount, searchResponse.getSkippedShards());
assertEquals(0, searchResponse.getFailedShards());
assertEquals(0, searchResponse.getHits().getHits().length);
});
Expand All @@ -144,12 +142,8 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept

assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse);

// If no shards match the can match phase then at least one shard gets queries for an empty response.
// However, this affects the search idle stats.
List<ShardStats> active = Arrays.stream(afterStatsResponse.getShards()).filter(s -> s.isSearchIdle() == false).toList();
assertThat(active, hasSize(1));
assertThat(active.get(0).getShardRouting().getIndexName(), equalTo("test1"));
assertThat(active.get(0).getShardRouting().id(), equalTo(0));
assertThat(active, hasSize(0));
}

public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.IndexSettings;
Expand Down Expand Up @@ -206,10 +207,10 @@ public void testCanMatchCoordinator() throws Exception {
)
.setSize(5),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits());
assertEquals(0, response.getHits().getHits().length);
assertEquals(5, response.getSuccessfulShards());
assertEquals(4, response.getSkippedShards());
assertEquals(5, response.getSkippedShards());
}
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@

package org.elasticsearch.xpack.rank.rrf;

import org.apache.lucene.search.TotalHits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.search.SearchType;
Expand Down Expand Up @@ -199,10 +200,10 @@ public void testCanMatchShard() throws IOException {
)
.setSize(5),
response -> {
assertNull(response.getHits().getTotalHits());
assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits());
assertEquals(0, response.getHits().getHits().length);
assertEquals(5, response.getSuccessfulShards());
assertEquals(4, response.getSkippedShards());
assertEquals(5, response.getSkippedShards());
}
);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -384,11 +384,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying
}
} else {
assertResponse(client().search(request), newSearchResponse -> {
// When all shards are skipped, at least one of them should be queried in order to
// provide a proper search response.
assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1));
assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1));
assertThat(newSearchResponse.getFailedShards(), equalTo(1));
assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount));
assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount));
assertThat(newSearchResponse.getFailedShards(), equalTo(0));
assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount));
});

Expand Down Expand Up @@ -748,9 +746,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped()
// All the regular index searches succeeded
assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards));
assertThat(newSearchResponse.getFailedShards(), equalTo(0));
// We have to query at least one node to construct a valid response, and we pick
// a shard that's available in order to construct the search response
assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1));
assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards));
assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards));
assertThat(newSearchResponse.getHits().getTotalHits().value(), equalTo(0L));
});
Expand Down
Loading

0 comments on commit 7f573c6

Please sign in to comment.