From 4f6fc5455f5ede06d9bbf4b9e602fd5db7a2f7a6 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Mon, 10 Apr 2017 12:36:16 +0200
Subject: [PATCH 1/2] Set shard count limit to unlimited

Now that we have incremental reduce functions for topN and aggregations,
we can set the default for `action.search.shard_count.limit` to unlimited.
The setting can still be used to restrict the number of shards a search
request touches, but by default a search now executes across all shards
that match the search request's index pattern.
---
 .../org/elasticsearch/action/search/TransportSearchAction.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 008d022a6556f..63a3ad0b62d63 100644
--- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -60,7 +60,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest
 
     /** The maximum number of shards for a single search request. */
     public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
-            "action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope);
+            "action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope);
 
     private final ClusterService clusterService;
     private final SearchTransportService searchTransportService;

From a4ed970b48d20d8fe908ba90d2157d8c8179cb4b Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Mon, 10 Apr 2017 15:50:59 +0200
Subject: [PATCH 2/2] make clint happy

---
 docs/reference/search/search.asciidoc | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc
index eccba57dee1fe..41ba6e5c87ab8 100644
--- a/docs/reference/search/search.asciidoc
+++ b/docs/reference/search/search.asciidoc
@@ -60,9 +60,10 @@ GET /_search?q=tag:wow
 // CONSOLE
 // TEST[setup:twitter]
 
-By default elasticsearch rejects search requests that would query more than
-1000 shards. The reason is that such large numbers of shards make the job of
-the coordinating node very CPU and memory intensive. It is usually a better
-idea to organize data in such a way that there are fewer larger shards. In
-case you would like to bypass this limit, which is discouraged, you can update
-the `action.search.shard_count.limit` cluster setting to a greater value.
+By default elasticsearch doesn't reject any search requests based on the number
+of shards the request hits. While elasticsearch optimizes the search execution
+on the coordinating node, a large number of shards can still have a significant
+impact on CPU and memory usage. It is usually a better idea to organize data in
+such a way that there are fewer, larger shards. If you would like to configure a
+soft limit, you can update the `action.search.shard_count.limit` cluster setting
+in order to reject search requests that hit too many shards.
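
The soft limit the updated docs describe is applied through the cluster
settings API. A minimal sketch of such an update follows; the value 1000
simply mirrors the previous default and is only an example, not part of
the patches above:

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
    "transient": {
        "action.search.shard_count.limit": 1000
    }
}
--------------------------------------------------

With this in place, the coordinating node rejects any search request that
would hit more than 1000 shards. Because the setting is registered with
`Property.Dynamic`, it can be changed at runtime without a node restart,
and setting it to `null` removes the transient value and falls back to the
new unlimited default.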