Adjust randomization in cluster shard limit tests #47254

Merged · 4 commits · Sep 30, 2019

@@ -454,7 +454,7 @@ public void testCalculateNumRoutingShards() {
     }
 
     public void testShardLimit() {
-        int nodesInCluster = randomIntBetween(2,100);
+        int nodesInCluster = randomIntBetween(2,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
         Settings clusterSettings = Settings.builder()
             .put(MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), counts.getShardsPerNode())

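Editorial note, not part of the PR: the upper bound here drops from 100 to 90 because ShardCounts.forDataNodeCount (changed later in this diff) now asserts it is given at most 90 data nodes. A minimal stand-alone sketch of that interaction; the class and method names below are hypothetical, only assertThat, lessThanOrEqualTo, and the bound of 90 come from the diff.

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

// Hypothetical, self-contained illustration of the guard the randomized node count must respect.
class NodeCountBoundSketch {
    // Mirrors the precondition added to ShardCounts.forDataNodeCount in this PR.
    static void requireSupportedNodeCount(int dataNodes) {
        assertThat("too many data nodes for a single-index shard count", dataNodes, lessThanOrEqualTo(90));
    }

    public static void main(String[] args) {
        requireSupportedNodeCount(90);   // passes: the new randomIntBetween(2,90) can never exceed this
        requireSupportedNodeCount(100);  // throws AssertionError: the old bound of 100 could reach here
    }
}
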
@@ -45,6 +45,7 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
 public class ClusterShardLimitIT extends ESIntegTestCase {

@@ -102,24 +103,11 @@ public void testIndexCreationOverLimit() {
         assertFalse(clusterState.getMetaData().hasIndex("should-fail"));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47107")
     public void testIndexCreationOverLimitFromTemplate() {
         int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size();
 
-        final ShardCounts counts;
-        {
-            final ShardCounts temporaryCounts = ShardCounts.forDataNodeCount(dataNodes);
-            /*
-             * We are going to create an index that will bring us up to one below the limit; we go one below the limit to ensure the
-             * template is used instead of one shard.
-             */
-            counts = new ShardCounts(
-                temporaryCounts.shardsPerNode,
-                temporaryCounts.firstIndexShards - 1,
-                temporaryCounts.firstIndexReplicas,
-                temporaryCounts.failingIndexShards + 1,
-                temporaryCounts.failingIndexReplicas);
-        }
+        final ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes);
 
         setShardsPerNode(counts.getShardsPerNode());
 
         if (counts.firstIndexShards > 0) {

@@ -401,10 +389,13 @@ private ShardCounts(int shardsPerNode,
         }
 
         public static ShardCounts forDataNodeCount(int dataNodes) {
+            assertThat("this method will not work reliably with this many data nodes due to the limit of shards in a single index, " +
+                "use fewer data nodes or multiple indices", dataNodes, lessThanOrEqualTo(90));
             int mainIndexReplicas = between(0, dataNodes - 1);
             int mainIndexShards = between(1, 10);
             int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;
-            int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes);
+            // Sometimes add some headroom to the limit to check that it works even if you're not already right up against the limit
+            int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes) + between(0, 10);
             int totalCap = shardsPerNode * dataNodes;
 
             int failingIndexShards;

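To make the new arithmetic concrete, here is a worked example; the class name and the fixed values are illustrative picks from the randomized ranges, only the formulas come from the diff above. The 90-node precondition matches the assertion message: with many more data nodes, the shard counts this helper produces could exceed what a single index supports.

// Hypothetical worked example of ShardCounts.forDataNodeCount's arithmetic; values are illustrative
// picks from the randomized ranges, not taken from the PR.
public class ForDataNodeCountExample {
    public static void main(String[] args) {
        int dataNodes = 4;
        int mainIndexReplicas = 3;   // one possible result of between(0, dataNodes - 1)
        int mainIndexShards = 6;     // one possible result of between(1, 10)
        int totalShardsInIndex = (mainIndexReplicas + 1) * mainIndexShards;                       // (3 + 1) * 6 = 24
        int headroom = 2;            // one possible result of the new between(0, 10) headroom
        int shardsPerNode = (int) Math.ceil((double) totalShardsInIndex / dataNodes) + headroom;  // ceil(24 / 4) + 2 = 8
        int totalCap = shardsPerNode * dataNodes;                                                 // 8 * 4 = 32
        // The first index consumes 24 of the 32 allowed shards, so the cluster is no longer always
        // right at the limit; the remaining 8 shards are the headroom the new comment describes.
        System.out.println("shardsPerNode=" + shardsPerNode + ", totalCap=" + totalCap);
    }
}
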
@@ -572,7 +572,7 @@ public void testConflictingEngineFactories() {
     }
 
     public void testOverShardLimit() {
-        int nodesInCluster = randomIntBetween(1,100);
+        int nodesInCluster = randomIntBetween(1,90);
         ClusterShardLimitIT.ShardCounts counts = forDataNodeCount(nodesInCluster);
 
         Settings clusterSettings = Settings.builder()

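For readers who have not seen the setting under test, here is a rough sketch of the check that testShardLimit and testOverShardLimit exercise. The class, method, and parameter names below are hypothetical, not Elasticsearch's; what is grounded in the diff is that the cluster-wide cap is shardsPerNode (the cluster.max_shards_per_node setting referenced via MetaData.SETTING_CLUSTER_MAX_SHARDS_PER_NODE) multiplied by the number of data nodes.

// Editorial sketch of the limit check these tests exercise; this is not the production code path.
public class ShardLimitSketch {
    // Returns true when adding newShards would push the cluster past shardsPerNode * dataNodes.
    static boolean wouldExceedShardLimit(int currentOpenShards, int newShards, int shardsPerNode, int dataNodes) {
        int totalCap = shardsPerNode * dataNodes;         // same totalCap formula as ShardCounts.forDataNodeCount
        return currentOpenShards + newShards > totalCap;  // creations that cross the cap should be rejected
    }

    public static void main(String[] args) {
        // Reusing the worked example above: cap of 32, first index already using 24 shards.
        System.out.println(wouldExceedShardLimit(24, 8, 8, 4));  // false: exactly fills the cap
        System.out.println(wouldExceedShardLimit(24, 9, 8, 4));  // true: one shard over the cap
    }
}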