diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
index 4a8700a9db1d9..65ead9c09f1d5 100644
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
@@ -175,7 +175,7 @@ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
     }
 
     @Override
-    protected void beforeIndexDeletion() throws IOException {
+    protected void beforeIndexDeletion() throws Exception {
         if (disableBeforeIndexDeletion == false) {
             super.beforeIndexDeletion();
         }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 82ae03af80dbb..859ad590cf9c2 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -577,7 +577,7 @@ protected Set<String> excludeTemplates() {
         return Collections.emptySet();
     }
 
-    protected void beforeIndexDeletion() throws IOException {
+    protected void beforeIndexDeletion() throws Exception {
         cluster().beforeIndexDeletion();
     }
 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 0d19e03299af1..37e3a58295efb 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -1020,7 +1020,7 @@ public synchronized void afterTest() throws IOException {
     }
 
     @Override
-    public void beforeIndexDeletion() throws IOException {
+    public void beforeIndexDeletion() throws Exception {
         // Check that the operations counter on index shard has reached 0.
         // The assumption here is that after a test there are no ongoing write operations.
         // test that have ongoing write operations after the test (for example because ttl is used
@@ -1055,33 +1055,40 @@ private void assertSameSyncIdSameDocs() {
         }
     }
 
-    private void assertShardIndexCounter() throws IOException {
-        final Collection<NodeAndClient> nodesAndClients = nodes.values();
-        for (NodeAndClient nodeAndClient : nodesAndClients) {
-            IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
-            for (IndexService indexService : indexServices) {
-                for (IndexShard indexShard : indexService) {
-                    int activeOperationsCount = indexShard.getActiveOperationsCount();
-                    if (activeOperationsCount > 0) {
-                        TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager();
-                        DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode();
-                        List<TaskInfo> taskInfos = taskManager.getTasks().values().stream()
-                            .filter(task -> task instanceof ReplicationTask)
-                            .map(task -> task.taskInfo(localNode.getId(), true))
-                            .collect(Collectors.toList());
-                        ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList());
-                        XContentBuilder builder = XContentFactory.jsonBuilder()
-                            .prettyPrint()
-                            .startObject()
-                            .value(response)
-                            .endObject();
-                        throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " +
-                            nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" +
-                            builder.string());
+    private void assertShardIndexCounter() throws Exception {
+        assertBusy(() -> {
+            final Collection<NodeAndClient> nodesAndClients = nodes.values();
+            for (NodeAndClient nodeAndClient : nodesAndClients) {
+                IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+                for (IndexService indexService : indexServices) {
+                    for (IndexShard indexShard : indexService) {
+                        int activeOperationsCount = indexShard.getActiveOperationsCount();
+                        if (activeOperationsCount > 0) {
+                            TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager();
+                            DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode();
+                            List<TaskInfo> taskInfos = taskManager.getTasks().values().stream()
+                                .filter(task -> task instanceof ReplicationTask)
+                                .map(task -> task.taskInfo(localNode.getId(), true))
+                                .collect(Collectors.toList());
+                            ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList());
+                            XContentBuilder builder = null;
+                            try {
+                                builder = XContentFactory.jsonBuilder()
+                                    .prettyPrint()
+                                    .startObject()
+                                    .value(response)
+                                    .endObject();
+                                throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " +
+                                    nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" +
+                                    builder.string());
+                            } catch (IOException e) {
+                                throw new RuntimeException("caught exception while building response [" + response + "]", e);
+                            }
+                        }
                     }
                 }
             }
-        }
+        });
     }
 
     private void randomlyResetClients() throws IOException {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
index b960685777e8d..c2ac65d9980e1 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
@@ -82,7 +82,7 @@ public void wipe(Set<String> excludeTemplates) {
     /**
     * Assertions that should run before the cluster is wiped should be called in this method
     */
-    public void beforeIndexDeletion() throws IOException {
+    public void beforeIndexDeletion() throws Exception {
    }
 
    /**
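For context on the change above: the shard-counter check is now wrapped in `assertBusy(...)`, which re-runs the assertion block until it passes or a timeout elapses, and because that block may throw a checked `Exception`, the `beforeIndexDeletion()` signatures are widened from `throws IOException` to `throws Exception`. The sketch below illustrates that retry pattern in isolation; the class name `BusyAssert`, the `CheckedRunnable` interface, and the backoff values are illustrative assumptions, not the test framework's actual implementation.

```java
// Minimal sketch of an assertBusy-style retry helper (assumed names, not ESTestCase):
// run a block of assertions repeatedly, retrying on AssertionError with a short
// capped backoff, until it passes or the timeout elapses.

import java.util.concurrent.TimeUnit;

public final class BusyAssert {

    /** A runnable that may throw a checked exception, which is why callers declare {@code throws Exception}. */
    @FunctionalInterface
    public interface CheckedRunnable {
        void run() throws Exception;
    }

    /** Retries {@code assertion} until it stops throwing AssertionError or {@code timeout} elapses. */
    public static void assertBusy(CheckedRunnable assertion, long timeout, TimeUnit unit) throws Exception {
        final long deadline = System.nanoTime() + unit.toNanos(timeout);
        long sleepMillis = 1;
        AssertionError lastFailure;
        do {
            try {
                assertion.run();
                return; // the assertions passed
            } catch (AssertionError e) {
                lastFailure = e; // remember the failure and retry after a short pause
            }
            Thread.sleep(sleepMillis);
            sleepMillis = Math.min(sleepMillis * 2, 500); // simple capped backoff (assumed values)
        } while (System.nanoTime() < deadline);
        throw lastFailure; // timed out: surface the most recent assertion failure
    }

    public static void main(String[] args) throws Exception {
        final long start = System.currentTimeMillis();
        // The condition only becomes true after ~200 ms, so the first attempts fail
        // and are retried instead of failing immediately, mirroring how the shard
        // operation counter is given time to drain back to 0.
        assertBusy(() -> {
            if (System.currentTimeMillis() - start < 200) {
                throw new AssertionError("active operations count not yet back to 0");
            }
        }, 10, TimeUnit.SECONDS);
        System.out.println("assertion eventually passed");
    }
}
```

Swallowing only `AssertionError` between attempts (and letting other exceptions propagate) keeps genuine failures visible while giving transient state, such as in-flight replication operations, time to drain before the check finally fails.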