Skip to content

Commit

Permalink
[TEST] Add assertBusy when checking for pending operation counter after tests
Browse files Browse the repository at this point in the history

Currently, pending operations can still complete after a test that uses a disruption
scheme has finished. This commit waits (via assertBusy) for the pending operation
counter to reach zero after the tests are run.
  • Loading branch information
areek committed Nov 10, 2016
1 parent 5b4c3fb commit 7ed195f
Show file tree
Hide file tree
Showing 4 changed files with 35 additions and 28 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
}

@Override
protected void beforeIndexDeletion() throws IOException {
protected void beforeIndexDeletion() throws Exception {
if (disableBeforeIndexDeletion == false) {
super.beforeIndexDeletion();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -577,7 +577,7 @@ protected Set<String> excludeTemplates() {
return Collections.emptySet();
}

protected void beforeIndexDeletion() throws IOException {
protected void beforeIndexDeletion() throws Exception {
cluster().beforeIndexDeletion();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1020,7 +1020,7 @@ public synchronized void afterTest() throws IOException {
}

@Override
public void beforeIndexDeletion() throws IOException {
public void beforeIndexDeletion() throws Exception {
// Check that the operations counter on index shard has reached 0.
// The assumption here is that after a test there are no ongoing write operations.
// tests that have ongoing write operations after the test (for example because ttl is used
Expand Down Expand Up @@ -1055,33 +1055,40 @@ private void assertSameSyncIdSameDocs() {
}
}

private void assertShardIndexCounter() throws IOException {
final Collection<NodeAndClient> nodesAndClients = nodes.values();
for (NodeAndClient nodeAndClient : nodesAndClients) {
IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
for (IndexService indexService : indexServices) {
for (IndexShard indexShard : indexService) {
int activeOperationsCount = indexShard.getActiveOperationsCount();
if (activeOperationsCount > 0) {
TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager();
DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode();
List<TaskInfo> taskInfos = taskManager.getTasks().values().stream()
.filter(task -> task instanceof ReplicationTask)
.map(task -> task.taskInfo(localNode.getId(), true))
.collect(Collectors.toList());
ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList());
XContentBuilder builder = XContentFactory.jsonBuilder()
.prettyPrint()
.startObject()
.value(response)
.endObject();
throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " +
nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" +
builder.string());
// Asserts that the active-operations counter of every shard on every node has reached 0,
// i.e. no in-flight write/replication operations remain after the test. The whole check is
// wrapped in assertBusy so that operations still draining (e.g. after a disruption-scheme
// test) get time to complete before the assertion finally fails.
private void assertShardIndexCounter() throws Exception {
    assertBusy(() -> {
        final Collection<NodeAndClient> nodesAndClients = nodes.values();
        for (NodeAndClient nodeAndClient : nodesAndClients) {
            IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
            // Walk every index service and every shard hosted on this node.
            for (IndexService indexService : indexServices) {
                for (IndexShard indexShard : indexService) {
                    int activeOperationsCount = indexShard.getActiveOperationsCount();
                    if (activeOperationsCount > 0) {
                        // Counter is non-zero: collect the node's current replication tasks so
                        // the failure message shows what is still running.
                        TaskManager taskManager = getInstance(TransportService.class, nodeAndClient.name).getTaskManager();
                        DiscoveryNode localNode = getInstance(ClusterService.class, nodeAndClient.name).localNode();
                        List<TaskInfo> taskInfos = taskManager.getTasks().values().stream()
                            .filter(task -> task instanceof ReplicationTask)
                            .map(task -> task.taskInfo(localNode.getId(), true))
                            .collect(Collectors.toList());
                        ListTasksResponse response = new ListTasksResponse(taskInfos, Collections.emptyList(), Collections.emptyList());
                        XContentBuilder builder = null;
                        try {
                            // Render the task list as pretty-printed JSON for the error message.
                            builder = XContentFactory.jsonBuilder()
                                .prettyPrint()
                                .startObject()
                                .value(response)
                                .endObject();
                            // AssertionError is not an IOException, so it passes through the catch
                            // below and lets assertBusy retry until its timeout elapses.
                            throw new AssertionError("expected index shard counter on shard " + indexShard.shardId() + " on node " +
                                nodeAndClient.name + " to be 0 but was " + activeOperationsCount + ". Current replication tasks on node:\n" +
                                builder.string());
                        } catch (IOException e) {
                            // Building the JSON rendering itself failed; surface that instead of the assertion.
                            throw new RuntimeException("caught exception while building response [" + response + "]", e);
                        }
                    }
                }
            }
        }
        } // NOTE(review): this extra '}' looks like diff-rendering residue from the removed old method — confirm against the actual file
    });
}

private void randomlyResetClients() throws IOException {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ public void wipe(Set<String> excludeTemplates) {
/**
* Assertions that should run before the cluster is wiped should be called in this method
*/
public void beforeIndexDeletion() throws IOException {
public void beforeIndexDeletion() throws Exception {
}

/**
Expand Down

0 comments on commit 7ed195f

Please sign in to comment.