Skip to content

Commit

Permalink
Log pending cluster state tasks after cluster cleanup timeout (#119186)
Browse files Browse the repository at this point in the history
The Cluster Health API call in the `ESRestTestCase` cleanup waits for
all cluster state tasks to be finished. We frequently see that API call
timing out with plenty of tasks still in the queue. With this commit, we
retrieve the pending tasks and log them - only if the Cluster Health API
call timed out. This will hopefully give some more insight into what
(kind of) tasks are still pending when the API call times out.
  • Loading branch information
nielsbauman authored Dec 23, 2024
1 parent d92233c commit e01d956
Showing 1 changed file with 36 additions and 14 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -852,8 +852,7 @@ protected boolean preserveSearchableSnapshotsIndicesUponCompletion() {
}

private void wipeCluster() throws Exception {
logger.info("Waiting for all cluster updates up to this moment to be processed");
assertOK(adminClient().performRequest(new Request("GET", "_cluster/health?wait_for_events=languid")));
waitForClusterUpdates();

// Cleanup rollup before deleting indices. A rollup job might have bulks in-flight,
// so we need to fully shut them down first otherwise a job might stall waiting
Expand Down Expand Up @@ -991,6 +990,38 @@ private void wipeCluster() throws Exception {
deleteAllNodeShutdownMetadata();
}

/**
 * Blocks until the cluster has processed all cluster state updates submitted up to this
 * point, by calling the Cluster Health API with {@code wait_for_events=languid}. When that
 * call times out (HTTP 408), the still-pending cluster state tasks are retrieved and logged
 * to aid debugging, and the original exception is rethrown to the caller.
 */
private void waitForClusterUpdates() throws Exception {
    logger.info("Waiting for all cluster updates up to this moment to be processed");
    final Request healthRequest = new Request("GET", "_cluster/health?wait_for_events=languid");
    try {
        assertOK(adminClient().performRequest(healthRequest));
    } catch (ResponseException e) {
        // Only bother fetching the pending-tasks list when the health call actually timed out.
        final int status = e.getResponse().getStatusLine().getStatusCode();
        if (status == HttpStatus.SC_REQUEST_TIMEOUT) {
            final String pendingTasks = getPendingClusterStateTasks();
            if (pendingTasks != null) {
                logger.error("Timed out waiting for cluster updates to be processed, {}", pendingTasks);
            }
        }
        // Propagate the failure regardless; logging is purely diagnostic.
        throw e;
    }
}

/**
 * Fetches the cluster's pending cluster state tasks and formats them as a human-readable
 * message, one task per line.
 *
 * @return a message listing the still-running tasks, or {@code null} when there are none;
 *         also returns {@code null} after failing the test if the pending-tasks request
 *         itself threw an {@link IOException}
 */
private static String getPendingClusterStateTasks() {
    try {
        final Response response = adminClient().performRequest(new Request("GET", "/_cluster/pending_tasks"));
        final List<?> tasks = (List<?>) entityAsMap(response).get("tasks");
        if (tasks.isEmpty() == false) {
            final StringBuilder message = new StringBuilder("there are still running tasks:");
            for (Object task : tasks) {
                message.append('\n').append(task.toString());
            }
            return message.toString();
        }
    } catch (IOException e) {
        fail(e, "Failed to retrieve pending tasks in the cluster during cleanup");
    }
    return null;
}

/**
* This method checks whether ILM policies or templates get recreated after they have been deleted. If so, we are probably deleting
* them unnecessarily, potentially causing test performance problems. This could happen for example if someone adds a new standard ILM
Expand Down Expand Up @@ -1461,18 +1492,9 @@ private void logIfThereAreRunningTasks() throws IOException {
*/
private static void waitForClusterStateUpdatesToFinish() throws Exception {
assertBusy(() -> {
try {
Response response = adminClient().performRequest(new Request("GET", "/_cluster/pending_tasks"));
List<?> tasks = (List<?>) entityAsMap(response).get("tasks");
if (false == tasks.isEmpty()) {
StringBuilder message = new StringBuilder("there are still running tasks:");
for (Object task : tasks) {
message.append('\n').append(task.toString());
}
fail(message.toString());
}
} catch (IOException e) {
fail("cannot get cluster's pending tasks: " + e.getMessage());
final var pendingTasks = getPendingClusterStateTasks();
if (pendingTasks != null) {
fail(pendingTasks);
}
}, 30, TimeUnit.SECONDS);
}
Expand Down

0 comments on commit e01d956

Please sign in to comment.