Improve Node Shutdown Observability #78727

Merged
ShutdownShardMigrationStatus.java
@@ -8,6 +8,8 @@

package org.elasticsearch.cluster.metadata;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -20,25 +22,42 @@
import java.util.Objects;

public class ShutdownShardMigrationStatus implements Writeable, ToXContentObject {
private static final Version ALLOCATION_DECISION_ADDED_VERSION = Version.V_8_0_0;

private final SingleNodeShutdownMetadata.Status status;
private final long shardsRemaining;
@Nullable private final String explanation;
@Nullable private final ShardAllocationDecision allocationDecision;

public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining) {
this(status, shardsRemaining, null);
this(status, shardsRemaining, null, null);
}

public ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status status, long shardsRemaining, @Nullable String explanation) {
this(status, shardsRemaining, explanation, null);
}

public ShutdownShardMigrationStatus(
SingleNodeShutdownMetadata.Status status,
long shardsRemaining,
@Nullable String explanation,
@Nullable ShardAllocationDecision allocationDecision
) {
this.status = Objects.requireNonNull(status, "status must not be null");
this.shardsRemaining = shardsRemaining;
this.explanation = explanation;
this.allocationDecision = allocationDecision;
}

public ShutdownShardMigrationStatus(StreamInput in) throws IOException {
this.status = in.readEnum(SingleNodeShutdownMetadata.Status.class);
this.shardsRemaining = in.readLong();
this.explanation = in.readOptionalString();
if (in.getVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) {
this.allocationDecision = in.readOptionalWriteable(ShardAllocationDecision::new);
} else {
this.allocationDecision = null;
}
}

public long getShardsRemaining() {
@@ -61,6 +80,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
if (Objects.nonNull(explanation)) {
builder.field("explanation", explanation);
}
if (Objects.nonNull(allocationDecision)) {
builder.startObject("node_allocation_decisions");
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Super minor nit, but since this is the top-level decision first rather than a plain list (it definitely has multiple sub parts though), can this be 'node_allocation_decision'?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done and good catch!

{
allocationDecision.toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
return builder;
}
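The review exchange above asks for a singular field name. As a rough sketch only (not the merged code), the toXContent block with the suggested rename applied might look like this, everything else unchanged:

    // Sketch of the reviewer's suggestion: a singular key, since the value is one
    // nested allocation decision object rather than a list of them.
    if (Objects.nonNull(allocationDecision)) {
        builder.startObject("node_allocation_decision");
        {
            allocationDecision.toXContent(builder, params);
        }
        builder.endObject();
    }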
@@ -70,19 +96,25 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeEnum(status);
out.writeLong(shardsRemaining);
out.writeOptionalString(explanation);
if (out.getVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) {
out.writeOptionalWriteable(allocationDecision);
}
}
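The ALLOCATION_DECISION_ADDED_VERSION checks in the StreamInput constructor and in writeTo keep the wire format compatible with nodes that do not know about the new field. A hedged round-trip sketch of how that gate behaves; the older version constant, the sample values, and someDecision are illustrative assumptions, not part of the PR:

    // Illustrative only: serialize against a pre-8.0 wire version; the version gate
    // skips the allocation decision on write and leaves it null on read.
    ShutdownShardMigrationStatus original = new ShutdownShardMigrationStatus(
        SingleNodeShutdownMetadata.Status.STALLED, 3L, "example explanation", someDecision);

    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_7_15_0);                 // assumed pre-8.0 wire version
    original.writeTo(out);

    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_7_15_0);
    ShutdownShardMigrationStatus copy = new ShutdownShardMigrationStatus(in);
    // copy keeps status/shardsRemaining/explanation, but its allocation decision is null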

@Override
public boolean equals(Object o) {
if (this == o) return true;
if ((o instanceof ShutdownShardMigrationStatus) == false) return false;
ShutdownShardMigrationStatus that = (ShutdownShardMigrationStatus) o;
return shardsRemaining == that.shardsRemaining && status == that.status && Objects.equals(explanation, that.explanation);
return shardsRemaining == that.shardsRemaining
&& status == that.status
&& Objects.equals(explanation, that.explanation)
&& Objects.equals(allocationDecision, that.allocationDecision);
}

@Override
public int hashCode() {
return Objects.hash(status, shardsRemaining, explanation);
return Objects.hash(status, shardsRemaining, explanation, allocationDecision);
}

@Override
(second changed file)
@@ -262,17 +262,15 @@ static ShutdownShardMigrationStatus shardMigrationStatus(
return hasShardCopyOnOtherNode == false;
})
.peek(pair -> {
if (logger.isTraceEnabled()) { // don't serialize the decision unless we have to
logger.trace(
"node [{}] shutdown of type [{}] stalled: found shard [{}][{}] from index [{}] with negative decision: [{}]",
nodeId,
shutdownType,
pair.v1().getId(),
pair.v1().primary() ? "primary" : "replica",
pair.v1().shardId().getIndexName(),
Strings.toString(pair.v2())
);
}
logger.info(
"node [{}] shutdown of type [{}] stalled: found shard [{}][{}] from index [{}] with negative decision: [{}]",
nodeId,
shutdownType,
pair.v1().getId(),
pair.v1().primary() ? "primary" : "replica",
pair.v1().shardId().getIndexName(),
Strings.toString(pair.v2())
);
})
.findFirst();

Review comment (Member), on the logger.info call:
If we hit this every 10 seconds for 30 minutes, that's roughly 180 log lines; are we okay with that amount of logging? That seems a little excessive to me. Perhaps it would be better to log this at DEBUG level, since we'll expose most of the info in the response now?
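The comment above questions logging this at INFO on every poll. A rough sketch of the DEBUG-level alternative the reviewer floats, as an assumption about one possible follow-up rather than what was merged:

    // Same message, but at DEBUG: the periodic status check (roughly every 10 seconds
    // while stalled, per the comment above) no longer floods the logs, and the
    // allocation decision is still surfaced through the shutdown-status response.
    logger.debug(
        "node [{}] shutdown of type [{}] stalled: found shard [{}][{}] from index [{}] with negative decision: [{}]",
        nodeId,
        shutdownType,
        pair.v1().getId(),
        pair.v1().primary() ? "primary" : "replica",
        pair.v1().shardId().getIndexName(),
        Strings.toString(pair.v2())
    );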

@@ -287,6 +285,7 @@ static ShutdownShardMigrationStatus shardMigrationStatus(
} else if (unmovableShard.isPresent()) {
// We found a shard that can't be moved, so shard relocation is stalled. Blame the unmovable shard.
ShardRouting shardRouting = unmovableShard.get().v1();
ShardAllocationDecision decision = unmovableShard.get().v2();

return new ShutdownShardMigrationStatus(
SingleNodeShutdownMetadata.Status.STALLED,
Expand All @@ -296,7 +295,8 @@ static ShutdownShardMigrationStatus shardMigrationStatus(
shardRouting.shardId().getId(),
shardRouting.primary() ? "primary" : "replica",
shardRouting.index().getName()
).getFormattedMessage()
).getFormattedMessage(),
decision
);
} else {
return new ShutdownShardMigrationStatus(SingleNodeShutdownMetadata.Status.IN_PROGRESS, totalRemainingShards);