
Commit

Some ILM tidying cleanups, mostly around String formatting (elastic#1…
joegallo committed Dec 10, 2024
1 parent 8b0c4d8 commit 4bce48b
Showing 61 changed files with 187 additions and 232 deletions.
@@ -20,7 +20,7 @@
*/
public abstract class AsyncWaitStep extends Step {

private Client client;
private final Client client;

public AsyncWaitStep(StepKey key, StepKey nextStepKey, Client client) {
super(key, nextStepKey);
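The change above marks the constructor-assigned client reference as final. A minimal, self-contained sketch of that pattern; the Client interface below is a hypothetical stand-in for the Elasticsearch client used by AsyncWaitStep, not the real type:

// Minimal sketch: a field assigned exactly once in the constructor can be declared final,
// so the compiler enforces the single assignment.
public class AsyncWaitStepSketch {

    interface Client {} // hypothetical stand-in for the Elasticsearch Client

    private final Client client; // previously: private Client client;

    AsyncWaitStepSketch(Client client) {
        this.client = client;
    }

    Client getClient() {
        return client;
    }
}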
@@ -71,7 +71,7 @@ public ClusterState performAction(Index index, ClusterState clusterState) {
* This method returns the next step to execute based on the predicate. If
* the predicate returned true, then nextStepKeyOnTrue is the key of the
* next step to run, otherwise nextStepKeyOnFalse is.
*
* <p>
* throws {@link UnsupportedOperationException} if performAction was not called yet
*
* @return next step to execute
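Several hunks in this commit replace a bare * line in Javadoc with * <p>. A short sketch of why, assuming standard Javadoc HTML rendering: blank comment lines do not produce paragraph breaks in the generated HTML, so an explicit <p> tag is needed to start a new paragraph.

public class JavadocParagraphSketch {

    /**
     * Returns the next step to execute based on the predicate.
     * <p>
     * The {@code <p>} tag above is what starts a new paragraph in the rendered HTML;
     * a bare {@code *} line would be collapsed into the preceding paragraph.
     *
     * @return the next step key, or {@code null} in this placeholder
     */
    public String nextStepKey() {
        return null; // placeholder body; only the comment structure matters here
    }
}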
@@ -13,11 +13,10 @@
import org.elasticsearch.cluster.metadata.IndexAbstraction;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.Index;
import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo;

import java.util.Locale;

/**
* Some actions cannot be executed on a data stream's write index (eg. `searchable-snapshot`). This step checks if the managed index is
* part of a data stream, in which case it will check it's not the write index. If the managed index is the write index of a data stream
@@ -46,8 +45,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
String indexName = index.getName();

if (indexMetadata == null) {
String errorMessage = String.format(
Locale.ROOT,
String errorMessage = Strings.format(
"[%s] lifecycle action for index [%s] executed but index no longer exists",
getKey().action(),
indexName
@@ -64,8 +62,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
if (dataStream != null) {
boolean isFailureStoreWriteIndex = index.equals(dataStream.getFailureStoreWriteIndex());
if (isFailureStoreWriteIndex || dataStream.getWriteIndex().equals(index)) {
String errorMessage = String.format(
Locale.ROOT,
String errorMessage = Strings.format(
"index [%s] is the%s write index for data stream [%s], pausing "
+ "ILM execution of lifecycle [%s] until this index is no longer the write index for the data stream via manual or "
+ "automated rollover",
@@ -23,7 +23,6 @@
import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

/**
@@ -158,8 +157,7 @@ public Info(String nodeId, long expectedShards, long numberShardsLeftToAllocate)
if (numberShardsLeftToAllocate < 0) {
this.message = "Waiting for all shards to become active";
} else {
this.message = String.format(
Locale.ROOT,
this.message = Strings.format(
"Waiting for node [%s] to contain [%d] shards, found [%d], remaining [%d]",
nodeId,
expectedShards,
@@ -10,11 +10,10 @@
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.Index;
import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo;

import java.util.Locale;

/**
* This step checks whether the new shrunken index's shards count is a factor of the source index's shards count.
*/
@@ -53,8 +52,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
int sourceNumberOfShards = indexMetadata.getNumberOfShards();
if (sourceNumberOfShards % numberOfShards != 0) {
String policyName = indexMetadata.getLifecyclePolicyName();
String errorMessage = String.format(
Locale.ROOT,
String errorMessage = Strings.format(
"lifecycle action of policy [%s] for index [%s] cannot make progress "
+ "because the target shards count [%d] must be a factor of the source index's shards count [%d]",
policyName,
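The condition guarded above requires the target shard count to be a factor of the source index's shard count. A tiny standalone illustration of that check:

public class ShrinkFactorCheckSketch {
    public static void main(String[] args) {
        int sourceNumberOfShards = 8;

        for (int targetNumberOfShards : new int[] { 1, 2, 3, 4, 5, 8 }) {
            // Mirrors the modulo check in the hunk above: shrink is only possible when the
            // target count divides the source count evenly.
            boolean allowed = sourceNumberOfShards % targetNumberOfShards == 0;
            System.out.println("shrink 8 -> " + targetNumberOfShards + " shards: " + (allowed ? "allowed" : "rejected"));
        }
    }
}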
@@ -66,27 +66,24 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl
}
getClient().admin()
.indices()
.delete(
new DeleteIndexRequest(shrinkIndexName).masterNodeTimeout(TimeValue.MAX_VALUE),
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
// even if not all nodes acked the delete request yet we can consider this operation as successful as
// we'll generate a new index name and attempt to shrink into the newly generated name
listener.onResponse(null);
}
.delete(new DeleteIndexRequest(shrinkIndexName).masterNodeTimeout(TimeValue.MAX_VALUE), new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
// even if not all nodes acked the delete request yet we can consider this operation as successful as
// we'll generate a new index name and attempt to shrink into the newly generated name
listener.onResponse(null);
}

@Override
public void onFailure(Exception e) {
if (e instanceof IndexNotFoundException) {
// we can move on if the index was deleted in the meantime
listener.onResponse(null);
} else {
listener.onFailure(e);
}
@Override
public void onFailure(Exception e) {
if (e instanceof IndexNotFoundException) {
// we can move on if the index was deleted in the meantime
listener.onResponse(null);
} else {
listener.onFailure(e);
}
}
);
});
}

}
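This hunk flattens the anonymous ActionListener and uses the diamond operator (new ActionListener<>() { ... }), which Java 9 and later permit on anonymous classes when the type argument is inferable. A self-contained sketch of the same idea; Callback below is a hypothetical stand-in for ActionListener:

public class DiamondAnonymousSketch {

    interface Callback<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    static void deleteIndex(String indexName, Callback<Boolean> callback) {
        callback.onResponse(true); // pretend the delete was acknowledged
    }

    public static void main(String[] args) {
        // Pre-Java 9 style: the type argument is repeated on the anonymous class.
        deleteIndex("shrink-index", new Callback<Boolean>() {
            @Override
            public void onResponse(Boolean acknowledged) {
                System.out.println("acknowledged=" + acknowledged);
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });

        // Java 9+ allows the diamond on anonymous classes, as in the hunk above.
        deleteIndex("shrink-index", new Callback<>() {
            @Override
            public void onResponse(Boolean acknowledged) {
                System.out.println("acknowledged=" + acknowledged);
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });
    }
}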
@@ -12,7 +12,7 @@

/**
* Checks whether a condition has been met based on the cluster state.
*
* <p>
* If checking a condition not based on the cluster state, or which may take time to evaluate, use {@link AsyncWaitStep}.
*/
public abstract class ClusterStateWaitStep extends Step {
@@ -35,19 +35,19 @@ public boolean isCompletable() {

public static class Result {
private final boolean complete;
private final ToXContentObject infomationContext;
private final ToXContentObject informationContext;

public Result(boolean complete, ToXContentObject infomationContext) {
public Result(boolean complete, ToXContentObject informationContext) {
this.complete = complete;
this.infomationContext = infomationContext;
this.informationContext = informationContext;
}

public boolean isComplete() {
return complete;
}

public ToXContentObject getInfomationContext() {
return infomationContext;
public ToXContentObject getInformationContext() {
return informationContext;
}
}

@@ -19,7 +19,6 @@
import org.elasticsearch.xpack.core.ilm.step.info.SingleMessageFieldInfo;

import java.time.Clock;
import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;

@@ -30,7 +29,7 @@
* If the action response is complete, the {@link ClusterStateWaitUntilThresholdStep}'s nextStepKey will be the nextStepKey of the
* wrapped action. When the threshold level is surpassed, if the underlying step's condition was not met, the nextStepKey will be changed to
* the provided {@link #nextKeyOnThresholdBreach} and this step will stop waiting.
*
* <p>
* Failures encountered whilst executing the wrapped action will be propagated directly.
*/
public class ClusterStateWaitUntilThresholdStep extends ClusterStateWaitStep {
@@ -72,14 +71,13 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
// we may not have passed the time threshold, but the step is not completable due to a different reason
thresholdPassed.set(true);

String message = String.format(
Locale.ROOT,
String message = Strings.format(
"[%s] lifecycle step, as part of [%s] action, for index [%s] Is not "
+ "completable, reason: [%s]. Abandoning execution and moving to the next fallback step [%s]",
getKey().name(),
getKey().action(),
idxMeta.getIndex().getName(),
Strings.toString(stepResult.getInfomationContext()),
Strings.toString(stepResult.getInformationContext()),
nextKeyOnThresholdBreach
);
logger.debug(message);
@@ -90,8 +88,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
// we retried this step enough, next step will be the configured to {@code nextKeyOnThresholdBreach}
thresholdPassed.set(true);

String message = String.format(
Locale.ROOT,
String message = Strings.format(
"[%s] lifecycle step, as part of [%s] action, for index [%s] executed for"
+ " more than [%s]. Abandoning execution and moving to the next fallback step [%s]",
getKey().name(),
@@ -23,7 +23,7 @@
* Copies the execution state data from one index to another, typically after a
* new index has been created. As part of the execution state copy it will set the target index
* "current step" to the provided target next step {@link org.elasticsearch.xpack.core.ilm.Step.StepKey}.
*
* <p>
* Useful for actions such as shrink.
*/
public class CopyExecutionStateStep extends ClusterStateActionStep {
@@ -12,11 +12,11 @@
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.LifecycleExecutionState;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;

import java.util.Arrays;
import java.util.Locale;
import java.util.Objects;
import java.util.function.BiFunction;

@@ -78,9 +78,8 @@ public ClusterState performAction(Index index, ClusterState clusterState) {
String targetIndexName = targetIndexNameSupplier.apply(sourceIndexName, sourceIndexMetadata.getLifecycleExecutionState());
IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName);
if (targetIndexMetadata == null) {
String errorMessage = String.format(
Locale.ROOT,
"index [%s] is being referenced by ILM action [%s] on step [%s] but " + "it doesn't exist",
String errorMessage = Strings.format(
"index [%s] is being referenced by ILM action [%s] on step [%s] but it doesn't exist",
targetIndexName,
getKey().action(),
getKey().name()
@@ -13,12 +13,12 @@
import org.elasticsearch.cluster.metadata.DesiredNodes;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.common.Strings;
import org.elasticsearch.index.Index;
import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider;
import org.elasticsearch.xpack.core.ilm.step.info.AllocationInfo;

import java.util.List;
import java.util.Locale;
import java.util.Optional;

import static org.elasticsearch.xpack.core.ilm.AllocationRoutedStep.getPendingAllocations;
@@ -103,8 +103,7 @@ public Result isConditionMet(Index index, ClusterState clusterState) {

if (allocationPendingAllShards > 0) {
String statusMessage = availableDestinationTier.map(
s -> String.format(
Locale.ROOT,
s -> Strings.format(
"[%s] lifecycle action [%s] waiting for [%s] shards to be moved to the [%s] tier (tier "
+ "migration preference configuration is %s)",
index.getName(),
@@ -115,9 +114,8 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
)
)
.orElseGet(
() -> String.format(
Locale.ROOT,
"index [%s] has a preference for tiers %s, but no nodes for any of those tiers are " + "available in the cluster",
() -> Strings.format(
"index [%s] has a preference for tiers %s, but no nodes for any of those tiers are available in the cluster",
index.getName(),
preferredTierConfiguration
)
@@ -17,11 +17,10 @@
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.Index;

import java.util.Locale;

/**
* Deletes a single index.
*/
@@ -64,8 +63,7 @@ public void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState cu
);
return;
} else if (isFailureStoreWriteIndex || dataStream.getWriteIndex().getName().equals(indexName)) {
String errorMessage = String.format(
Locale.ROOT,
String errorMessage = Strings.format(
"index [%s] is the%s write index for data stream [%s]. "
+ "stopping execution of lifecycle [%s] as a data stream's write index cannot be deleted. manually rolling over the"
+ " index will resume the execution of the policy as the index will not be the data stream's write index anymore",
@@ -213,7 +213,7 @@ public List<Step> toSteps(Client client, String phase, StepKey nextStepKey) {
// upgrade was performed resume the ILM execution and complete the downsample action after upgrade.)
NoopStep cleanupDownsampleIndexStep = new NoopStep(cleanupDownsampleIndexKey, downsampleKey);

// Prepare the lifecycleState by generating the name of the target index, that subsequest steps will use.
// Prepare the lifecycleState by generating the name of the target index, that subsequent steps will use.
DownsamplePrepareLifeCycleStateStep generateDownsampleIndexNameStep = new DownsamplePrepareLifeCycleStateStep(
generateDownsampleIndexNameKey,
downsampleKey,
@@ -20,7 +20,7 @@

/**
* An ILM step that sets the target index to use in the {@link DownsampleStep}.
* The reason why this is done in a seperate step and stored in {@link LifecycleExecutionState},
* The reason why this is done in a separate step and stored in {@link LifecycleExecutionState},
* is because other steps after downsampling also depend on the target index generated here.
*/
public class DownsamplePrepareLifeCycleStateStep extends ClusterStateActionStep {
@@ -18,7 +18,7 @@

/**
* The request object used by the Explain Lifecycle API.
*
* <p>
* Multiple indices may be queried in the same request using the
* {@link #indices(String...)} method
*/
@@ -22,7 +22,7 @@

/**
* The response object returned by the Explain Lifecycle API.
*
* <p>
* Since the API can be run over multiple indices the response provides a map of
* index to the explanation of the lifecycle status for that index.
*/
@@ -19,7 +19,6 @@
import org.elasticsearch.common.Strings;

import java.util.Arrays;
import java.util.Locale;
import java.util.Objects;
import java.util.stream.Collectors;

@@ -62,8 +61,7 @@ public void performAction(
} else {
DefaultShardOperationFailedException[] failures = response.getShardFailures();
String policyName = indexMetadata.getLifecyclePolicyName();
String errorMessage = String.format(
Locale.ROOT,
String errorMessage = Strings.format(
"index [%s] in policy [%s] encountered failures [%s] on step [%s]",
indexName,
policyName,
@@ -7,15 +7,14 @@
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.ElasticsearchException;

import java.util.Locale;
import org.elasticsearch.common.Strings;

/**
* Exception thrown when a problem is encountered while initialising an ILM policy for an index.
*/
public class InitializePolicyException extends ElasticsearchException {

public InitializePolicyException(String policy, String index, Throwable cause) {
super(String.format(Locale.ROOT, "unable to initialize policy [%s] for index [%s]", policy, index), cause);
super(Strings.format("unable to initialize policy [%s] for index [%s]", policy, index), cause);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,10 @@ private LifecycleExecutionStateUtils() {}
/**
* Given a cluster state, index, and lifecycle state, returns a cluster state where
* the lifecycle state will be associated with the given index.
*
* <p>
* The passed-in index must already be present in the cluster state, this method cannot
* be used to add an index.
*
* <p>
* See also {@link Metadata#withLifecycleState}.
*/
public static ClusterState newClusterStateWithLifecycleState(
@@ -38,7 +38,6 @@ public class LifecycleOperationMetadata implements Metadata.Custom {
public static final ParseField SLM_OPERATION_MODE_FIELD = new ParseField("slm_operation_mode");
public static final LifecycleOperationMetadata EMPTY = new LifecycleOperationMetadata(OperationMode.RUNNING, OperationMode.RUNNING);

@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<LifecycleOperationMetadata, Void> PARSER = new ConstructingObjectParser<>(
TYPE,
a -> new LifecycleOperationMetadata(OperationMode.valueOf((String) a[0]), OperationMode.valueOf((String) a[1]))
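The dropped @SuppressWarnings("unchecked") appears to have been unnecessary: the parser is built from a lambda over Object[], and casts such as (String) a[0] are ordinary checked casts that produce no unchecked warning. A plain-Java sketch of that point, with Function standing in for the real ConstructingObjectParser:

import java.util.function.Function;

public class ParserCastSketch {

    record OperationModes(String ilmMode, String slmMode) {}

    // Compiles cleanly without @SuppressWarnings("unchecked"): casting Object to String is a
    // checked cast. Function here is only a stand-in for ConstructingObjectParser.
    static final Function<Object[], OperationModes> PARSER =
        a -> new OperationModes((String) a[0], (String) a[1]);

    public static void main(String[] args) {
        System.out.println(PARSER.apply(new Object[] { "RUNNING", "STOPPED" }));
    }
}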