When considering the size of shadow replica indices, set size to 0
Otherwise, when calculating the amount of disk usage *after* the shard has
been allocated, the shadow replica's size was incorrectly subtracted.

Resolves #17460
dakrone committed Apr 7, 2016
1 parent 974ad4a commit 43f939c
Showing 2 changed files with 67 additions and 3 deletions.
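
Before the diff, a minimal, self-contained Java sketch (illustration only, not code from this commit; the class and method names are made up) of the post-allocation free-space estimate the allocator performs. Shadow replica shards live on a shared filesystem and consume no node-local disk, so reporting their size as 0 stops the estimate from being reduced by bytes that are never actually used on the node.

    // Illustration only: hypothetical names, not the Elasticsearch API.
    public final class ShadowReplicaSizeExample {

        /**
         * Estimated free bytes on a node once a shard is assumed to be allocated there.
         * A shard of a shadow-replica index sits on a shared filesystem, so it should
         * contribute nothing to local disk usage.
         */
        static long freeBytesAfterAllocation(long freeBytes, long reportedStoreSize, boolean usesShadowReplicas) {
            long effectiveSize = usesShadowReplicas ? 0L : reportedStoreSize;
            return freeBytes - effectiveSize;
        }

        public static void main(String[] args) {
            long freeBytes = 10_000L;
            // Regular shard: its store size really will occupy local disk.
            System.out.println(freeBytesAfterAllocation(freeBytes, 1_000L, false)); // 9000
            // Shadow-replica shard: before this fix its 1_000 bytes were subtracted anyway;
            // with the fix it is treated as size 0 and the estimate stays at 10_000.
            System.out.println(freeBytesAfterAllocation(freeBytes, 1_000L, true));  // 10000
        }
    }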
@@ -29,7 +29,10 @@
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
@@ -354,7 +357,7 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) {
                 ShardStats[] stats = indicesStatsResponse.getShards();
                 final HashMap<String, Long> newShardSizes = new HashMap<>();
                 final HashMap<ShardRouting, String> newShardRoutingToDataPath = new HashMap<>();
-                buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath);
+                buildShardLevelInfo(logger, stats, newShardSizes, newShardRoutingToDataPath, clusterService.state());
                 shardSizes = Collections.unmodifiableMap(newShardSizes);
                 shardRoutingToDataPath = Collections.unmodifiableMap(newShardRoutingToDataPath);
             }
@@ -402,14 +405,24 @@ public void onFailure(Throwable e) {
         return clusterInfo;
     }
 
-    static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, HashMap<String, Long> newShardSizes, HashMap<ShardRouting, String> newShardRoutingToDataPath) {
+    static void buildShardLevelInfo(ESLogger logger, ShardStats[] stats, HashMap<String, Long> newShardSizes, HashMap<ShardRouting, String> newShardRoutingToDataPath, ClusterState state) {
+        MetaData meta = state.getMetaData();
         for (ShardStats s : stats) {
+            IndexMetaData indexMeta = meta.index(s.getShardRouting().index());
+            Settings indexSettings = indexMeta == null ? null : indexMeta.getSettings();
             newShardRoutingToDataPath.put(s.getShardRouting(), s.getDataPath());
             long size = s.getStats().getStore().sizeInBytes();
             String sid = ClusterInfo.shardIdentifierFromRouting(s.getShardRouting());
             if (logger.isTraceEnabled()) {
                 logger.trace("shard: {} size: {}", sid, size);
             }
+            if (indexSettings != null && IndexMetaData.isIndexUsingShadowReplicas(indexSettings)) {
+                // Shards on a shared filesystem should be considered of size 0
+                if (logger.isTraceEnabled()) {
+                    logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid);
+                }
+                size = 0;
+            }
             newShardSizes.put(sid, size);
         }
     }
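
For reference, the isIndexUsingShadowReplicas check above boils down to an index-level boolean setting. A minimal sketch of an equivalent lookup, assuming the setting key is "index.shadow_replicas" (the constant IndexMetaData.SETTING_SHADOW_REPLICAS used in the test below) and defaulting to false when it is absent:

    import org.elasticsearch.common.settings.Settings;

    // Sketch only: the wrapper class is hypothetical; it mirrors the check the diff relies on.
    final class ShadowReplicaSettingCheck {
        static boolean usesShadowReplicas(Settings indexSettings) {
            // Missing index metadata (null settings) is treated as "not a shadow-replica index".
            return indexSettings != null && indexSettings.getAsBoolean("index.shadow_replicas", false);
        }
    }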
core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java (53 changes: 52 additions & 1 deletion)
@@ -24,10 +24,15 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingHelper;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.DummyTransportAddress;
 import org.elasticsearch.index.shard.ShardPath;
 import org.elasticsearch.index.store.StoreStats;
@@ -115,7 +120,8 @@ public void testFillShardLevelInfo() {
         };
         HashMap<String, Long> shardSizes = new HashMap<>();
         HashMap<ShardRouting, String> routingToPath = new HashMap<>();
-        InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath);
+        ClusterState state = ClusterState.builder(new ClusterName("blarg")).version(0).build();
+        InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, state);
         assertEquals(2, shardSizes.size());
         assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0)));
         assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_1)));
@@ -129,6 +135,51 @@ public void testFillShardLevelInfo() {
         assertEquals(test1Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_1));
     }
 
+    public void testFillShardsWithShadowIndices() {
+        ShardRouting s0 = ShardRouting.newUnassigned("non-shadow", 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(s0, "node1");
+        ShardRoutingHelper.moveToStarted(s0);
+        Path i0Path = createTempDir().resolve("indices").resolve("non-shadow").resolve("0");
+        CommonStats commonStats0 = new CommonStats();
+        commonStats0.store = new StoreStats(100, 1);
+        ShardRouting s1 = ShardRouting.newUnassigned("shadow", 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(s1, "node2");
+        ShardRoutingHelper.moveToStarted(s1);
+        Path i1Path = createTempDir().resolve("indices").resolve("shadow").resolve("0");
+        CommonStats commonStats1 = new CommonStats();
+        commonStats1.store = new StoreStats(1000, 1);
+        ShardStats[] stats = new ShardStats[] {
+            new ShardStats(s0, new ShardPath(false, i0Path, i0Path, "0xcafe0000", s0.shardId()), commonStats0, null),
+            new ShardStats(s1, new ShardPath(false, i1Path, i1Path, "0xcafe0001", s1.shardId()), commonStats1, null)
+        };
+        HashMap<String, Long> shardSizes = new HashMap<>();
+        HashMap<ShardRouting, String> routingToPath = new HashMap<>();
+        ClusterState state = ClusterState.builder(new ClusterName("blarg"))
+            .version(0)
+            .metaData(MetaData.builder()
+                .put(IndexMetaData.builder("non-shadow")
+                    .settings(Settings.builder()
+                        .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0000")
+                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0))
+                .put(IndexMetaData.builder("shadow")
+                    .settings(Settings.builder()
+                        .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0001")
+                        .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+                    .numberOfShards(1)
+                    .numberOfReplicas(0)))
+            .build();
+        logger.info("--> calling buildShardLevelInfo with state: {}", state);
+        InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, state);
+        assertEquals(2, shardSizes.size());
+        assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s0)));
+        assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s1)));
+        assertEquals(100L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s0)).longValue());
+        assertEquals(0L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s1)).longValue());
+    }
+
     public void testFillDiskUsage() {
         Map<String, DiskUsage> newLeastAvaiableUsages = new HashMap<>();
         Map<String, DiskUsage> newMostAvaiableUsages = new HashMap<>();
