From fd0e25d2a2f3e893f9ae6a2cbf2e8aa372de3cbf Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 13 Apr 2023 10:39:15 -0500 Subject: [PATCH 1/3] [Refactor] ClusterInfo to use j.util.Map instead of ImmutableOpenMap (#7126) With java.util.Map immutability and collection improvements the hppc ImmutableOpenMap is not needed in ClusterInfo. This commit refactors ClusterInfo to use java Maps and Immutable Collections and further trim the dependency on the aging hppc library. Signed-off-by: Nicholas Walter Knize --- .../cluster/ClusterInfoServiceIT.java | 27 ++- .../decider/DiskThresholdDeciderIT.java | 2 +- .../org/opensearch/cluster/ClusterInfo.java | 82 ++++---- .../cluster/InternalClusterInfoService.java | 72 +++---- .../allocation/DiskThresholdMonitor.java | 17 +- .../decider/DiskThresholdDecider.java | 19 +- .../opensearch/cluster/ClusterInfoTests.java | 28 +-- .../opensearch/cluster/DiskUsageTests.java | 15 +- .../allocation/DiskThresholdMonitorTests.java | 121 +++++------ ...dexShardConstraintDeciderOverlapTests.java | 54 +++-- .../RemoteShardsBalancerBaseTestCase.java | 10 +- .../decider/DiskThresholdDeciderTests.java | 188 ++++++++---------- .../DiskThresholdDeciderUnitTests.java | 63 ++---- 13 files changed, 309 insertions(+), 389 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index b133b864a6b82..7ec5daf2b908b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -32,7 +32,6 @@ package org.opensearch.cluster; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; @@ -47,7 +46,6 @@ import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexService; @@ -69,6 +67,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -174,24 +173,24 @@ public void testClusterInfoServiceCollectsInformation() { infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); ClusterInfo info = infoService.refresh(); assertNotNull("info should not be null", info); - ImmutableOpenMap leastUsages = info.getNodeLeastAvailableDiskUsages(); - ImmutableOpenMap mostUsages = info.getNodeMostAvailableDiskUsages(); - ImmutableOpenMap shardSizes = info.shardSizes; + final Map leastUsages = info.getNodeLeastAvailableDiskUsages(); + final Map mostUsages = info.getNodeMostAvailableDiskUsages(); + final Map shardSizes = info.shardSizes; assertNotNull(leastUsages); assertNotNull(shardSizes); assertThat("some usages are populated", leastUsages.values().size(), Matchers.equalTo(2)); assertThat("some shard sizes are populated", shardSizes.values().size(), greaterThan(0)); - for (ObjectCursor usage : leastUsages.values()) { - logger.info("--> usage: {}", usage.value); - assertThat("usage has be retrieved", usage.value.getFreeBytes(), greaterThan(0L)); + for (Map.Entry 
usage : leastUsages.entrySet()) { + logger.info("--> usage: {}", usage.getValue()); + assertThat("usage has be retrieved", usage.getValue().getFreeBytes(), greaterThan(0L)); } - for (ObjectCursor usage : mostUsages.values()) { - logger.info("--> usage: {}", usage.value); - assertThat("usage has be retrieved", usage.value.getFreeBytes(), greaterThan(0L)); + for (DiskUsage usage : mostUsages.values()) { + logger.info("--> usage: {}", usage); + assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L)); } - for (ObjectCursor size : shardSizes.values()) { - logger.info("--> shard size: {}", size.value); - assertThat("shard size is greater than 0", size.value, greaterThanOrEqualTo(0L)); + for (Long size : shardSizes.values()) { + logger.info("--> shard size: {}", size); + assertThat("shard size is greater than 0", size, greaterThanOrEqualTo(0L)); } ClusterService clusterService = internalTestCluster.getInstance(ClusterService.class, internalTestCluster.getClusterManagerName()); ClusterState state = clusterService.state(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index bbe9db135ff5f..ed8c94d10c36f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -574,7 +574,7 @@ private void refreshDiskUsage() { // if the nodes were all under the low watermark already (but unbalanced) then a change in the disk usage doesn't trigger a reroute // even though it's now possible to achieve better balance, so we have to do an explicit reroute. TODO fix this? 
if (StreamSupport.stream(clusterInfoService.getClusterInfo().getNodeMostAvailableDiskUsages().values().spliterator(), false) - .allMatch(cur -> cur.value.getFreeBytes() > WATERMARK_BYTES)) { + .allMatch(cur -> cur.getFreeBytes() > WATERMARK_BYTES)) { assertAcked(client().admin().cluster().prepareReroute()); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index eb728e8fb5035..eb3f1527ba326 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -34,9 +34,7 @@ import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -47,6 +45,7 @@ import org.opensearch.index.store.StoreStats; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -59,15 +58,15 @@ * @opensearch.internal */ public class ClusterInfo implements ToXContentFragment, Writeable { - private final ImmutableOpenMap leastAvailableSpaceUsage; - private final ImmutableOpenMap mostAvailableSpaceUsage; - final ImmutableOpenMap shardSizes; + private final Map leastAvailableSpaceUsage; + private final Map mostAvailableSpaceUsage; + final Map shardSizes; // pkg-private for testing only public static final ClusterInfo EMPTY = new ClusterInfo(); - final ImmutableOpenMap routingToDataPath; - final ImmutableOpenMap reservedSpace; + final Map routingToDataPath; + final Map reservedSpace; protected ClusterInfo() { - this(ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + this(Map.of(), Map.of(), Map.of(), Map.of(), Map.of()); } /** @@ -81,11 +80,11 @@ protected ClusterInfo() { * @see #shardIdentifierFromRouting */ public ClusterInfo( - ImmutableOpenMap leastAvailableSpaceUsage, - ImmutableOpenMap mostAvailableSpaceUsage, - ImmutableOpenMap shardSizes, - ImmutableOpenMap routingToDataPath, - ImmutableOpenMap reservedSpace + final Map leastAvailableSpaceUsage, + final Map mostAvailableSpaceUsage, + final Map shardSizes, + final Map routingToDataPath, + final Map reservedSpace ) { this.leastAvailableSpaceUsage = leastAvailableSpaceUsage; this.shardSizes = shardSizes; @@ -106,48 +105,39 @@ public ClusterInfo(StreamInput in) throws IOException { reservedSpaceMap = Map.of(); } - ImmutableOpenMap.Builder leastBuilder = ImmutableOpenMap.builder(); - this.leastAvailableSpaceUsage = leastBuilder.putAll(leastMap).build(); - ImmutableOpenMap.Builder mostBuilder = ImmutableOpenMap.builder(); - this.mostAvailableSpaceUsage = mostBuilder.putAll(mostMap).build(); - ImmutableOpenMap.Builder sizeBuilder = ImmutableOpenMap.builder(); - this.shardSizes = sizeBuilder.putAll(sizeMap).build(); - ImmutableOpenMap.Builder routingBuilder = ImmutableOpenMap.builder(); - this.routingToDataPath = routingBuilder.putAll(routingMap).build(); - ImmutableOpenMap.Builder reservedSpaceBuilder = ImmutableOpenMap.builder(); - this.reservedSpace = reservedSpaceBuilder.putAll(reservedSpaceMap).build(); + this.leastAvailableSpaceUsage = Collections.unmodifiableMap(leastMap); + this.mostAvailableSpaceUsage = 
Collections.unmodifiableMap(mostMap); + this.shardSizes = Collections.unmodifiableMap(sizeMap); + this.routingToDataPath = Collections.unmodifiableMap(routingMap); + this.reservedSpace = Collections.unmodifiableMap(reservedSpaceMap); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.leastAvailableSpaceUsage.size()); - for (ObjectObjectCursor c : this.leastAvailableSpaceUsage) { - out.writeString(c.key); - c.value.writeTo(out); - } + out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> out.writeLong(v == null ? -1 : v)); out.writeMap(this.routingToDataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString); if (out.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { - out.writeMap(this.reservedSpace); + out.writeMap(this.reservedSpace, (o, v) -> v.writeTo(o), (o, v) -> v.writeTo(o)); } } public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("nodes"); { - for (ObjectObjectCursor c : this.leastAvailableSpaceUsage) { - builder.startObject(c.key); + for (Map.Entry c : this.leastAvailableSpaceUsage.entrySet()) { + builder.startObject(c.getKey()); { // node - builder.field("node_name", c.value.getNodeName()); + builder.field("node_name", c.getValue().getNodeName()); builder.startObject("least_available"); { - c.value.toShortXContent(builder); + c.getValue().toShortXContent(builder); } builder.endObject(); // end "least_available" builder.startObject("most_available"); { - DiskUsage most = this.mostAvailableSpaceUsage.get(c.key); + DiskUsage most = this.mostAvailableSpaceUsage.get(c.getKey()); if (most != null) { most.toShortXContent(builder); } @@ -160,26 +150,26 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); // end "nodes" builder.startObject("shard_sizes"); { - for (ObjectObjectCursor c : this.shardSizes) { - builder.humanReadableField(c.key + "_bytes", c.key, new ByteSizeValue(c.value)); + for (Map.Entry c : this.shardSizes.entrySet()) { + builder.humanReadableField(c.getKey() + "_bytes", c.getKey(), new ByteSizeValue(c.getValue())); } } builder.endObject(); // end "shard_sizes" builder.startObject("shard_paths"); { - for (ObjectObjectCursor c : this.routingToDataPath) { - builder.field(c.key.toString(), c.value); + for (Map.Entry c : this.routingToDataPath.entrySet()) { + builder.field(c.getKey().toString(), c.getValue()); } } builder.endObject(); // end "shard_paths" builder.startArray("reserved_sizes"); { - for (ObjectObjectCursor c : this.reservedSpace) { + for (Map.Entry c : this.reservedSpace.entrySet()) { builder.startObject(); { - builder.field("node_id", c.key.nodeId); - builder.field("path", c.key.path); - c.value.toXContent(builder, params); + builder.field("node_id", c.getKey().nodeId); + builder.field("path", c.getKey().path); + c.getValue().toXContent(builder, params); } builder.endObject(); // NodeAndPath } @@ -192,16 +182,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Returns a node id to disk usage mapping for the path that has the least available space on the node. * Note that this does not take account of reserved space: there may be another path with less available _and unreserved_ space. 
*/ - public ImmutableOpenMap getNodeLeastAvailableDiskUsages() { - return this.leastAvailableSpaceUsage; + public Map getNodeLeastAvailableDiskUsages() { + return Collections.unmodifiableMap(this.leastAvailableSpaceUsage); } /** * Returns a node id to disk usage mapping for the path that has the most available space on the node. * Note that this does not take account of reserved space: there may be another path with more available _and unreserved_ space. */ - public ImmutableOpenMap getNodeMostAvailableDiskUsages() { - return this.mostAvailableSpaceUsage; + public Map getNodeMostAvailableDiskUsages() { + return Collections.unmodifiableMap(this.mostAvailableSpaceUsage); } /** diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 052c320e9b268..0acc7bece439f 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -51,7 +51,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -64,6 +63,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ReceiveTimeoutTransportException; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -108,8 +108,8 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private volatile TimeValue updateFrequency; - private volatile ImmutableOpenMap leastAvailableSpaceUsages; - private volatile ImmutableOpenMap mostAvailableSpaceUsages; + private volatile Map leastAvailableSpaceUsages; + private volatile Map mostAvailableSpaceUsages; private volatile IndicesStatsSummary indicesStatsSummary; // null if this node is not currently the cluster-manager private final AtomicReference refreshAndRescheduleRunnable = new AtomicReference<>(); @@ -120,8 +120,8 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private final List> listeners = new CopyOnWriteArrayList<>(); public InternalClusterInfoService(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) { - this.leastAvailableSpaceUsages = ImmutableOpenMap.of(); - this.mostAvailableSpaceUsages = ImmutableOpenMap.of(); + this.leastAvailableSpaceUsages = Map.of(); + this.mostAvailableSpaceUsages = Map.of(); this.indicesStatsSummary = IndicesStatsSummary.EMPTY; this.threadPool = threadPool; this.client = client; @@ -180,14 +180,14 @@ public void clusterChanged(ClusterChangedEvent event) { if (removedNode.isDataNode()) { logger.trace("Removing node from cluster info: {}", removedNode.getId()); if (leastAvailableSpaceUsages.containsKey(removedNode.getId())) { - ImmutableOpenMap.Builder newMaxUsages = ImmutableOpenMap.builder(leastAvailableSpaceUsages); + Map newMaxUsages = new HashMap<>(leastAvailableSpaceUsages); newMaxUsages.remove(removedNode.getId()); - leastAvailableSpaceUsages = newMaxUsages.build(); + leastAvailableSpaceUsages = Collections.unmodifiableMap(newMaxUsages); } if (mostAvailableSpaceUsages.containsKey(removedNode.getId())) { - ImmutableOpenMap.Builder newMinUsages = 
ImmutableOpenMap.builder(mostAvailableSpaceUsages); + Map newMinUsages = new HashMap<>(mostAvailableSpaceUsages); newMinUsages.remove(removedNode.getId()); - mostAvailableSpaceUsages = newMinUsages.build(); + mostAvailableSpaceUsages = Collections.unmodifiableMap(newMinUsages); } } } @@ -254,16 +254,16 @@ public final ClusterInfo refresh() { final CountDownLatch nodeLatch = updateNodeStats(new ActionListener() { @Override public void onResponse(NodesStatsResponse nodesStatsResponse) { - ImmutableOpenMap.Builder leastAvailableUsagesBuilder = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder mostAvailableUsagesBuilder = ImmutableOpenMap.builder(); + final Map leastAvailableUsagesBuilder = new HashMap<>(); + final Map mostAvailableUsagesBuilder = new HashMap<>(); fillDiskUsagePerNode( logger, adjustNodesStats(nodesStatsResponse.getNodes()), leastAvailableUsagesBuilder, mostAvailableUsagesBuilder ); - leastAvailableSpaceUsages = leastAvailableUsagesBuilder.build(); - mostAvailableSpaceUsages = mostAvailableUsagesBuilder.build(); + leastAvailableSpaceUsages = Collections.unmodifiableMap(leastAvailableUsagesBuilder); + mostAvailableSpaceUsages = Collections.unmodifiableMap(mostAvailableUsagesBuilder); } @Override @@ -279,29 +279,25 @@ public void onFailure(Exception e) { logger.warn("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e); } // we empty the usages list, to be safe - we don't know what's going on. - leastAvailableSpaceUsages = ImmutableOpenMap.of(); - mostAvailableSpaceUsages = ImmutableOpenMap.of(); + leastAvailableSpaceUsages = Map.of(); + mostAvailableSpaceUsages = Map.of(); } } }); - final CountDownLatch indicesLatch = updateIndicesStats(new ActionListener() { + final CountDownLatch indicesLatch = updateIndicesStats(new ActionListener<>() { @Override public void onResponse(IndicesStatsResponse indicesStatsResponse) { final ShardStats[] stats = indicesStatsResponse.getShards(); - final ImmutableOpenMap.Builder shardSizeByIdentifierBuilder = ImmutableOpenMap.builder(); - final ImmutableOpenMap.Builder dataPathByShardRoutingBuilder = ImmutableOpenMap.builder(); + final Map shardSizeByIdentifierBuilder = new HashMap<>(); + final Map dataPathByShardRoutingBuilder = new HashMap<>(); final Map reservedSpaceBuilders = new HashMap<>(); buildShardLevelInfo(logger, stats, shardSizeByIdentifierBuilder, dataPathByShardRoutingBuilder, reservedSpaceBuilders); - final ImmutableOpenMap.Builder rsrvdSpace = ImmutableOpenMap.builder(); + final Map rsrvdSpace = new HashMap<>(); reservedSpaceBuilders.forEach((nodeAndPath, builder) -> rsrvdSpace.put(nodeAndPath, builder.build())); - indicesStatsSummary = new IndicesStatsSummary( - shardSizeByIdentifierBuilder.build(), - dataPathByShardRoutingBuilder.build(), - rsrvdSpace.build() - ); + indicesStatsSummary = new IndicesStatsSummary(shardSizeByIdentifierBuilder, dataPathByShardRoutingBuilder, rsrvdSpace); } @Override @@ -360,9 +356,9 @@ public void addListener(Consumer clusterInfoConsumer) { static void buildShardLevelInfo( Logger logger, ShardStats[] stats, - ImmutableOpenMap.Builder shardSizes, - ImmutableOpenMap.Builder newShardRoutingToDataPath, - Map reservedSpaceByShard + final Map shardSizes, + final Map newShardRoutingToDataPath, + final Map reservedSpaceByShard ) { for (ShardStats s : stats) { final ShardRouting shardRouting = s.getShardRouting(); @@ -392,8 +388,8 @@ static void buildShardLevelInfo( static void fillDiskUsagePerNode( Logger logger, List nodeStatsArray, - ImmutableOpenMap.Builder newLeastAvailableUsages, - 
ImmutableOpenMap.Builder newMostAvailableUsages + final Map newLeastAvailableUsages, + final Map newMostAvailableUsages ) { for (NodeStats nodeStats : nodeStatsArray) { if (nodeStats.getFs() == null) { @@ -475,20 +471,16 @@ static void fillDiskUsagePerNode( * @opensearch.internal */ private static class IndicesStatsSummary { - static final IndicesStatsSummary EMPTY = new IndicesStatsSummary( - ImmutableOpenMap.of(), - ImmutableOpenMap.of(), - ImmutableOpenMap.of() - ); + static final IndicesStatsSummary EMPTY = new IndicesStatsSummary(Map.of(), Map.of(), Map.of()); - final ImmutableOpenMap shardSizes; - final ImmutableOpenMap shardRoutingToDataPath; - final ImmutableOpenMap reservedSpace; + final Map shardSizes; + final Map shardRoutingToDataPath; + final Map reservedSpace; IndicesStatsSummary( - ImmutableOpenMap shardSizes, - ImmutableOpenMap shardRoutingToDataPath, - ImmutableOpenMap reservedSpace + final Map shardSizes, + final Map shardRoutingToDataPath, + final Map reservedSpace ) { this.shardSizes = shardSizes; this.shardRoutingToDataPath = shardRoutingToDataPath; diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java index 6f63aff2f3a90..5bf1a3b199919 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -32,8 +32,6 @@ package org.opensearch.cluster.routing.allocation; -import com.carrotsearch.hppc.ObjectLookupContainer; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -63,6 +61,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -145,7 +144,7 @@ public void onNewInfo(ClusterInfo info) { return; } - final ImmutableOpenMap usages = info.getNodeLeastAvailableDiskUsages(); + final Map usages = info.getNodeLeastAvailableDiskUsages(); if (usages == null) { logger.trace("skipping monitor as no disk usage information is available"); checkFinished(); @@ -159,7 +158,7 @@ public void onNewInfo(ClusterInfo info) { final long currentTimeMillis = currentTimeMillisSupplier.getAsLong(); // Clean up nodes that have been removed from the cluster - final ObjectLookupContainer nodes = usages.keys(); + final Set nodes = usages.keySet(); cleanUpRemovedNodes(nodes, nodesOverLowThreshold); cleanUpRemovedNodes(nodes, nodesOverHighThreshold); cleanUpRemovedNodes(nodes, nodesOverHighThresholdAndRelocating); @@ -172,9 +171,9 @@ public void onNewInfo(ClusterInfo info) { final List usagesOverHighThreshold = new ArrayList<>(); - for (final ObjectObjectCursor entry : usages) { - final String node = entry.key; - final DiskUsage usage = entry.value; + for (final Map.Entry entry : usages.entrySet()) { + final String node = entry.getKey(); + final DiskUsage usage = entry.getValue(); final RoutingNode routingNode = routingNodes.node(node); if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes() @@ -428,7 +427,7 @@ long sizeOfRelocatingShards(RoutingNode routingNode, DiskUsage diskUsage, Cluste private void markNodesMissingUsageIneligibleForRelease( RoutingNodes routingNodes, 
- ImmutableOpenMap usages, + Map usages, Set indicesToMarkIneligibleForAutoRelease ) { for (RoutingNode routingNode : routingNodes) { @@ -488,7 +487,7 @@ protected void updateIndicesReadOnly(Set indicesToUpdate, ActionListener .execute(ActionListener.map(wrappedListener, r -> null)); } - private static void cleanUpRemovedNodes(ObjectLookupContainer nodesToKeep, Set nodesToCleanUp) { + private static void cleanUpRemovedNodes(Set nodesToKeep, Set nodesToCleanUp) { for (String node : nodesToCleanUp) { if (nodesToKeep.contains(node) == false) { nodesToCleanUp.remove(node); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index ffc80fbc973cb..ddd5e9274f08b 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.routing.allocation.decider; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; @@ -49,7 +48,6 @@ import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.Strings; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -59,6 +57,7 @@ import org.opensearch.snapshots.SnapshotShardSizeInfo; import java.util.List; +import java.util.Map; import java.util.Set; import static org.opensearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING; @@ -168,7 +167,7 @@ public static long sizeOfRelocatingShards( @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { ClusterInfo clusterInfo = allocation.clusterInfo(); - ImmutableOpenMap usages = clusterInfo.getNodeMostAvailableDiskUsages(); + Map usages = clusterInfo.getNodeMostAvailableDiskUsages(); final Decision decision = earlyTerminate(allocation, usages); if (decision != null) { return decision; @@ -424,7 +423,7 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]"); } final ClusterInfo clusterInfo = allocation.clusterInfo(); - final ImmutableOpenMap usages = clusterInfo.getNodeLeastAvailableDiskUsages(); + final Map usages = clusterInfo.getNodeLeastAvailableDiskUsages(); final Decision decision = earlyTerminate(allocation, usages); if (decision != null) { return decision; @@ -520,7 +519,7 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl private DiskUsageWithRelocations getDiskUsage( RoutingNode node, RoutingAllocation allocation, - ImmutableOpenMap usages, + final Map usages, boolean subtractLeavingShards ) { DiskUsage usage = usages.get(node.nodeId()); @@ -566,15 +565,15 @@ private DiskUsageWithRelocations getDiskUsage( * @param usages Map of nodeId to DiskUsage for all known nodes * @return DiskUsage representing given node using the average disk usage */ - DiskUsage averageUsage(RoutingNode node, ImmutableOpenMap usages) { + 
DiskUsage averageUsage(RoutingNode node, final Map usages) { if (usages.size() == 0) { return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", 0, 0); } long totalBytes = 0; long freeBytes = 0; - for (ObjectCursor du : usages.values()) { - totalBytes += du.value.getTotalBytes(); - freeBytes += du.value.getFreeBytes(); + for (DiskUsage du : usages.values()) { + totalBytes += du.getTotalBytes(); + freeBytes += du.getFreeBytes(); } return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", totalBytes / usages.size(), freeBytes / usages.size()); } @@ -598,7 +597,7 @@ DiskUsage averageUsage(RoutingNode node, ImmutableOpenMap usa return newUsage.getFreeDiskAsPercentage(); } - private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap usages) { + private Decision earlyTerminate(RoutingAllocation allocation, final Map usages) { // Always allow allocation if the decider is disabled if (diskThresholdSettings.isEnabled() == false) { return allocation.decision(Decision.YES, NAME, "the disk threshold decider is disabled"); diff --git a/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java b/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java index 4abbef0c19374..a32d6e35d0182 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterInfoTests.java @@ -34,11 +34,13 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; +import java.util.HashMap; +import java.util.Map; + public class ClusterInfoTests extends OpenSearchTestCase { public void testSerialization() throws Exception { @@ -60,9 +62,9 @@ public void testSerialization() throws Exception { assertEquals(clusterInfo.reservedSpace, result.reservedSpace); } - private static ImmutableOpenMap randomDiskUsage() { + private static Map randomDiskUsage() { int numEntries = randomIntBetween(0, 128); - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(numEntries); + final Map builder = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { String key = randomAlphaOfLength(32); DiskUsage diskUsage = new DiskUsage( @@ -74,34 +76,34 @@ private static ImmutableOpenMap randomDiskUsage() { ); builder.put(key, diskUsage); } - return builder.build(); + return builder; } - private static ImmutableOpenMap randomShardSizes() { + private static Map randomShardSizes() { int numEntries = randomIntBetween(0, 128); - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(numEntries); + final Map builder = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { String key = randomAlphaOfLength(32); long shardSize = randomIntBetween(0, Integer.MAX_VALUE); builder.put(key, shardSize); } - return builder.build(); + return builder; } - private static ImmutableOpenMap randomRoutingToDataPath() { + private static Map randomRoutingToDataPath() { int numEntries = randomIntBetween(0, 128); - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(numEntries); + final Map builder = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { ShardId shardId = new ShardId(randomAlphaOfLength(32), randomAlphaOfLength(32), randomIntBetween(0, Integer.MAX_VALUE)); ShardRouting shardRouting = 
TestShardRouting.newShardRouting(shardId, null, randomBoolean(), ShardRoutingState.UNASSIGNED); builder.put(shardRouting, randomAlphaOfLength(32)); } - return builder.build(); + return builder; } - private static ImmutableOpenMap randomReservedSpace() { + private static Map randomReservedSpace() { int numEntries = randomIntBetween(0, 128); - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(numEntries); + final Map builder = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { final ClusterInfo.NodeAndPath key = new ClusterInfo.NodeAndPath(randomAlphaOfLength(10), randomAlphaOfLength(10)); final ClusterInfo.ReservedSpace.Builder valueBuilder = new ClusterInfo.ReservedSpace.Builder(); @@ -111,7 +113,7 @@ private static ImmutableOpenMap shardSizes = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder routingToPath = ImmutableOpenMap.builder(); - ClusterState state = ClusterState.builder(new ClusterName("blarg")).version(0).build(); + final Map shardSizes = new HashMap<>(); + final Map routingToPath = new HashMap<>(); InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, new HashMap<>()); assertEquals(2, shardSizes.size()); assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0))); @@ -155,8 +154,8 @@ public void testFillShardLevelInfo() { } public void testFillDiskUsage() { - ImmutableOpenMap.Builder newLeastAvaiableUsages = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder newMostAvaiableUsages = ImmutableOpenMap.builder(); + final Map newLeastAvaiableUsages = new HashMap<>(); + final Map newMostAvaiableUsages = new HashMap<>(); FsInfo.Path[] node1FSInfo = new FsInfo.Path[] { new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), new FsInfo.Path("/least", "/dev/sdb", 200, 190, 70), @@ -261,8 +260,8 @@ public void testFillDiskUsage() { } public void testFillDiskUsageSomeInvalidValues() { - ImmutableOpenMap.Builder newLeastAvailableUsages = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder newMostAvailableUsages = ImmutableOpenMap.builder(); + final Map newLeastAvailableUsages = new HashMap<>(); + final Map newMostAvailableUsages = new HashMap<>(); FsInfo.Path[] node1FSInfo = new FsInfo.Path[] { new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80), new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1), diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index e4f3c4eeeb903..21d891bdbc317 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -49,7 +49,6 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.Priority; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; @@ -58,7 +57,9 @@ import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -137,19 +138,19 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC } }; - ImmutableOpenMap.Builder builder = 
ImmutableOpenMap.builder(); + Map builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 4)); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 30)); - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertFalse(reroute.get()); assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indices.get()); indices.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 4)); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 5)); currentTime.addAndGet(randomLongBetween(60001, 120000)); - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertTrue(reroute.get()); assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indices.get()); IndexMetadata indexMetadata = IndexMetadata.builder(clusterState.metadata().index("test_2")) @@ -200,10 +201,10 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC indices.set(null); reroute.set(false); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 4)); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 5)); - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertTrue(reroute.get()); assertEquals(Collections.singleton("test_1"), indices.get()); } @@ -232,16 +233,13 @@ protected void updateIndicesReadOnly(Set indicesToMarkReadOnly, ActionLi } }; - final ImmutableOpenMap.Builder allDisksOkBuilder; - allDisksOkBuilder = ImmutableOpenMap.builder(); - allDisksOkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 50)); - allDisksOkBuilder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 50)); - final ImmutableOpenMap allDisksOk = allDisksOkBuilder.build(); + final Map allDisksOk = new HashMap<>(); + allDisksOk.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, 50)); + allDisksOk.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 50)); - final ImmutableOpenMap.Builder oneDiskAboveWatermarkBuilder = ImmutableOpenMap.builder(); - oneDiskAboveWatermarkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 9))); - oneDiskAboveWatermarkBuilder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 50)); - final ImmutableOpenMap oneDiskAboveWatermark = oneDiskAboveWatermarkBuilder.build(); + final Map oneDiskAboveWatermark = new HashMap<>(); + oneDiskAboveWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 9))); + oneDiskAboveWatermark.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, 50)); // should not reroute when all disks are ok currentTime.addAndGet(randomLongBetween(0, 120000)); @@ -308,12 +306,11 @@ protected void updateIndicesReadOnly(Set indicesToMarkReadOnly, ActionLi assertNull(listenerReference.get()); // should reroute again when one disk has reserved space that pushes it over the high watermark - final ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(1); - builder.put( + final Map reservedSpaces = new HashMap<>(1); + reservedSpaces.put( new ClusterInfo.NodeAndPath("node1", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("baz", "quux", 0), between(41, 100)).build() ); - final ImmutableOpenMap reservedSpaces = builder.build(); currentTime.addAndGet( 
randomLongBetween( @@ -324,7 +321,6 @@ protected void updateIndicesReadOnly(Set indicesToMarkReadOnly, ActionLi monitor.onNewInfo(clusterInfo(allDisksOk, reservedSpaces)); assertNotNull(listenerReference.get()); listenerReference.getAndSet(null).onResponse(null); - } public void testAutoReleaseIndices() { @@ -348,19 +344,17 @@ public void testAutoReleaseIndices() { ); assertThat(clusterState.getRoutingTable().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(8)); - final ImmutableOpenMap.Builder reservedSpacesBuilder = ImmutableOpenMap - .builder(); + final Map reservedSpaces = new HashMap<>(); final int reservedSpaceNode1 = between(0, 10); - reservedSpacesBuilder.put( + reservedSpaces.put( new ClusterInfo.NodeAndPath("node1", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), reservedSpaceNode1).build() ); final int reservedSpaceNode2 = between(0, 10); - reservedSpacesBuilder.put( + reservedSpaces.put( new ClusterInfo.NodeAndPath("node2", "/foo/bar"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), reservedSpaceNode2).build() ); - ImmutableOpenMap reservedSpaces = reservedSpacesBuilder.build(); DiskThresholdMonitor monitor = new DiskThresholdMonitor( Settings.EMPTY, @@ -392,20 +386,20 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC }; indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); + Map builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(0, 4))); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, between(0, 4))); - monitor.onNewInfo(clusterInfo(builder.build(), reservedSpaces)); + monitor.onNewInfo(clusterInfo(builder, reservedSpaces)); assertEquals(new HashSet<>(Arrays.asList("test_1", "test_2")), indicesToMarkReadOnly.get()); assertNull(indicesToRelease.get()); // Reserved space is ignored when applying block indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 90))); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, between(5, 90))); - monitor.onNewInfo(clusterInfo(builder.build(), reservedSpaces)); + monitor.onNewInfo(clusterInfo(builder, reservedSpaces)); assertNull(indicesToMarkReadOnly.get()); assertNull(indicesToRelease.get()); @@ -454,66 +448,66 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC // When free disk on any of node1 or node2 goes below 5% flood watermark, then apply index block on indices not having the block indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(0, 100))); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, between(0, 4))); - monitor.onNewInfo(clusterInfo(builder.build(), reservedSpaces)); + monitor.onNewInfo(clusterInfo(builder, reservedSpaces)); assertThat(indicesToMarkReadOnly.get(), contains("test_1")); assertNull(indicesToRelease.get()); // When free disk on node1 and node2 goes above 10% high watermark then release index block, ignoring reserved space indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", 
"/foo/bar", 100, between(10, 100))); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, between(10, 100))); - monitor.onNewInfo(clusterInfo(builder.build(), reservedSpaces)); + monitor.onNewInfo(clusterInfo(builder, reservedSpaces)); assertNull(indicesToMarkReadOnly.get()); assertThat(indicesToRelease.get(), contains("test_2")); // When no usage information is present for node2, we don't release the block indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(0, 4))); - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertThat(indicesToMarkReadOnly.get(), contains("test_1")); assertNull(indicesToRelease.get()); // When disk usage on one node is between the high and flood-stage watermarks, nothing changes indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 9))); builder.put("node2", new DiskUsage("node2", "node2", "/foo/bar", 100, between(5, 100))); if (randomBoolean()) { builder.put("node3", new DiskUsage("node3", "node3", "/foo/bar", 100, between(0, 100))); } - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertNull(indicesToMarkReadOnly.get()); assertNull(indicesToRelease.get()); // When disk usage on one node is missing and the other is below the high watermark, nothing changes indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 100))); if (randomBoolean()) { builder.put("node3", new DiskUsage("node3", "node3", "/foo/bar", 100, between(0, 100))); } - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertNull(indicesToMarkReadOnly.get()); assertNull(indicesToRelease.get()); // When disk usage on one node is missing and the other is above the flood-stage watermark, affected indices are blocked indicesToMarkReadOnly.set(null); indicesToRelease.set(null); - builder = ImmutableOpenMap.builder(); + builder = new HashMap<>(); builder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(0, 4))); if (randomBoolean()) { builder.put("node3", new DiskUsage("node3", "node3", "/foo/bar", 100, between(0, 100))); } - monitor.onNewInfo(clusterInfo(builder.build())); + monitor.onNewInfo(clusterInfo(builder)); assertThat(indicesToMarkReadOnly.get(), contains("test_1")); assertNull(indicesToRelease.get()); } @@ -565,22 +559,18 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC } }; - final ImmutableOpenMap.Builder allDisksOkBuilder; - allDisksOkBuilder = ImmutableOpenMap.builder(); - allDisksOkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(15, 100))); - final ImmutableOpenMap allDisksOk = allDisksOkBuilder.build(); + final Map allDisksOk; + allDisksOk = new HashMap<>(); + allDisksOk.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(15, 100))); - final ImmutableOpenMap.Builder aboveLowWatermarkBuilder = ImmutableOpenMap.builder(); - aboveLowWatermarkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(10, 14))); - final ImmutableOpenMap aboveLowWatermark = 
aboveLowWatermarkBuilder.build(); + final Map aboveLowWatermark = new HashMap<>(); + aboveLowWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(10, 14))); - final ImmutableOpenMap.Builder aboveHighWatermarkBuilder = ImmutableOpenMap.builder(); - aboveHighWatermarkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 9))); - final ImmutableOpenMap aboveHighWatermark = aboveHighWatermarkBuilder.build(); + final Map aboveHighWatermark = new HashMap<>(); + aboveHighWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(5, 9))); - final ImmutableOpenMap.Builder aboveFloodStageWatermarkBuilder = ImmutableOpenMap.builder(); - aboveFloodStageWatermarkBuilder.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(0, 4))); - final ImmutableOpenMap aboveFloodStageWatermark = aboveFloodStageWatermarkBuilder.build(); + final Map aboveFloodStageWatermark = new HashMap<>(); + aboveFloodStageWatermark.put("node1", new DiskUsage("node1", "node1", "/foo/bar", 100, between(0, 4))); assertNoLogging(monitor, allDisksOk); @@ -727,13 +717,12 @@ protected void setIndexCreateBlock(ActionListener listener, boolean indexC } }; - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(); - monitor.onNewInfo(clusterInfo(builder.build())); + final Map builder = new HashMap<>(); + monitor.onNewInfo(clusterInfo(builder)); assertTrue(countBlocksCalled.get() == 0); } - private void assertNoLogging(DiskThresholdMonitor monitor, ImmutableOpenMap diskUsages) - throws IllegalAccessException { + private void assertNoLogging(DiskThresholdMonitor monitor, final Map diskUsages) throws IllegalAccessException { try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( @@ -760,26 +749,26 @@ private void assertNoLogging(DiskThresholdMonitor monitor, ImmutableOpenMap diskUsages, String message) + private void assertRepeatedWarningMessages(DiskThresholdMonitor monitor, final Map diskUsages, String message) throws IllegalAccessException { for (int i = between(1, 3); i >= 0; i--) { assertLogging(monitor, diskUsages, Level.WARN, message); } } - private void assertSingleWarningMessage(DiskThresholdMonitor monitor, ImmutableOpenMap diskUsages, String message) + private void assertSingleWarningMessage(DiskThresholdMonitor monitor, final Map diskUsages, String message) throws IllegalAccessException { assertLogging(monitor, diskUsages, Level.WARN, message); assertNoLogging(monitor, diskUsages); } - private void assertSingleInfoMessage(DiskThresholdMonitor monitor, ImmutableOpenMap diskUsages, String message) + private void assertSingleInfoMessage(DiskThresholdMonitor monitor, final Map diskUsages, String message) throws IllegalAccessException { assertLogging(monitor, diskUsages, Level.INFO, message); assertNoLogging(monitor, diskUsages); } - private void assertLogging(DiskThresholdMonitor monitor, ImmutableOpenMap diskUsages, Level level, String message) + private void assertLogging(DiskThresholdMonitor monitor, final Map diskUsages, Level level, String message) throws IllegalAccessException { try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(DiskThresholdMonitor.class))) { mockAppender.start(); @@ -801,13 +790,13 @@ private void assertLogging(DiskThresholdMonitor monitor, ImmutableOpenMap diskUsages) { - return clusterInfo(diskUsages, ImmutableOpenMap.of()); + private 
static ClusterInfo clusterInfo(final Map diskUsages) { + return clusterInfo(diskUsages, Map.of()); } private static ClusterInfo clusterInfo( - ImmutableOpenMap diskUsages, - ImmutableOpenMap reservedSpace + final Map diskUsages, + final Map reservedSpace ) { return new ClusterInfo(diskUsages, null, null, null, reservedSpace); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java index 24c6dcff42849..7112af6b4efc0 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java @@ -15,7 +15,6 @@ import org.opensearch.cluster.OpenSearchAllocationWithConstraintsTestCase; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; import org.opensearch.test.VersionUtils; @@ -50,28 +49,22 @@ public void testHighWatermarkBreachWithLowShardCount() { .build(); // Build Shard size and disk usages - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node_0", new DiskUsage("node_0", "node_0", "/dev/null", 100, 80)); // 20% used - usagesBuilder.put("node_1", new DiskUsage("node_1", "node_1", "/dev/null", 100, 55)); // 45% used - usagesBuilder.put("node_2", new DiskUsage("node_2", "node_2", "/dev/null", 100, 35)); // 65% used - usagesBuilder.put("high_watermark_node_0", new DiskUsage("high_watermark_node_0", "high_watermark_node_0", "/dev/null", 100, 10)); // 90% - // used - - ImmutableOpenMap usages = usagesBuilder.build(); - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - clusterState.getRoutingTable().allShards().forEach(shard -> shardSizesBuilder.put(shardIdentifierFromRouting(shard), 1L)); // Each - // shard - // is 1 - // byte - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); - - final ImmutableOpenMap reservedSpace = new ImmutableOpenMap.Builder< - ClusterInfo.NodeAndPath, - ClusterInfo.ReservedSpace>().fPut(getNodeAndDevNullPath("node_0"), getReservedSpace()) - .fPut(getNodeAndDevNullPath("node_1"), getReservedSpace()) - .fPut(getNodeAndDevNullPath("node_2"), getReservedSpace()) - .fPut(getNodeAndDevNullPath("high_watermark_node_0"), getReservedSpace()) - .build(); + final Map usages = new HashMap<>(); + usages.put("node_0", new DiskUsage("node_0", "node_0", "/dev/null", 100, 80)); // 20% used + usages.put("node_1", new DiskUsage("node_1", "node_1", "/dev/null", 100, 55)); // 45% used + usages.put("node_2", new DiskUsage("node_2", "node_2", "/dev/null", 100, 35)); // 65% used + usages.put("high_watermark_node_0", new DiskUsage("high_watermark_node_0", "high_watermark_node_0", "/dev/null", 100, 10)); // 90% + // used + final Map shardSizes = new HashMap<>(); + clusterState.getRoutingTable().allShards().forEach(shard -> shardSizes.put(shardIdentifierFromRouting(shard), 1L)); // Each + // shard + // is 1 + // byte + final Map reservedSpace = new HashMap<>(); + reservedSpace.put(getNodeAndDevNullPath("node_0"), getReservedSpace()); + reservedSpace.put(getNodeAndDevNullPath("node_1"), getReservedSpace()); + reservedSpace.put(getNodeAndDevNullPath("node_2"), getReservedSpace()); + 
reservedSpace.put(getNodeAndDevNullPath("high_watermark_node_0"), getReservedSpace()); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes, reservedSpace); ClusterInfoService cis = () -> clusterInfo; allocation = createAllocationService(settings, cis); @@ -84,10 +77,9 @@ public void testHighWatermarkBreachWithLowShardCount() { /* Shard sizes that would breach high watermark on node_2 if allocated. */ addIndices("big_index_", 1, 10, 0); - ImmutableOpenMap.Builder bigIndexShardSizeBuilder = ImmutableOpenMap.builder(shardSizes); - clusterState.getRoutingNodes().unassigned().forEach(shard -> bigIndexShardSizeBuilder.put(shardIdentifierFromRouting(shard), 20L)); - shardSizes = bigIndexShardSizeBuilder.build(); - final ClusterInfo bigIndexClusterInfo = new DevNullClusterInfo(usages, usages, shardSizes, reservedSpace); + final Map bigIndexShardSizes = new HashMap<>(shardSizes); + clusterState.getRoutingNodes().unassigned().forEach(shard -> bigIndexShardSizes.put(shardIdentifierFromRouting(shard), 20L)); + final ClusterInfo bigIndexClusterInfo = new DevNullClusterInfo(usages, usages, bigIndexShardSizes, reservedSpace); cis = () -> bigIndexClusterInfo; allocation = createAllocationService(settings, cis); @@ -179,10 +171,10 @@ public void testZoneUnbalanced() { */ public static class DevNullClusterInfo extends ClusterInfo { public DevNullClusterInfo( - ImmutableOpenMap leastAvailableSpaceUsage, - ImmutableOpenMap mostAvailableSpaceUsage, - ImmutableOpenMap shardSizes, - ImmutableOpenMap reservedSpace + final Map leastAvailableSpaceUsage, + final Map mostAvailableSpaceUsage, + final Map shardSizes, + final Map reservedSpace ) { super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null, reservedSpace); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index 67278c56b2f78..9d7d0ebc5b2b1 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -29,7 +29,6 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDecider; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.SuppressForbidden; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexModule; @@ -38,6 +37,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; @@ -235,11 +235,11 @@ public ShardsAllocator createShardAllocator(Settings settings) { */ public static class DevNullClusterInfo extends ClusterInfo { public DevNullClusterInfo( - ImmutableOpenMap leastAvailableSpaceUsage, - ImmutableOpenMap mostAvailableSpaceUsage, - ImmutableOpenMap shardSizes + final Map leastAvailableSpaceUsage, + final Map mostAvailableSpaceUsage, + final Map shardSizes ) { - super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null, ImmutableOpenMap.of()); + super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null, Map.of()); } @Override diff --git 
a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index c3f54fa7580ac..da50dd53b7d54 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -113,17 +113,15 @@ public void testDiskThreshold() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8) .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10)); // 90% used - usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 35)); // 65% used - usagesBuilder.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 60)); // 40% used - usagesBuilder.put("node4", new DiskUsage("node4", "node4", "/dev/null", 100, 80)); // 20% used - ImmutableOpenMap usages = usagesBuilder.build(); - - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - shardSizesBuilder.put("[test][0][p]", 10L); // 10 bytes - shardSizesBuilder.put("[test][0][r]", 10L); - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10)); // 90% used + usages.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 35)); // 65% used + usages.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 60)); // 40% used + usages.put("node4", new DiskUsage("node4", "node4", "/dev/null", 100, 80)); // 20% used + + final Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 10L); // 10 bytes + shardSizes.put("[test][0][r]", 10L); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -294,18 +292,16 @@ public void testDiskThresholdWithAbsoluteSizes() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "5b") .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10)); // 90% used - usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 10)); // 90% used - usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 60)); // 40% used - usagesBuilder.put("node4", new DiskUsage("node4", "n4", "/dev/null", 100, 80)); // 20% used - usagesBuilder.put("node5", new DiskUsage("node5", "n5", "/dev/null", 100, 85)); // 15% used - ImmutableOpenMap usages = usagesBuilder.build(); - - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - shardSizesBuilder.put("[test][0][p]", 10L); // 10 bytes - shardSizesBuilder.put("[test][0][r]", 10L); - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10)); // 90% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 10)); // 90% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 60)); // 40% used + usages.put("node4", new DiskUsage("node4", "n4", "/dev/null", 100, 80)); // 20% used + usages.put("node5", new 
DiskUsage("node5", "n5", "/dev/null", 100, 85)); // 15% used + + final Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 10L); // 10 bytes + shardSizes.put("[test][0][r]", 10L); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -360,9 +356,7 @@ public void testDiskThresholdWithAbsoluteSizes() { logger.info("--> nodeWithoutPrimary: {}", nodeWithoutPrimary); // Make node without the primary now habitable to replicas - usagesBuilder = ImmutableOpenMap.builder(usages); - usagesBuilder.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", "/dev/null", 100, 35)); // 65% used - usages = usagesBuilder.build(); + usages.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", "/dev/null", 100, 35)); // 65% used final ClusterInfo clusterInfo2 = new DevNullClusterInfo(usages, usages, shardSizes); cis = () -> { logger.info("--> calling fake getClusterInfo"); @@ -540,14 +534,15 @@ public void testDiskThresholdWithShardSizes() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "71%") .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31)); // 69% used - usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 1)); // 99% used - ImmutableOpenMap usages = usagesBuilder.build(); + final Map usages = Map.of( + "node1", + new DiskUsage("node1", "n1", "/dev/null", 100, 31), // 69% used + "node2", + new DiskUsage("node2", "n2", "/dev/null", 100, 1) + ); // 99% used - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - shardSizesBuilder.put("[test][0][p]", 10L); // 10 bytes - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); + final Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 10L); // 10 bytes final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); AllocationDeciders deciders = new AllocationDeciders( @@ -612,15 +607,13 @@ public void testUnknownDiskUsage() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.85) .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50)); // 50% used - usagesBuilder.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 0)); // 100% used - ImmutableOpenMap usages = usagesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50)); // 50% used + usages.put("node3", new DiskUsage("node3", "node3", "/dev/null", 100, 0)); // 100% used - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - shardSizesBuilder.put("[test][0][p]", 10L); // 10 bytes - shardSizesBuilder.put("[test][0][r]", 10L); // 10 bytes - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); + final Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 10L); // 10 bytes + shardSizes.put("[test][0][r]", 10L); // 10 bytes final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); AllocationDeciders deciders = new AllocationDeciders( @@ -688,11 +681,11 @@ public void testAverageUsage() { RoutingNode rn = new RoutingNode("node1", newNode("node1")); DiskThresholdDecider decider = makeDecider(Settings.EMPTY); - 
ImmutableOpenMap.Builder usages = ImmutableOpenMap.builder(); + final Map usages = new HashMap<>(); usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 50)); // 50% used usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used - DiskUsage node1Usage = decider.averageUsage(rn, usages.build()); + DiskUsage node1Usage = decider.averageUsage(rn, usages); assertThat(node1Usage.getTotalBytes(), equalTo(100L)); assertThat(node1Usage.getFreeBytes(), equalTo(25L)); } @@ -718,18 +711,16 @@ public void testShardRelocationsTakenIntoAccount() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8) .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used - usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40)); // 60% used - usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 40)); // 60% used - ImmutableOpenMap usages = usagesBuilder.build(); - - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - shardSizesBuilder.put("[test][0][p]", 14L); // 14 bytes - shardSizesBuilder.put("[test][0][r]", 14L); - shardSizesBuilder.put("[test2][0][p]", 1L); // 1 bytes - shardSizesBuilder.put("[test2][0][r]", 1L); - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40)); // 60% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 40)); // 60% used + + final Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 14L); // 14 bytes + shardSizes.put("[test][0][r]", 14L); + shardSizes.put("[test2][0][p]", 1L); // 1 bytes + shardSizes.put("[test2][0][r]", 1L); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); DiskThresholdDecider decider = makeDecider(diskSettings); @@ -801,18 +792,16 @@ public void testShardRelocationsTakenIntoAccount() { logShardStates(clusterState); } - final ImmutableOpenMap.Builder overfullUsagesBuilder = ImmutableOpenMap.builder(); - overfullUsagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used - overfullUsagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40)); // 60% used - overfullUsagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used - final ImmutableOpenMap overfullUsages = overfullUsagesBuilder.build(); + final Map overfullUsages = new HashMap<>(); + overfullUsages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used + overfullUsages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 40)); // 60% used + overfullUsages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 0)); // 100% used - final ImmutableOpenMap.Builder largerShardSizesBuilder = ImmutableOpenMap.builder(); - largerShardSizesBuilder.put("[test][0][p]", 14L); - largerShardSizesBuilder.put("[test][0][r]", 14L); - largerShardSizesBuilder.put("[test2][0][p]", 2L); - largerShardSizesBuilder.put("[test2][0][r]", 2L); - final ImmutableOpenMap largerShardSizes = largerShardSizesBuilder.build(); + final Map largerShardSizes = new HashMap<>(); + largerShardSizes.put("[test][0][p]", 14L); + largerShardSizes.put("[test][0][r]", 14L); + largerShardSizes.put("[test2][0][p]", 
2L); + largerShardSizes.put("[test2][0][r]", 2L); final ClusterInfo overfullClusterInfo = new DevNullClusterInfo(overfullUsages, overfullUsages, largerShardSizes); @@ -872,10 +861,10 @@ public void testShardRelocationsTakenIntoAccount() { usages, usages, shardSizes, - (new ImmutableOpenMap.Builder()).fPut( + Map.of( new ClusterInfo.NodeAndPath("node1", "/dev/null"), new ClusterInfo.ReservedSpace.Builder().add(new ShardId("", "", 0), between(51, 200)).build() - ).build() + ) ) ); clusterState = applyStartedShardsUntilNoChange(clusterState, strategy); @@ -896,16 +885,14 @@ public void testCanRemainWithShardRelocatingAway() { .build(); // We have an index with 2 primary shards each taking 40 bytes. Each node has 100 bytes available - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 20)); // 80% used - usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 100)); // 0% used - ImmutableOpenMap usages = usagesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 20)); // 80% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 100)); // 0% used - ImmutableOpenMap.Builder shardSizesBuilder = ImmutableOpenMap.builder(); - shardSizesBuilder.put("[test][0][p]", 40L); - shardSizesBuilder.put("[test][1][p]", 40L); - shardSizesBuilder.put("[foo][0][p]", 10L); - ImmutableOpenMap shardSizes = shardSizesBuilder.build(); + final Map shardSizes = new HashMap<>(); + shardSizes.put("[test][0][p]", 40L); + shardSizes.put("[test][1][p]", 40L); + shardSizes.put("[foo][0][p]", 10L); final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); @@ -1053,17 +1040,16 @@ public void testForSingleDataNode() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used - usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 20)); // 80% used - usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 100)); // 0% used - ImmutableOpenMap usages = usagesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 20)); // 80% used + usages.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 100)); // 0% used // We have an index with 1 primary shards each taking 40 bytes. 
Each node has 100 bytes available - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 40L); shardSizes.put("[test][1][p]", 40L); - final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes.build()); + final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() @@ -1207,14 +1193,13 @@ public void testWatermarksEnabledForSingleDataNode() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") .build(); - ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("data", new DiskUsage("data", "data", "/dev/null", 100, 20)); // 80% used - ImmutableOpenMap usages = usagesBuilder.build(); + final Map usages = new HashMap<>(); + usages.put("data", new DiskUsage("data", "data", "/dev/null", 100, 20)); // 80% used // We have an index with 1 primary shard, taking 40 bytes. The single data node has only 20 bytes free. - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 40L); - final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes.build()); + final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes); DiskThresholdDecider diskThresholdDecider = makeDecider(diskSettings); Metadata metadata = Metadata.builder() @@ -1319,11 +1304,10 @@ public void testDiskThresholdWithSnapshotShardSizes() { .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "95%") .build(); - final ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); - usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 21)); // 79% used - usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 1)); // 99% used - final ImmutableOpenMap usages = usagesBuilder.build(); - final ClusterInfoService clusterInfoService = () -> new DevNullClusterInfo(usages, usages, ImmutableOpenMap.of()); + final Map usages = new HashMap<>(); + usages.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 21)); // 79% used + usages.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 1)); // 99% used + final ClusterInfoService clusterInfoService = () -> new DevNullClusterInfo(usages, usages, Map.of()); final AllocationDeciders deciders = new AllocationDeciders( new HashSet<>( @@ -1468,18 +1452,18 @@ public void logShardStates(ClusterState state) { */ static class DevNullClusterInfo extends ClusterInfo { DevNullClusterInfo( - ImmutableOpenMap leastAvailableSpaceUsage, - ImmutableOpenMap mostAvailableSpaceUsage, - ImmutableOpenMap shardSizes + final Map leastAvailableSpaceUsage, + final Map mostAvailableSpaceUsage, + final Map shardSizes ) { - this(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, ImmutableOpenMap.of()); + this(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, Map.of()); } DevNullClusterInfo( - ImmutableOpenMap leastAvailableSpaceUsage, - ImmutableOpenMap mostAvailableSpaceUsage, - ImmutableOpenMap shardSizes, - ImmutableOpenMap reservedSpace + final Map leastAvailableSpaceUsage, + final Map mostAvailableSpaceUsage, + final Map shardSizes, + Map reservedSpace ) { super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null, reservedSpace); } diff --git 
a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index bbd9658361114..caab381e65e84 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -54,14 +54,15 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.RoutingAllocation; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -114,25 +115,19 @@ public void testCanAllocateUsesMaxAvailableSpace() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node_0).add(node_1)).build(); // actual test -- after all that bloat :) - ImmutableOpenMap.Builder leastAvailableUsages = ImmutableOpenMap.builder(); + final Map leastAvailableUsages = new HashMap<>(); leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, 0)); // all full - ImmutableOpenMap.Builder mostAvailableUsage = ImmutableOpenMap.builder(); + final Map mostAvailableUsage = new HashMap<>(); // 20 - 99 percent since after allocation there must be at least 10% left and shard is 10byte mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, randomIntBetween(20, 100))); // this is weird and smells like a bug! it should be up to 20%? 
mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "_na_", 100, randomIntBetween(0, 10))); - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 10L); // 10 bytes - final ClusterInfo clusterInfo = new ClusterInfo( - leastAvailableUsages.build(), - mostAvailableUsage.build(), - shardSizes.build(), - ImmutableOpenMap.of(), - ImmutableOpenMap.of() - ); + final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages, mostAvailableUsage, shardSizes, Map.of(), Map.of()); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), clusterState.getRoutingNodes(), @@ -198,23 +193,17 @@ public void testCannotAllocateDueToLackOfDiskResources() { // actual test -- after all that bloat :) - ImmutableOpenMap.Builder leastAvailableUsages = ImmutableOpenMap.builder(); + final Map leastAvailableUsages = new HashMap<>(); leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full - ImmutableOpenMap.Builder mostAvailableUsage = ImmutableOpenMap.builder(); + final Map mostAvailableUsage = new HashMap<>(); final int freeBytes = randomIntBetween(20, 100); mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, freeBytes)); - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); // way bigger than available space final long shardSize = randomIntBetween(110, 1000); shardSizes.put("[test][0][p]", shardSize); - ClusterInfo clusterInfo = new ClusterInfo( - leastAvailableUsages.build(), - mostAvailableUsage.build(), - shardSizes.build(), - ImmutableOpenMap.of(), - ImmutableOpenMap.of() - ); + ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages, mostAvailableUsage, shardSizes, Map.of(), Map.of()); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), clusterState.getRoutingNodes(), @@ -245,7 +234,7 @@ public void testCannotAllocateDueToLackOfDiskResources() { public void testCanRemainUsesLeastAvailableSpace() { ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss); - ImmutableOpenMap.Builder shardRoutingMap = ImmutableOpenMap.builder(); + final Map shardRoutingMap = new HashMap<>(); DiscoveryNode node_0 = new DiscoveryNode( "node_0", @@ -318,26 +307,20 @@ public void testCanRemainUsesLeastAvailableSpace() { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node_0).add(node_1)).build(); // actual test -- after all that bloat :) - ImmutableOpenMap.Builder leastAvailableUsages = ImmutableOpenMap.builder(); + final Map leastAvailableUsages = new HashMap<>(); leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "/node0/least", 100, 10)); // 90% used leastAvailableUsages.put("node_1", new DiskUsage("node_1", "node_1", "/node1/least", 100, 9)); // 91% used - ImmutableOpenMap.Builder mostAvailableUsage = ImmutableOpenMap.builder(); + final Map mostAvailableUsage = new HashMap<>(); mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "/node0/most", 100, 90)); // 10% used mostAvailableUsage.put("node_1", new DiskUsage("node_1", "node_1", "/node1/most", 100, 90)); // 10% used - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); 
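// Editor's note — an illustrative sketch, not part of this patch: the migration applied across
// these tests replaces the ImmutableOpenMap.builder()/.build() idiom with plain JDK maps. The
// variable names below are hypothetical and use only java.util types.
final Map<String, Long> exampleSizes = new HashMap<>();              // mutable while populating
exampleSizes.put("[example][0][p]", 10L);                            // same puts, no builder
final Map<String, Long> exampleSnapshot = Map.copyOf(exampleSizes);  // immutable copy if needed
// The ClusterInfo constructors in this diff now accept such java.util.Map arguments directly,
// so no trailing .build() call is required.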
shardSizes.put("[test][0][p]", 10L); // 10 bytes shardSizes.put("[test][1][p]", 10L); shardSizes.put("[test][2][p]", 10L); - final ClusterInfo clusterInfo = new ClusterInfo( - leastAvailableUsages.build(), - mostAvailableUsage.build(), - shardSizes.build(), - shardRoutingMap.build(), - ImmutableOpenMap.of() - ); + final ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages, mostAvailableUsage, shardSizes, shardRoutingMap, Map.of()); RoutingAllocation allocation = new RoutingAllocation( new AllocationDeciders(Collections.singleton(decider)), clusterState.getRoutingNodes(), @@ -392,16 +375,12 @@ public void testCanRemainUsesLeastAvailableSpace() { } public void testShardSizeAndRelocatingSize() { - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][r]", 10L); shardSizes.put("[test][1][r]", 100L); shardSizes.put("[test][2][r]", 1000L); shardSizes.put("[other][0][p]", 10000L); - ClusterInfo info = new DiskThresholdDeciderTests.DevNullClusterInfo( - ImmutableOpenMap.of(), - ImmutableOpenMap.of(), - shardSizes.build() - ); + ClusterInfo info = new DiskThresholdDeciderTests.DevNullClusterInfo(Map.of(), Map.of(), shardSizes); Metadata.Builder metaBuilder = Metadata.builder(); metaBuilder.put( IndexMetadata.builder("test") @@ -520,17 +499,13 @@ public long sizeOfRelocatingShards(RoutingAllocation allocation, RoutingNode nod } public void testSizeShrinkIndex() { - ImmutableOpenMap.Builder shardSizes = ImmutableOpenMap.builder(); + final Map shardSizes = new HashMap<>(); shardSizes.put("[test][0][p]", 10L); shardSizes.put("[test][1][p]", 100L); shardSizes.put("[test][2][p]", 500L); shardSizes.put("[test][3][p]", 500L); - ClusterInfo info = new DiskThresholdDeciderTests.DevNullClusterInfo( - ImmutableOpenMap.of(), - ImmutableOpenMap.of(), - shardSizes.build() - ); + ClusterInfo info = new DiskThresholdDeciderTests.DevNullClusterInfo(Map.of(), Map.of(), shardSizes); Metadata.Builder metaBuilder = Metadata.builder(); metaBuilder.put( IndexMetadata.builder("test") From aa8b8f552ea1b6f30f75cb1bd458a080c4ba0b61 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 21 Apr 2023 04:46:49 +0000 Subject: [PATCH 2/3] [Refactor] Metadata members from ImmutableOpenMap to j.u.Map (#7165) Refactors Metadata.{indices, templates, customs} member variables from ImmutableOpenMap to use jdk Maps. Usage of these variables across the codebase is also refactored to use jdk maps. 
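For illustration only (this sketch is not part of the change): the bulk of the patch is the
mechanical move from hppc cursor iteration to standard java.util.Map iteration. A minimal
JDK-only example, with hypothetical names:

    import java.util.Map;

    class CursorToEntrySketch {
        static void dump(final Map<String, String> customs) {
            // before: for (ObjectObjectCursor<String, String> c : customs) { use c.key / c.value }
            for (final Map.Entry<String, String> entry : customs.entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
            // before: customs.keys() / customs.keysIt(); after: keySet() behaves the same way
            for (final String name : customs.keySet()) {
                System.out.println(name);
            }
        }
    }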
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../client/indices/GetIndexResponseTests.java | 6 +- .../indices/GetMappingsResponseTests.java | 7 +- .../admin/indices/create/CreateIndexIT.java | 4 +- .../action/admin/indices/get/GetIndexIT.java | 5 +- .../cluster/ClusterStateDiffIT.java | 13 +- .../cluster/SimpleClusterStateIT.java | 10 +- .../opensearch/gateway/MetadataNodesIT.java | 5 +- .../gateway/RecoveryFromGatewayIT.java | 5 +- .../SegmentReplicationAllocationIT.java | 1 - .../DestructiveOperationsIT.java | 9 +- .../org/opensearch/recovery/RelocationIT.java | 5 +- .../search/SearchWeightedRoutingIT.java | 1 - .../state/TransportClusterStateAction.java | 7 +- .../TransportDeleteDanglingIndexAction.java | 5 +- .../admin/indices/get/GetIndexResponse.java | 26 +-- .../indices/get/TransportGetIndexAction.java | 3 +- .../mapping/get/GetMappingsResponse.java | 39 ++-- .../get/TransportGetMappingsAction.java | 5 +- .../get/TransportGetIndexTemplatesAction.java | 10 +- .../cluster/ClusterChangedEvent.java | 28 ++- .../org/opensearch/cluster/ClusterModule.java | 2 +- .../org/opensearch/cluster/ClusterState.java | 6 +- .../coordination/RemoveCustomsCommand.java | 6 +- .../UnsafeBootstrapClusterManagerCommand.java | 4 +- .../opensearch/cluster/metadata/Metadata.java | 174 +++++++++--------- .../MetadataIndexTemplateService.java | 15 +- .../SystemIndexMetadataUpgradeService.java | 23 ++- .../metadata/TemplateUpgradeService.java | 18 +- .../opensearch/env/NodeRepurposeCommand.java | 9 +- .../gateway/ClusterStateUpdaters.java | 5 +- .../gateway/DanglingIndicesState.java | 5 +- .../java/org/opensearch/gateway/Gateway.java | 5 +- .../opensearch/gateway/GatewayMetaState.java | 8 +- .../gateway/PersistedClusterStateService.java | 10 +- .../RemoveCorruptedShardDataCommand.java | 7 - .../rest/action/cat/RestTemplatesAction.java | 4 +- .../opensearch/snapshots/RestoreService.java | 12 +- .../opensearch/snapshots/SnapshotUtils.java | 7 +- .../reroute/ClusterRerouteResponseTests.java | 5 +- .../indices/get/GetIndexResponseTests.java | 13 +- .../mapping/get/GetMappingsResponseTests.java | 11 +- .../bulk/TransportBulkActionIngestTests.java | 59 +++--- .../cluster/ClusterChangedEventTests.java | 15 +- .../MetadataCreateIndexServiceTests.java | 5 +- .../cluster/metadata/MetadataTests.java | 54 +++--- .../metadata/TemplateUpgradeServiceTests.java | 3 +- .../allocation/AddIncrementallyTests.java | 4 +- .../allocation/BalanceConfigurationTests.java | 8 +- .../allocation/ThrottlingAllocationTests.java | 7 +- .../EnableAllocationShortCircuitTests.java | 4 +- .../gateway/DanglingIndicesStateTests.java | 7 +- .../mapper/FieldFilterMapperPluginTests.java | 3 +- .../indices/ShardLimitValidatorTests.java | 10 +- ...ClusterStateServiceRandomUpdatesTests.java | 8 +- .../snapshots/SnapshotUtilsTests.java | 4 +- 56 files changed, 331 insertions(+), 404 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73ef05688fb8c..6b1e880ace593 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Pass localNode info to all plugins on node start ([#7919](https://github.com/opensearch-project/OpenSearch/pull/7919) - Compress and cache cluster state during validate join request ([#7321](https://github.com/opensearch-project/OpenSearch/pull/7321)) - [Refactor] Sets util from server to common lib ([#8230](https://github.com/opensearch-project/OpenSearch/pull/8230)) +- [Refactor] Metadata members from 
ImmutableOpenMap to j.u.Map ([#7165](https://github.com/opensearch-project/OpenSearch/pull/7165)) ### Deprecated diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java index 63586bdbe4783..db2d327e50a65 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetIndexResponseTests.java @@ -62,7 +62,7 @@ public class GetIndexResponseTests extends AbstractResponseTestCase< @Override protected org.opensearch.action.admin.indices.get.GetIndexResponse createServerTestInstance(XContentType xContentType) { String[] indices = generateRandomStringArray(5, 5, false, false); - ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); + final Map mappings = new HashMap<>(); ImmutableOpenMap.Builder> aliases = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder settings = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder defaultSettings = ImmutableOpenMap.builder(); @@ -94,7 +94,7 @@ protected org.opensearch.action.admin.indices.get.GetIndexResponse createServerT } return new org.opensearch.action.admin.indices.get.GetIndexResponse( indices, - mappings.build(), + mappings, aliases.build(), settings.build(), defaultSettings.build(), @@ -113,7 +113,7 @@ protected void assertInstances( GetIndexResponse clientInstance ) { assertArrayEquals(serverTestInstance.getIndices(), clientInstance.getIndices()); - assertMapEquals(serverTestInstance.getMappings(), clientInstance.getMappings()); + assertEquals(serverTestInstance.getMappings(), clientInstance.getMappings()); assertMapEquals(serverTestInstance.getSettings(), clientInstance.getSettings()); assertMapEquals(serverTestInstance.defaultSettings(), clientInstance.getDefaultSettings()); assertMapEquals(serverTestInstance.getAliases(), clientInstance.getAliases()); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java index 8158dc1ca4e2b..fe87e0fe6aac9 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/GetMappingsResponseTests.java @@ -34,7 +34,6 @@ import org.opensearch.client.AbstractResponseTestCase; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.mapper.MapperService; @@ -50,12 +49,12 @@ public class GetMappingsResponseTests extends AbstractResponseTestCase< @Override protected org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse createServerTestInstance(XContentType xContentType) { - ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); + final Map mappings = new HashMap<>(); int numberOfIndexes = randomIntBetween(1, 5); for (int i = 0; i < numberOfIndexes; i++) { mappings.put("index-" + randomAlphaOfLength(5), randomMappingMetadata()); } - return new org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse(mappings.build()); + return new org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse(mappings); } @Override @@ -68,7 +67,7 @@ protected void 
assertInstances( org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse serverTestInstance, GetMappingsResponse clientInstance ) { - assertMapEquals(serverTestInstance.getMappings(), clientInstance.mappings()); + assertEquals(serverTestInstance.getMappings(), clientInstance.mappings()); } public static MappingMetadata randomMappingMetadata() { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index 51ef63ae9e9c1..f6c016828f884 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -45,7 +45,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentFactory; @@ -59,6 +58,7 @@ import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; @@ -100,7 +100,7 @@ public void testCreationDateGenerated() { assertThat(state, notNullValue()); Metadata metadata = state.getMetadata(); assertThat(metadata, notNullValue()); - ImmutableOpenMap indices = metadata.getIndices(); + final Map indices = metadata.getIndices(); assertThat(indices, notNullValue()); assertThat(indices.size(), equalTo(1)); IndexMetadata index = indices.get("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java index ffc738ac98de5..2bc661d85b4d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java @@ -46,6 +46,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_METADATA_BLOCK; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; @@ -271,7 +272,7 @@ private void assertNonEmptySettings(GetIndexResponse response, String indexName) } private void assertMappings(GetIndexResponse response, String indexName) { - ImmutableOpenMap mappings = response.mappings(); + final Map mappings = response.mappings(); assertThat(mappings, notNullValue()); assertThat(mappings.size(), equalTo(1)); MappingMetadata indexMappings = mappings.get(indexName); @@ -279,7 +280,7 @@ private void assertMappings(GetIndexResponse response, String indexName) { } private void assertEmptyOrOnlyDefaultMappings(GetIndexResponse response, String indexName) { - ImmutableOpenMap mappings = response.mappings(); + final Map mappings = response.mappings(); assertThat(mappings, notNullValue()); assertThat(mappings.size(), equalTo(1)); MappingMetadata indexMappings = mappings.get(indexName); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java index d7275598a2e06..ad886f8574c1e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java @@ -75,6 +75,7 @@ import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static java.util.Collections.emptyList; @@ -526,7 +527,7 @@ private interface RandomPart { /** * Returns list of parts from metadata */ - ImmutableOpenMap parts(Metadata metadata); + Map parts(Metadata metadata); /** * Puts the part back into metadata @@ -556,10 +557,10 @@ private interface RandomPart { */ private Metadata randomParts(Metadata metadata, String prefix, RandomPart randomPart) { Metadata.Builder builder = Metadata.builder(metadata); - ImmutableOpenMap parts = randomPart.parts(metadata); + final Map parts = randomPart.parts(metadata); int partCount = parts.size(); if (partCount > 0) { - List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metadata).keys().toArray(String.class)); + List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metadata).keySet().toArray(new String[0])); for (String part : randomParts) { if (randomBoolean()) { randomPart.remove(builder, part); @@ -583,7 +584,7 @@ private Metadata randomIndices(Metadata metadata) { return randomParts(metadata, "index", new RandomPart() { @Override - public ImmutableOpenMap parts(Metadata metadata) { + public Map parts(Metadata metadata) { return metadata.indices(); } @@ -645,7 +646,7 @@ public IndexMetadata randomChange(IndexMetadata part) { private Metadata randomTemplates(Metadata metadata) { return randomParts(metadata, "template", new RandomPart() { @Override - public ImmutableOpenMap parts(Metadata metadata) { + public Map parts(Metadata metadata) { return metadata.templates(); } @@ -702,7 +703,7 @@ private Metadata randomMetadataCustoms(final Metadata metadata) { return randomParts(metadata, "custom", new RandomPart() { @Override - public ImmutableOpenMap parts(Metadata metadata) { + public Map parts(Metadata metadata) { return metadata.customs(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java index fbc5ec85cb84e..c1cfbbbf1fda4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java @@ -47,7 +47,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -65,7 +64,6 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.hamcrest.CollectionAssertions; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -76,6 +74,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -83,6 +82,7 @@ import 
static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertIndexTemplateExists; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -238,14 +238,14 @@ private void testFilteringByIndexWorks(String[] indices, String[] expected) { .setIndices(indices) .get(); - ImmutableOpenMap metadata = clusterState.getState().getMetadata().indices(); + final Map metadata = clusterState.getState().getMetadata().indices(); assertThat(metadata.size(), is(expected.length)); RoutingTable routingTable = clusterState.getState().getRoutingTable(); assertThat(routingTable.indicesRouting().size(), is(expected.length)); - for (String expectedIndex : expected) { - assertThat(metadata, CollectionAssertions.hasKey(expectedIndex)); + for (final String expectedIndex : expected) { + assertThat(metadata, hasKey(expectedIndex)); assertThat(routingTable.hasIndex(expectedIndex), is(true)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java index 4bbd968d851b8..cfd1dd7f9a008 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java @@ -36,7 +36,6 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.opensearch.cluster.coordination.Coordinator; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; import org.opensearch.discovery.Discovery; import org.opensearch.env.NodeEnvironment; @@ -152,7 +151,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { ); // make sure it was also written on red node although index is closed - ImmutableOpenMap indicesMetadata = getIndicesMetadataOnNode(dataNode); + Map indicesMetadata = getIndicesMetadataOnNode(dataNode); assertNotNull(((Map) (indicesMetadata.get(index).mapping().getSourceAsMap().get("properties"))).get("integer_field")); assertThat(indicesMetadata.get(index).getState(), equalTo(IndexMetadata.State.CLOSE)); @@ -239,7 +238,7 @@ private boolean indexDirectoryExists(String nodeName, Index index) { return false; } - private ImmutableOpenMap getIndicesMetadataOnNode(String nodeName) { + private Map getIndicesMetadataOnNode(String nodeName) { final Coordinator coordinator = (Coordinator) internalCluster().getInstance(Discovery.class, nodeName); return coordinator.getApplierState().getMetadata().getIndices(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 11af1fb3cbfab..298ec5a8efc10 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -32,8 +32,6 @@ package org.opensearch.gateway; -import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; import 
org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; @@ -181,8 +179,7 @@ private Map assertAndCapturePrimaryTerms(Map pre } final Map result = new HashMap<>(); final ClusterState state = client().admin().cluster().prepareState().get().getState(); - for (ObjectCursor cursor : state.metadata().indices().values()) { - final IndexMetadata indexMetadata = cursor.value; + for (final IndexMetadata indexMetadata : state.metadata().indices().values()) { final String index = indexMetadata.getIndex().getName(); final long[] previous = previousTerms.get(index); final long[] current = IntStream.range(0, indexMetadata.getNumberOfShards()).mapToLong(indexMetadata::primaryTerm).toArray(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 04f0a2c4bfb13..f2c760638b54b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -61,7 +61,6 @@ public void enablePreferPrimaryBalance() { /** * This test verifies that the overall primary balance is attained during allocation. This test verifies primary * balance per index and across all indices is maintained. - * @throws Exception */ public void testGlobalPrimaryAllocation() throws Exception { internalCluster().startClusterManagerOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/operateAllIndices/DestructiveOperationsIT.java b/server/src/internalClusterTest/java/org/opensearch/operateAllIndices/DestructiveOperationsIT.java index 7a19f3c832312..4732456cb092e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/operateAllIndices/DestructiveOperationsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/operateAllIndices/DestructiveOperationsIT.java @@ -32,7 +32,6 @@ package org.opensearch.operateAllIndices; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -109,8 +108,8 @@ public void testCloseIndexDefaultBehaviour() throws Exception { } ClusterState state = client().admin().cluster().prepareState().get().getState(); - for (ObjectObjectCursor indexMetadataObjectObjectCursor : state.getMetadata().indices()) { - assertEquals(IndexMetadata.State.CLOSE, indexMetadataObjectObjectCursor.value.getState()); + for (final IndexMetadata indexMetadataObjectObjectCursor : state.getMetadata().indices().values()) { + assertEquals(IndexMetadata.State.CLOSE, indexMetadataObjectObjectCursor.getState()); } } @@ -141,8 +140,8 @@ public void testOpenIndexDefaultBehaviour() throws Exception { } ClusterState state = client().admin().cluster().prepareState().get().getState(); - for (ObjectObjectCursor indexMetadataObjectObjectCursor : state.getMetadata().indices()) { - assertEquals(IndexMetadata.State.OPEN, indexMetadataObjectObjectCursor.value.getState()); + for (final IndexMetadata indexMetadataObjectObjectCursor : state.getMetadata().indices().values()) { + assertEquals(IndexMetadata.State.OPEN, indexMetadataObjectObjectCursor.getState()); } } } diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java 
b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index 1f16cc0363686..515d1ace8d492 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -32,7 +32,6 @@ package org.opensearch.recovery; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.tests.util.English; import org.opensearch.action.ActionFuture; @@ -771,8 +770,8 @@ public void testRelocationEstablishedPeerRecoveryRetentionLeases() throws Except private void assertActiveCopiesEstablishedPeerRecoveryRetentionLeases() throws Exception { assertBusy(() -> { - for (ObjectCursor it : client().admin().cluster().prepareState().get().getState().metadata().indices().keys()) { - Map> byShardId = Stream.of(client().admin().indices().prepareStats(it.value).get().getShards()) + for (final String it : client().admin().cluster().prepareState().get().getState().metadata().indices().keySet()) { + Map> byShardId = Stream.of(client().admin().indices().prepareStats(it).get().getShards()) .collect(Collectors.groupingBy(l -> l.getShardRouting().shardId())); for (List shardStats : byShardId.values()) { Set expectedLeaseIds = shardStats.stream() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index a4cf5ebb028e3..d8e9c74e97d56 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -663,7 +663,6 @@ public void testStrictWeightedRoutingWithCustomString_FailOpenDisabled() throws /** * Should failopen shards even if failopen enabled with custom search preference. 
- * @throws Exception */ public void testStrictWeightedRoutingWithShardPrefNetworkDisruption_FailOpenEnabled() throws Exception { Settings commonSettings = Settings.builder() diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 9d65146716496..88f94cacf3a81 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -57,6 +57,7 @@ import java.io.IOException; import java.util.function.Predicate; +import java.util.Map; /** * Transport action for obtaining cluster state @@ -212,9 +213,9 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi } // filter out metadata that shouldn't be returned by the API - for (ObjectObjectCursor custom : currentState.metadata().customs()) { - if (custom.value.context().contains(Metadata.XContentContext.API) == false) { - mdBuilder.removeCustom(custom.key); + for (final Map.Entry custom : currentState.metadata().customs().entrySet()) { + if (custom.getValue().context().contains(Metadata.XContentContext.API) == false) { + mdBuilder.removeCustom(custom.getKey()); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index e14125c21af9c..23f5f3e177df6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.dangling.delete; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchException; @@ -186,8 +185,8 @@ public void onFailure(Exception e) { private ClusterState deleteDanglingIndex(ClusterState currentState, Index indexToDelete) { final Metadata metaData = currentState.getMetadata(); - for (ObjectObjectCursor each : metaData.indices()) { - if (indexToDelete.getUUID().equals(each.value.getIndexUUID())) { + for (final IndexMetadata each : metaData.indices().values()) { + if (indexToDelete.getUUID().equals(each.getIndexUUID())) { throw new IllegalArgumentException( "Refusing to delete dangling index " + indexToDelete diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java index 5f98c184c95f7..b2a312f557778 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexResponse.java @@ -52,7 +52,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -62,7 +64,7 @@ */ public class GetIndexResponse extends ActionResponse implements ToXContentObject { - private ImmutableOpenMap mappings = ImmutableOpenMap.of(); + private Map mappings = Map.of(); private ImmutableOpenMap> aliases = ImmutableOpenMap.of(); private 
ImmutableOpenMap settings = ImmutableOpenMap.of(); private ImmutableOpenMap defaultSettings = ImmutableOpenMap.of(); @@ -71,7 +73,7 @@ public class GetIndexResponse extends ActionResponse implements ToXContentObject public GetIndexResponse( String[] indices, - ImmutableOpenMap mappings, + Map mappings, ImmutableOpenMap> aliases, ImmutableOpenMap settings, ImmutableOpenMap defaultSettings, @@ -102,7 +104,7 @@ public GetIndexResponse( this.indices = in.readStringArray(); int mappingsSize = in.readVInt(); - ImmutableOpenMap.Builder mappingsMapBuilder = ImmutableOpenMap.builder(); + Map mappingsMapBuilder = new HashMap<>(); for (int i = 0; i < mappingsSize; i++) { String index = in.readString(); if (in.getVersion().before(Version.V_2_0_0)) { @@ -123,7 +125,7 @@ public GetIndexResponse( mappingsMapBuilder.put(index, metadata != null ? metadata : MappingMetadata.EMPTY_MAPPINGS); } } - mappings = mappingsMapBuilder.build(); + mappings = Collections.unmodifiableMap(mappingsMapBuilder); int aliasesSize = in.readVInt(); ImmutableOpenMap.Builder> aliasesMapBuilder = ImmutableOpenMap.builder(); @@ -171,11 +173,11 @@ public String[] getIndices() { return indices(); } - public ImmutableOpenMap mappings() { + public Map mappings() { return mappings; } - public ImmutableOpenMap getMappings() { + public Map getMappings() { return mappings(); } @@ -243,16 +245,16 @@ public String getSetting(String index, String setting) { public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); out.writeVInt(mappings.size()); - for (ObjectObjectCursor indexEntry : mappings) { - out.writeString(indexEntry.key); + for (final Map.Entry indexEntry : mappings.entrySet()) { + out.writeString(indexEntry.getKey()); if (out.getVersion().before(Version.V_2_0_0)) { - out.writeVInt(indexEntry.value == MappingMetadata.EMPTY_MAPPINGS ? 0 : 1); - if (indexEntry.value != MappingMetadata.EMPTY_MAPPINGS) { + out.writeVInt(indexEntry.getValue() == MappingMetadata.EMPTY_MAPPINGS ? 
0 : 1); + if (indexEntry.getValue() != MappingMetadata.EMPTY_MAPPINGS) { out.writeString(MapperService.SINGLE_MAPPING_NAME); - indexEntry.value.writeTo(out); + indexEntry.getValue().writeTo(out); } } else { - out.writeOptionalWriteable(indexEntry.value); + out.writeOptionalWriteable(indexEntry.getValue()); } } out.writeVInt(aliases.size()); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java index de272bab332a7..dbd9afd58a1fa 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java @@ -53,6 +53,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import java.util.stream.StreamSupport; @@ -104,7 +105,7 @@ protected void doClusterManagerOperation( final ClusterState state, final ActionListener listener ) { - ImmutableOpenMap mappingsResult = ImmutableOpenMap.of(); + Map mappingsResult = Map.of(); ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); ImmutableOpenMap settings = ImmutableOpenMap.of(); ImmutableOpenMap defaultSettings = ImmutableOpenMap.of(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java index b8415a3e95ac8..757e6e350a60b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -32,12 +32,10 @@ package org.opensearch.action.admin.indices.mapping.get; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.common.Strings; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentType; @@ -47,6 +45,9 @@ import org.opensearch.index.mapper.MapperService; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; /** * Transport response to get field mappings. @@ -57,16 +58,16 @@ public class GetMappingsResponse extends ActionResponse implements ToXContentFra private static final ParseField MAPPINGS = new ParseField("mappings"); - private final ImmutableOpenMap mappings; + private final Map mappings; - public GetMappingsResponse(ImmutableOpenMap mappings) { - this.mappings = mappings; + public GetMappingsResponse(final Map mappings) { + this.mappings = Collections.unmodifiableMap(mappings); } GetMappingsResponse(StreamInput in) throws IOException { super(in); int size = in.readVInt(); - ImmutableOpenMap.Builder indexMapBuilder = ImmutableOpenMap.builder(); + final Map indexMapBuilder = new HashMap<>(); for (int i = 0; i < size; i++) { String index = in.readString(); if (in.getVersion().before(Version.V_2_0_0)) { @@ -87,40 +88,40 @@ public GetMappingsResponse(ImmutableOpenMap mappings) { indexMapBuilder.put(index, hasMapping ? 
new MappingMetadata(in) : MappingMetadata.EMPTY_MAPPINGS); } } - mappings = indexMapBuilder.build(); + mappings = Collections.unmodifiableMap(indexMapBuilder); } - public ImmutableOpenMap mappings() { + public Map mappings() { return mappings; } - public ImmutableOpenMap getMappings() { + public Map getMappings() { return mappings(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(mappings.size()); - for (ObjectObjectCursor indexEntry : mappings) { - out.writeString(indexEntry.key); + for (Map.Entry indexEntry : mappings.entrySet()) { + out.writeString(indexEntry.getKey()); if (out.getVersion().before(Version.V_2_0_0)) { - out.writeVInt(indexEntry.value == MappingMetadata.EMPTY_MAPPINGS ? 0 : 1); - if (indexEntry.value != MappingMetadata.EMPTY_MAPPINGS) { + out.writeVInt(indexEntry.getValue() == MappingMetadata.EMPTY_MAPPINGS ? 0 : 1); + if (indexEntry.getValue() != MappingMetadata.EMPTY_MAPPINGS) { out.writeString(MapperService.SINGLE_MAPPING_NAME); - indexEntry.value.writeTo(out); + indexEntry.getValue().writeTo(out); } } else { - out.writeOptionalWriteable(indexEntry.value); + out.writeOptionalWriteable(indexEntry.getValue()); } } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - for (final ObjectObjectCursor indexEntry : getMappings()) { - builder.startObject(indexEntry.key); - if (indexEntry.value != null) { - builder.field(MAPPINGS.getPreferredName(), indexEntry.value.sourceAsMap()); + for (final Map.Entry indexEntry : getMappings().entrySet()) { + builder.startObject(indexEntry.getKey()); + if (indexEntry.getValue() != null) { + builder.field(MAPPINGS.getPreferredName(), indexEntry.getValue().sourceAsMap()); } else { builder.startObject(MAPPINGS.getPreferredName()).endObject(); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index e724320728b66..933d598cd4dd8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.inject.Inject; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.indices.IndicesService; @@ -49,6 +48,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.Map; /** * Transport action to get field mappings. 
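// Editor's note — a minimal JDK-only sketch (not part of this patch) of the pattern the response
// classes above now follow: accept a plain Map, keep an unmodifiable view, and iterate with
// entrySet() instead of hppc cursors. Class and field names here are hypothetical.
class UnmodifiableMapHolderSketch {
    private final java.util.Map<String, String> mappings;

    UnmodifiableMapHolderSketch(final java.util.Map<String, String> mappings) {
        // defensive immutable view in place of an hppc ImmutableOpenMap
        this.mappings = java.util.Collections.unmodifiableMap(mappings);
    }

    void append(final StringBuilder out) {
        // Map.Entry iteration replaces ObjectObjectCursor key/value access
        for (final java.util.Map.Entry<String, String> entry : mappings.entrySet()) {
            out.append(entry.getKey()).append('=').append(entry.getValue()).append('\n');
        }
    }
}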
@@ -96,8 +96,7 @@ protected void doClusterManagerOperation( ) { logger.trace("serving getMapping request based on version {}", state.version()); try { - ImmutableOpenMap result = state.metadata() - .findMappings(concreteIndices, indicesService.getFieldFilter()); + final Map result = state.metadata().findMappings(concreteIndices, indicesService.getFieldFilter()); listener.onResponse(new GetMappingsResponse(result)); } catch (IOException e) { listener.onFailure(e); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index d74ff9e309842..2a511e60eaf7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.template.get; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; @@ -51,6 +50,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; /** * Transport action to retrieve one or more Index templates @@ -105,16 +105,16 @@ protected void clusterManagerOperation( // If we did not ask for a specific name, then we return all templates if (request.names().length == 0) { - results = Arrays.asList(state.metadata().templates().values().toArray(IndexTemplateMetadata.class)); + results = Arrays.asList(state.metadata().templates().values().toArray(new IndexTemplateMetadata[0])); } else { results = new ArrayList<>(); } for (String name : request.names()) { if (Regex.isSimpleMatchPattern(name)) { - for (ObjectObjectCursor entry : state.metadata().templates()) { - if (Regex.simpleMatch(name, entry.key)) { - results.add(entry.value); + for (final Map.Entry entry : state.metadata().templates().entrySet()) { + if (Regex.simpleMatch(name, entry.getKey())) { + results.add(entry.getValue()); } } } else if (state.metadata().templates().containsKey(name)) { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index 7e5de435267b4..28085dd6e3860 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -32,14 +32,11 @@ package org.opensearch.cluster; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexGraveyard.IndexGraveyardDiff; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.gateway.GatewayService; import org.opensearch.index.Index; @@ -47,6 +44,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -129,8 +127,7 @@ public List indicesCreated() { return Collections.emptyList(); } List created = 
null; - for (ObjectCursor cursor : state.metadata().indices().keys()) { - String index = cursor.value; + for (final String index : state.metadata().indices().keySet()) { if (!previousState.metadata().hasIndex(index)) { if (created == null) { created = new ArrayList<>(); @@ -170,20 +167,20 @@ public boolean metadataChanged() { */ public Set changedCustomMetadataSet() { Set result = new HashSet<>(); - ImmutableOpenMap currentCustoms = state.metadata().customs(); - ImmutableOpenMap previousCustoms = previousState.metadata().customs(); + Map currentCustoms = state.metadata().customs(); + Map previousCustoms = previousState.metadata().customs(); if (currentCustoms.equals(previousCustoms) == false) { - for (ObjectObjectCursor currentCustomMetadata : currentCustoms) { + for (Map.Entry currentCustomMetadata : currentCustoms.entrySet()) { // new custom md added or existing custom md changed - if (previousCustoms.containsKey(currentCustomMetadata.key) == false - || currentCustomMetadata.value.equals(previousCustoms.get(currentCustomMetadata.key)) == false) { - result.add(currentCustomMetadata.key); + if (previousCustoms.containsKey(currentCustomMetadata.getKey()) == false + || currentCustomMetadata.getValue().equals(previousCustoms.get(currentCustomMetadata.getKey())) == false) { + result.add(currentCustomMetadata.getKey()); } } // existing custom md deleted - for (ObjectObjectCursor previousCustomMetadata : previousCustoms) { - if (currentCustoms.containsKey(previousCustomMetadata.key) == false) { - result.add(previousCustomMetadata.key); + for (Map.Entry previousCustomMetadata : previousCustoms.entrySet()) { + if (currentCustoms.containsKey(previousCustomMetadata.getKey()) == false) { + result.add(previousCustomMetadata.getKey()); } } } @@ -286,8 +283,7 @@ private List indicesDeletedFromClusterState() { final Metadata previousMetadata = previousState.metadata(); final Metadata currentMetadata = state.metadata(); - for (ObjectCursor cursor : previousMetadata.indices().values()) { - IndexMetadata index = cursor.value; + for (final IndexMetadata index : previousMetadata.indices().values()) { IndexMetadata current = currentMetadata.index(index.getIndex()); if (current == null) { if (deleted == null) { diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java index 8a4e17e5c0dc3..011a3d988c7f9 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java @@ -230,7 +230,7 @@ public static ClusterState filterCustomsForPre63Clients(ClusterState clusterStat } }); final Metadata.Builder metaBuilder = Metadata.builder(clusterState.metadata()); - clusterState.metadata().customs().keysIt().forEachRemaining(name -> { + clusterState.metadata().customs().keySet().iterator().forEachRemaining(name -> { if (PRE_6_3_METADATA_CUSTOMS_WHITE_LIST.contains(name) == false) { metaBuilder.removeCustom(name); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterState.java b/server/src/main/java/org/opensearch/cluster/ClusterState.java index 98d1e9c6db0e5..5d32d2edb8494 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterState.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterState.java @@ -375,9 +375,9 @@ public String toString() { } if (metadata.customs().isEmpty() == false) { sb.append("metadata customs:\n"); - for (final ObjectObjectCursor cursor : metadata.customs()) { - final String type = cursor.key; - final 
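changedCustomMetadataSet now diffs the two custom maps with nested entrySet() loops. Below is a minimal, runnable sketch of that two-way diff, using String values as stand-ins for Metadata.Custom; it illustrates the pattern and is not the ClusterChangedEvent code itself.

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class CustomsDiff {
    // Returns keys that were added, changed, or removed between two maps,
    // mirroring the entrySet()-based loops that replace the hppc cursor loops.
    static Set<String> changedKeys(Map<String, String> previous, Map<String, String> current) {
        final Set<String> result = new HashSet<>();
        if (current.equals(previous) == false) {
            for (Map.Entry<String, String> entry : current.entrySet()) {
                // new entry added or existing entry changed
                if (previous.containsKey(entry.getKey()) == false
                    || entry.getValue().equals(previous.get(entry.getKey())) == false) {
                    result.add(entry.getKey());
                }
            }
            for (Map.Entry<String, String> entry : previous.entrySet()) {
                // existing entry deleted
                if (current.containsKey(entry.getKey()) == false) {
                    result.add(entry.getKey());
                }
            }
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(changedKeys(Map.of("a", "1", "b", "2"), Map.of("a", "1", "b", "3", "c", "4")));
        // prints the changed keys, e.g. [b, c] (set order may vary)
    }
}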
Metadata.Custom custom = cursor.value; + for (final Map.Entry cursor : metadata.customs().entrySet()) { + final String type = cursor.getKey(); + final Metadata.Custom custom = cursor.getValue(); sb.append(TAB).append(type).append(": ").append(custom); } sb.append("\n"); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java index 5e7614d86cecd..8dad8e3836635 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/RemoveCustomsCommand.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.coordination; -import com.carrotsearch.hppc.cursors.ObjectCursor; import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.opensearch.cli.ExitCodes; @@ -84,12 +83,11 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc terminal.println(Terminal.Verbosity.VERBOSE, "Loading cluster state"); final Tuple termAndClusterState = loadTermAndClusterState(persistedClusterStateService, env); final ClusterState oldClusterState = termAndClusterState.v2(); - terminal.println(Terminal.Verbosity.VERBOSE, "custom metadata names: " + oldClusterState.metadata().customs().keys()); + terminal.println(Terminal.Verbosity.VERBOSE, "custom metadata names: " + oldClusterState.metadata().customs().keySet()); final Metadata.Builder metadataBuilder = Metadata.builder(oldClusterState.metadata()); for (String customToRemove : customsToRemove) { boolean matched = false; - for (ObjectCursor customKeyCur : oldClusterState.metadata().customs().keys()) { - final String customKey = customKeyCur.value; + for (final String customKey : oldClusterState.metadata().customs().keySet()) { if (Regex.simpleMatch(customToRemove, customKey)) { metadataBuilder.removeCustom(customKey); if (matched == false) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java index 229c20b8dea17..188ea1325e806 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.coordination; -import com.carrotsearch.hppc.cursors.ObjectCursor; import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.opensearch.OpenSearchException; @@ -151,8 +150,7 @@ protected void processNodePaths(Terminal terminal, Path[] dataPaths, int nodeLoc .clusterUUIDCommitted(true) .persistentSettings(persistentSettings) .coordinationMetadata(newCoordinationMetadata); - for (ObjectCursor idx : metadata.indices().values()) { - IndexMetadata indexMetadata = idx.value; + for (final IndexMetadata indexMetadata : metadata.indices().values()) { newMetadata.put( IndexMetadata.builder(indexMetadata) .settings( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index af84bfd2cb864..1eaec1519e790 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -32,10 +32,9 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.ObjectHashSet; import 
com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.CollectionUtil; @@ -55,7 +54,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.collect.HppcMaps; import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -90,6 +88,7 @@ import java.util.Optional; import java.util.Set; import java.util.SortedMap; +import java.util.Spliterators; import java.util.TreeMap; import java.util.function.Function; import java.util.function.Predicate; @@ -244,9 +243,9 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust private final Settings persistentSettings; private final Settings settings; private final DiffableStringMap hashesOfConsistentSettings; - private final ImmutableOpenMap indices; - private final ImmutableOpenMap templates; - private final ImmutableOpenMap customs; + private final Map indices; + private final Map templates; + private final Map customs; private final transient int totalNumberOfShards; // Transient ? not serializable anyway? private final int totalOpenIndexShards; @@ -268,9 +267,9 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust Settings transientSettings, Settings persistentSettings, DiffableStringMap hashesOfConsistentSettings, - ImmutableOpenMap indices, - ImmutableOpenMap templates, - ImmutableOpenMap customs, + final Map indices, + final Map templates, + final Map customs, String[] allIndices, String[] visibleIndices, String[] allOpenIndices, @@ -287,15 +286,15 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust this.persistentSettings = persistentSettings; this.settings = Settings.builder().put(persistentSettings).put(transientSettings).build(); this.hashesOfConsistentSettings = hashesOfConsistentSettings; - this.indices = indices; - this.customs = customs; - this.templates = templates; + this.indices = Collections.unmodifiableMap(indices); + this.customs = Collections.unmodifiableMap(customs); + this.templates = Collections.unmodifiableMap(templates); int totalNumberOfShards = 0; int totalOpenIndexShards = 0; - for (ObjectCursor cursor : indices.values()) { - totalNumberOfShards += cursor.value.getTotalNumberOfShards(); - if (IndexMetadata.State.OPEN.equals(cursor.value.getState())) { - totalOpenIndexShards += cursor.value.getTotalNumberOfShards(); + for (IndexMetadata cursor : indices.values()) { + totalNumberOfShards += cursor.getTotalNumberOfShards(); + if (IndexMetadata.State.OPEN.equals(cursor.getState())) { + totalOpenIndexShards += cursor.getTotalNumberOfShards(); } } this.totalNumberOfShards = totalNumberOfShards; @@ -359,8 +358,7 @@ public boolean hasAlias(String alias) { } public boolean equalsAliases(Metadata other) { - for (ObjectCursor cursor : other.indices().values()) { - IndexMetadata otherIndex = cursor.value; + for (IndexMetadata otherIndex : other.indices().values()) { IndexMetadata thisIndex = index(otherIndex.getIndex()); if (thisIndex == null) { return false; @@ -469,21 +467,19 @@ private ImmutableOpenMap> findAliases(final String[] * @see MapperPlugin#getFieldFilter() * */ - public ImmutableOpenMap findMappings(String[] concreteIndices, Function> fieldFilter) + public 
Map findMappings(String[] concreteIndices, Function> fieldFilter) throws IOException { assert concreteIndices != null; if (concreteIndices.length == 0) { - return ImmutableOpenMap.of(); + return Map.of(); } - ImmutableOpenMap.Builder indexMapBuilder = ImmutableOpenMap.builder(); - Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); - for (String index : intersection) { - IndexMetadata indexMetadata = indices.get(index); - Predicate fieldPredicate = fieldFilter.apply(index); - indexMapBuilder.put(index, filterFields(indexMetadata.mapping(), fieldPredicate)); - } - return indexMapBuilder.build(); + final Map indexMapBuilder = new HashMap<>(); + Arrays.stream(concreteIndices) + .filter(indices.keySet()::contains) + .forEach((idx) -> indexMapBuilder.put(idx, filterFields(indices.get(idx).mapping(), fieldFilter.apply(idx)))); + + return Collections.unmodifiableMap(indexMapBuilder); } /** @@ -786,20 +782,20 @@ public IndexMetadata getIndexSafe(Index index) { throw new IndexNotFoundException(index); } - public ImmutableOpenMap indices() { + public Map indices() { return this.indices; } - public ImmutableOpenMap getIndices() { + public Map getIndices() { return indices(); } - public ImmutableOpenMap templates() { + public Map templates() { return this.templates; } - public ImmutableOpenMap getTemplates() { - return this.templates; + public Map getTemplates() { + return templates(); } public Map componentTemplates() { @@ -824,12 +820,12 @@ public DecommissionAttributeMetadata decommissionAttributeMetadata() { return custom(DecommissionAttributeMetadata.TYPE); } - public ImmutableOpenMap customs() { + public Map customs() { return this.customs; } - public ImmutableOpenMap getCustoms() { - return this.customs; + public Map getCustoms() { + return this.customs(); } /** @@ -908,7 +904,7 @@ public boolean routingRequired(String concreteIndex) { @Override public Iterator iterator() { - return indices.valuesIt(); + return indices.values().iterator(); } public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2) { @@ -932,15 +928,15 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 } // Check if any persistent metadata needs to be saved int customCount1 = 0; - for (ObjectObjectCursor cursor : metadata1.customs) { - if (cursor.value.context().contains(XContentContext.GATEWAY)) { - if (!cursor.value.equals(metadata2.custom(cursor.key))) return false; + for (Map.Entry cursor : metadata1.customs.entrySet()) { + if (cursor.getValue().context().contains(XContentContext.GATEWAY)) { + if (!cursor.getValue().equals(metadata2.custom(cursor.getKey()))) return false; customCount1++; } } int customCount2 = 0; - for (ObjectCursor cursor : metadata2.customs.values()) { - if (cursor.value.context().contains(XContentContext.GATEWAY)) { + for (final Custom cursor : metadata2.customs.values()) { + if (cursor.context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -981,9 +977,9 @@ private static class MetadataDiff implements Diff { private final Settings transientSettings; private final Settings persistentSettings; private final Diff hashesOfConsistentSettings; - private final Diff> indices; - private final Diff> templates; - private final Diff> customs; + private final Diff> indices; + private final Diff> templates; + private final Diff> customs; MetadataDiff(Metadata before, Metadata after) { clusterUUID = after.clusterUUID; @@ -1021,9 +1017,9 @@ private static class MetadataDiff implements Diff { } else { 
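findMappings previously relied on HppcMaps.intersection; the replacement streams over the requested names, keeps only those present in the indices map, and wraps the result unmodifiably. A sketch of that select-by-requested-keys pattern, with String values standing in for the mapping metadata and an invented method name:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class FindByKeys {
    // Build an unmodifiable map over only the requested keys that actually exist,
    // mirroring the stream-based replacement for HppcMaps.intersection.
    static Map<String, String> select(String[] requested, Map<String, String> source) {
        if (requested.length == 0) {
            return Map.of();
        }
        final Map<String, String> result = new HashMap<>();
        Arrays.stream(requested)
            .filter(source.keySet()::contains)
            .forEach(key -> result.put(key, source.get(key)));
        return Collections.unmodifiableMap(result);
    }

    public static void main(String[] args) {
        Map<String, String> mappings = Map.of("index1", "m1", "index2", "m2");
        System.out.println(select(new String[] { "index1", "missing" }, mappings)); // {index1=m1}
    }
}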
hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; } - indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); - templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); - customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); + indices = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); + templates = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); + customs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); } @Override @@ -1114,20 +1110,20 @@ public void writeTo(StreamOutput out) throws IOException { indexMetadata.writeTo(out); } out.writeVInt(templates.size()); - for (ObjectCursor cursor : templates.values()) { - cursor.value.writeTo(out); + for (final IndexTemplateMetadata cursor : templates.values()) { + cursor.writeTo(out); } // filter out custom states not supported by the other node int numberOfCustoms = 0; - for (final ObjectCursor cursor : customs.values()) { - if (FeatureAware.shouldSerialize(out, cursor.value)) { + for (final Custom cursor : customs.values()) { + if (FeatureAware.shouldSerialize(out, cursor)) { numberOfCustoms++; } } out.writeVInt(numberOfCustoms); - for (final ObjectCursor cursor : customs.values()) { - if (FeatureAware.shouldSerialize(out, cursor.value)) { - out.writeNamedWriteable(cursor.value); + for (final Custom cursor : customs.values()) { + if (FeatureAware.shouldSerialize(out, cursor)) { + out.writeNamedWriteable(cursor); } } } @@ -1156,15 +1152,15 @@ public static class Builder { private Settings persistentSettings = Settings.Builder.EMPTY_SETTINGS; private DiffableStringMap hashesOfConsistentSettings = new DiffableStringMap(Collections.emptyMap()); - private final ImmutableOpenMap.Builder indices; - private final ImmutableOpenMap.Builder templates; - private final ImmutableOpenMap.Builder customs; + private final Map indices; + private final Map templates; + private final Map customs; public Builder() { clusterUUID = UNKNOWN_CLUSTER_UUID; - indices = ImmutableOpenMap.builder(); - templates = ImmutableOpenMap.builder(); - customs = ImmutableOpenMap.builder(); + indices = new HashMap<>(); + templates = new HashMap<>(); + customs = new HashMap<>(); indexGraveyard(IndexGraveyard.builder().build()); // create new empty index graveyard to initialize } @@ -1176,9 +1172,9 @@ public Builder(Metadata metadata) { this.persistentSettings = metadata.persistentSettings; this.hashesOfConsistentSettings = metadata.hashesOfConsistentSettings; this.version = metadata.version; - this.indices = ImmutableOpenMap.builder(metadata.indices); - this.templates = ImmutableOpenMap.builder(metadata.templates); - this.customs = ImmutableOpenMap.builder(metadata.customs); + this.indices = new HashMap<>(metadata.indices); + this.templates = new HashMap<>(metadata.templates); + this.customs = new HashMap<>(metadata.customs); } public Builder put(IndexMetadata.Builder indexMetadataBuilder) { @@ -1231,7 +1227,7 @@ public Builder removeAllIndices() { return this; } - public Builder indices(ImmutableOpenMap indices) { + public Builder indices(final Map indices) { this.indices.putAll(indices); return this; } @@ -1250,7 +1246,7 @@ public Builder removeTemplate(String templateName) { return this; } - 
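The Builder now accumulates into plain HashMaps while the constructor wraps the supplied maps with Collections.unmodifiableMap. The toy builder below shows the shape of that arrangement; unlike the patch it also copies in build(), a defensive choice made only for this sketch, and none of these names come from the Metadata API.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class SimpleMetadata {
    private final Map<String, String> indices;

    private SimpleMetadata(Map<String, String> indices) {
        // built instance exposes a read-only view
        this.indices = Collections.unmodifiableMap(indices);
    }

    Map<String, String> indices() {
        return indices;
    }

    static final class Builder {
        private final Map<String, String> indices = new HashMap<>();

        Builder put(String name, String state) {
            indices.put(name, state);
            return this;
        }

        SimpleMetadata build() {
            // copy so later mutation of the builder cannot leak into the built instance
            return new SimpleMetadata(new HashMap<>(indices));
        }
    }

    public static void main(String[] args) {
        SimpleMetadata md = new Builder().put("index1", "open").build();
        System.out.println(md.indices()); // {index1=open}
    }
}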
public Builder templates(ImmutableOpenMap templates) { + public Builder templates(Map templates) { this.templates.putAll(templates); return this; } @@ -1349,8 +1345,9 @@ public Builder removeCustom(String type) { return this; } - public Builder customs(ImmutableOpenMap customs) { - StreamSupport.stream(customs.spliterator(), false).forEach(cursor -> Objects.requireNonNull(cursor.value, cursor.key)); + public Builder customs(Map customs) { + StreamSupport.stream(Spliterators.spliterator(customs.entrySet(), 0), false) + .forEach(cursor -> Objects.requireNonNull(cursor.getValue(), cursor.getKey())); this.customs.putAll(customs); return this; } @@ -1376,7 +1373,7 @@ public DecommissionAttributeMetadata decommissionAttributeMetadata() { public Builder updateSettings(Settings settings, String... indices) { if (indices == null || indices.length == 0) { - indices = this.indices.keys().toArray(String.class); + indices = this.indices.keySet().toArray(new String[0]); } for (String index : indices) { IndexMetadata indexMetadata = this.indices.get(index); @@ -1478,8 +1475,7 @@ public Metadata build() { final List allClosedIndices = new ArrayList<>(); final List visibleClosedIndices = new ArrayList<>(); final Set allAliases = new HashSet<>(); - for (ObjectCursor cursor : indices.values()) { - final IndexMetadata indexMetadata = cursor.value; + for (final IndexMetadata indexMetadata : indices.values()) { final String name = indexMetadata.getIndex().getName(); boolean added = allIndices.add(name); assert added : "double index named [" + name + "]"; @@ -1514,10 +1510,10 @@ public Metadata build() { ArrayList duplicates = new ArrayList<>(); if (aliasDuplicatesWithIndices.isEmpty() == false) { // iterate again and constructs a helpful message - for (ObjectCursor cursor : indices.values()) { + for (final IndexMetadata cursor : indices.values()) { for (String alias : aliasDuplicatesWithIndices) { - if (cursor.value.getAliases().containsKey(alias)) { - duplicates.add(alias + " (alias of " + cursor.value.getIndex() + ") conflicts with index"); + if (cursor.getAliases().containsKey(alias)) { + duplicates.add(alias + " (alias of " + cursor.getIndex() + ") conflicts with index"); } } } @@ -1527,10 +1523,10 @@ public Metadata build() { aliasDuplicatesWithDataStreams.retainAll(allDataStreams); if (aliasDuplicatesWithDataStreams.isEmpty() == false) { // iterate again and constructs a helpful message - for (ObjectCursor cursor : indices.values()) { + for (final IndexMetadata cursor : indices.values()) { for (String alias : aliasDuplicatesWithDataStreams) { - if (cursor.value.getAliases().containsKey(alias)) { - duplicates.add(alias + " (alias of " + cursor.value.getIndex() + ") conflicts with data stream"); + if (cursor.getAliases().containsKey(alias)) { + duplicates.add(alias + " (alias of " + cursor.getIndex() + ") conflicts with data stream"); } } } @@ -1576,9 +1572,9 @@ public Metadata build() { transientSettings, persistentSettings, hashesOfConsistentSettings, - indices.build(), - templates.build(), - customs.build(), + indices, + templates, + customs, allIndicesArray, visibleIndicesArray, allOpenIndicesArray, @@ -1615,9 +1611,7 @@ private SortedMap buildIndicesLookup() { } } - for (ObjectCursor cursor : indices.values()) { - IndexMetadata indexMetadata = cursor.value; - + for (final IndexMetadata indexMetadata : indices.values()) { IndexAbstraction.Index index; DataStream parent = indexToDataStreamLookup.get(indexMetadata.getIndex().getName()); if (parent != null) { @@ -1630,7 +1624,7 @@ private SortedMap 
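Builder#customs keeps its null-value guard by streaming a Spliterator over entrySet() and calling Objects.requireNonNull. The same check can be written with Map.forEach, as in this small stand-alone sketch (the helper name is invented for illustration):

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

final class NullValueCheck {
    // Reject null values before merging a caller-supplied map, keeping the key in the error message.
    static <K, V> void putAllNonNull(Map<K, V> target, Map<K, V> source) {
        source.forEach((key, value) -> Objects.requireNonNull(value, String.valueOf(key)));
        target.putAll(source);
    }

    public static void main(String[] args) {
        Map<String, Object> customs = new HashMap<>();
        putAllNonNull(customs, Map.of("snapshots", new Object()));
        System.out.println(customs.keySet()); // [snapshots]
    }
}

Map.forEach reaches the same null check without constructing a Spliterator-backed stream, which may read more simply if this spot is revisited.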
buildIndicesLookup() { IndexAbstraction existing = indicesLookup.put(indexMetadata.getIndex().getName(), index); assert existing == null : "duplicate for " + indexMetadata.getIndex(); - for (ObjectObjectCursor aliasCursor : indexMetadata.getAliases()) { + for (final ObjectObjectCursor aliasCursor : indexMetadata.getAliases()) { AliasMetadata aliasMetadata = aliasCursor.value; indicesLookup.compute(aliasMetadata.getAlias(), (aliasName, alias) -> { if (alias == null) { @@ -1714,8 +1708,8 @@ public static void toXContent(Metadata metadata, XContentBuilder builder, ToXCon } builder.startObject("templates"); - for (ObjectCursor cursor : metadata.templates().values()) { - IndexTemplateMetadata.Builder.toXContentWithTypes(cursor.value, builder, params); + for (final IndexTemplateMetadata cursor : metadata.templates().values()) { + IndexTemplateMetadata.Builder.toXContentWithTypes(cursor, builder, params); } builder.endObject(); @@ -1727,10 +1721,10 @@ public static void toXContent(Metadata metadata, XContentBuilder builder, ToXCon builder.endObject(); } - for (ObjectObjectCursor cursor : metadata.customs()) { - if (cursor.value.context().contains(context)) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); + for (final Map.Entry cursor : metadata.customs().entrySet()) { + if (cursor.getValue().context().contains(context)) { + builder.startObject(cursor.getKey()); + cursor.getValue().toXContent(builder, params); builder.endObject(); } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 25cde7685b5e5..5ec718687dad9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -31,7 +31,6 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -170,8 +169,7 @@ public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey( @Override public ClusterState execute(ClusterState currentState) { Set templateNames = new HashSet<>(); - for (ObjectCursor cursor : currentState.metadata().templates().keys()) { - String templateName = cursor.value; + for (final String templateName : currentState.metadata().templates().keySet()) { if (Regex.simpleMatch(request.name, templateName)) { templateNames.add(templateName); } @@ -713,9 +711,9 @@ public static Map> findConflictingV1Templates( ) { Automaton v2automaton = Regex.simpleMatchToAutomaton(indexPatterns.toArray(Strings.EMPTY_ARRAY)); Map> overlappingTemplates = new HashMap<>(); - for (ObjectObjectCursor cursor : state.metadata().templates()) { - String name = cursor.key; - IndexTemplateMetadata template = cursor.value; + for (final Map.Entry cursor : state.metadata().templates().entrySet()) { + String name = cursor.getKey(); + IndexTemplateMetadata template = cursor.getValue(); Automaton v1automaton = Regex.simpleMatchToAutomaton(template.patterns().toArray(Strings.EMPTY_ARRAY)); if (Operations.isEmpty(Operations.intersection(v2automaton, v1automaton)) == false) { logger.debug( @@ -1014,8 +1012,7 @@ static ClusterState innerPutTemplate( public static List findV1Templates(Metadata metadata, String indexName, @Nullable Boolean isHidden) { final Predicate 
patternMatchPredicate = pattern -> Regex.simpleMatch(pattern, indexName); final List matchedTemplates = new ArrayList<>(); - for (ObjectCursor cursor : metadata.templates().values()) { - final IndexTemplateMetadata template = cursor.value; + for (final IndexTemplateMetadata template : metadata.templates().values()) { if (isHidden == null || isHidden == Boolean.FALSE) { final boolean matched = template.patterns().stream().anyMatch(patternMatchPredicate); if (matched) { @@ -1238,7 +1235,7 @@ public static List> resolveAliases(final List { if (template.aliases() != null) { Map aliasMeta = new HashMap<>(); - for (ObjectObjectCursor cursor : template.aliases()) { + for (final ObjectObjectCursor cursor : template.aliases()) { aliasMeta.put(cursor.key, cursor.value); } resolvedAliases.add(aliasMeta); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java index b1dd490a032b0..22d7235fd1f98 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/SystemIndexMetadataUpgradeService.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterChangedEvent; @@ -40,11 +39,11 @@ import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.indices.SystemIndices; import java.util.ArrayList; import java.util.List; +import java.util.Map; /** * A service responsible for updating the metadata used by system indices. 
@@ -60,7 +59,7 @@ public class SystemIndexMetadataUpgradeService implements ClusterStateListener { private boolean clusterManager = false; - private volatile ImmutableOpenMap lastIndexMetadataMap = ImmutableOpenMap.of(); + private volatile Map lastIndexMetadataMap = Map.of(); private volatile boolean updateTaskPending = false; public SystemIndexMetadataUpgradeService(SystemIndices systemIndices, ClusterService clusterService) { @@ -75,12 +74,12 @@ public void clusterChanged(ClusterChangedEvent event) { } if (clusterManager && updateTaskPending == false) { - final ImmutableOpenMap indexMetadataMap = event.state().metadata().indices(); + final Map indexMetadataMap = event.state().metadata().indices(); if (lastIndexMetadataMap != indexMetadataMap) { - for (ObjectObjectCursor cursor : indexMetadataMap) { - if (cursor.value != lastIndexMetadataMap.get(cursor.key)) { - if (systemIndices.isSystemIndex(cursor.value.getIndex()) != cursor.value.isSystem()) { + for (final Map.Entry cursor : indexMetadataMap.entrySet()) { + if (cursor.getValue() != lastIndexMetadataMap.get(cursor.getKey())) { + if (systemIndices.isSystemIndex(cursor.getValue().getIndex()) != cursor.getValue().isSystem()) { updateTaskPending = true; clusterService.submitStateUpdateTask( "system_index_metadata_upgrade_service {system metadata change}", @@ -103,12 +102,12 @@ public class SystemIndexMetadataUpdateTask extends ClusterStateUpdateTask { @Override public ClusterState execute(ClusterState currentState) throws Exception { - final ImmutableOpenMap indexMetadataMap = currentState.metadata().indices(); + final Map indexMetadataMap = currentState.metadata().indices(); final List updatedMetadata = new ArrayList<>(); - for (ObjectObjectCursor cursor : indexMetadataMap) { - if (cursor.value != lastIndexMetadataMap.get(cursor.key)) { - if (systemIndices.isSystemIndex(cursor.value.getIndex()) != cursor.value.isSystem()) { - updatedMetadata.add(IndexMetadata.builder(cursor.value).system(!cursor.value.isSystem()).build()); + for (Map.Entry cursor : indexMetadataMap.entrySet()) { + if (cursor.getValue() != lastIndexMetadataMap.get(cursor.getKey())) { + if (systemIndices.isSystemIndex(cursor.getValue().getIndex()) != cursor.getValue().isSystem()) { + updatedMetadata.add(IndexMetadata.builder(cursor.getValue()).system(!cursor.getValue().isSystem()).build()); } } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java index 2f1fdc1d010b2..b7c9c1512956d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -48,7 +47,6 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.ToXContent; @@ -91,7 +89,7 @@ public class TemplateUpgradeService implements ClusterStateListener { final AtomicInteger upgradesInProgress = new AtomicInteger(); - private ImmutableOpenMap 
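SystemIndexMetadataUpgradeService keeps its reference comparison (!=) between the last observed index map and the current one; that works because unchanged IndexMetadata instances are reused across cluster states. A hedged, self-contained sketch of that snapshot-diffing idea, with Object values standing in for IndexMetadata:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class SnapshotChangeDetector {
    private volatile Map<String, Object> last = Map.of();

    // Reference comparison (not equals) is enough when unchanged entries keep
    // the exact same value instance between successive immutable snapshots.
    List<String> changedKeys(Map<String, Object> current) {
        final List<String> changed = new ArrayList<>();
        if (last != current) {
            for (Map.Entry<String, Object> entry : current.entrySet()) {
                if (entry.getValue() != last.get(entry.getKey())) {
                    changed.add(entry.getKey());
                }
            }
            last = current;
        }
        return changed;
    }

    public static void main(String[] args) {
        SnapshotChangeDetector detector = new SnapshotChangeDetector();
        Object indexMeta = new Object();
        System.out.println(detector.changedKeys(Map.of("idx", indexMeta))); // [idx]
        System.out.println(detector.changedKeys(Map.of("idx", indexMeta))); // [] (same value instance, so no change)
    }
}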
lastTemplateMetadata; + private Map lastTemplateMetadata; public TemplateUpgradeService( Client client, @@ -131,7 +129,7 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - ImmutableOpenMap templates = state.getMetadata().getTemplates(); + final Map templates = state.getMetadata().getTemplates(); if (templates == lastTemplateMetadata) { // we already checked these sets of templates - no reason to check it again @@ -225,9 +223,7 @@ void tryFinishUpgrade(AtomicBoolean anyUpgradeFailed) { // Check upgraders are satisfied after the update completed. If they still // report that changes are required, this might indicate a bug or that something // else tinkering with the templates during the upgrade. - final ImmutableOpenMap upgradedTemplates = clusterService.state() - .getMetadata() - .getTemplates(); + final Map upgradedTemplates = clusterService.state().getMetadata().getTemplates(); final boolean changesRequired = calculateTemplateChanges(upgradedTemplates).isPresent(); if (changesRequired) { logger.warn("Templates are still reported as out of date after the upgrade. The template upgrade will be retried."); @@ -239,13 +235,11 @@ void tryFinishUpgrade(AtomicBoolean anyUpgradeFailed) { } } - Optional, Set>> calculateTemplateChanges( - ImmutableOpenMap templates - ) { + Optional, Set>> calculateTemplateChanges(final Map templates) { // collect current templates Map existingMap = new HashMap<>(); - for (ObjectObjectCursor customCursor : templates) { - existingMap.put(customCursor.key, customCursor.value); + for (Map.Entry customCursor : templates.entrySet()) { + existingMap.put(customCursor.getKey(), customCursor.getValue()); } // upgrade global custom meta data Map upgradedMap = indexTemplateMetadataUpgraders.apply(existingMap); diff --git a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java index f505a27a5624c..e56f7a838d85f 100644 --- a/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/opensearch/env/NodeRepurposeCommand.java @@ -31,7 +31,6 @@ package org.opensearch.env; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import joptsimple.OptionParser; import joptsimple.OptionSet; import org.opensearch.OpenSearchException; @@ -158,7 +157,7 @@ private void processNoClusterManagerRepurposeNode( Sets.union( indexUUIDsFor(indexPaths), StreamSupport.stream(metadata.indices().values().spliterator(), false) - .map(imd -> imd.value.getIndexUUID()) + .map(imd -> imd.getIndexUUID()) .collect(Collectors.toSet()) ) ); @@ -302,9 +301,9 @@ private void outputHowToSeeVerboseInformation(Terminal terminal) { private String toIndexName(String uuid, Metadata metadata) { if (metadata != null) { - for (ObjectObjectCursor indexMetadata : metadata.indices()) { - if (indexMetadata.value.getIndexUUID().equals(uuid)) { - return indexMetadata.value.getIndex().getName(); + for (final IndexMetadata indexMetadata : metadata.indices().values()) { + if (indexMetadata.getIndexUUID().equals(uuid)) { + return indexMetadata.getIndex().getName(); } } } diff --git a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java index 4402da155ff5a..1563ac84bdd1c 100644 --- a/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/opensearch/gateway/ClusterStateUpdaters.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import 
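calculateTemplateChanges still copies the template map entry by entry into a HashMap; with java.util.Map the copy constructor expresses the same thing in one line. A tiny sketch with made-up template names:

import java.util.HashMap;
import java.util.Map;

final class CopyTemplates {
    public static void main(String[] args) {
        Map<String, String> templates = Map.of("template1", "te*", "template2", "logs-*");
        // One-line equivalent of looping over entrySet() and put()-ing each entry.
        Map<String, String> existing = new HashMap<>(templates);
        System.out.println(existing.size()); // 2
    }
}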
com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -120,8 +119,8 @@ static ClusterState recoverClusterBlocks(final ClusterState state) { static ClusterState updateRoutingTable(final ClusterState state) { // initialize all index routing tables as empty final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(state.routingTable()); - for (final ObjectCursor cursor : state.metadata().indices().values()) { - routingTableBuilder.addAsRecovery(cursor.value); + for (final IndexMetadata cursor : state.metadata().indices().values()) { + routingTableBuilder.addAsRecovery(cursor); } // start with 0 based versions for routing table routingTableBuilder.version(0); diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index 8c2a55fda3742..5e0c32751befb 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; @@ -191,8 +190,8 @@ void findNewAndAddDanglingIndices(final Metadata metadata) { */ public Map findNewDanglingIndices(Map existingDanglingIndices, final Metadata metadata) { final Set excludeIndexPathIds = new HashSet<>(metadata.indices().size() + danglingIndices.size()); - for (ObjectCursor cursor : metadata.indices().values()) { - excludeIndexPathIds.add(cursor.value.getIndex().getUUID()); + for (final IndexMetadata indexMetadata : metadata.indices().values()) { + excludeIndexPathIds.add(indexMetadata.getIndex().getUUID()); } for (Index index : existingDanglingIndices.keySet()) { excludeIndexPathIds.add(index.getUUID()); diff --git a/server/src/main/java/org/opensearch/gateway/Gateway.java b/server/src/main/java/org/opensearch/gateway/Gateway.java index 413c08569c64a..2648af4b73be3 100644 --- a/server/src/main/java/org/opensearch/gateway/Gateway.java +++ b/server/src/main/java/org/opensearch/gateway/Gateway.java @@ -33,7 +33,6 @@ package org.opensearch.gateway; import com.carrotsearch.hppc.ObjectFloatHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.FailedNodeException; @@ -95,8 +94,8 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t } else if (nodeState.metadata().version() > electedGlobalState.version()) { electedGlobalState = nodeState.metadata(); } - for (final ObjectCursor cursor : nodeState.metadata().indices().values()) { - indices.addTo(cursor.value.getIndex(), 1); + for (final IndexMetadata cursor : nodeState.metadata().indices().values()) { + indices.addTo(cursor.getIndex(), 1); } } if (found < requiredAllocation) { diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index 9ac1248269589..f2ac2d56a9a26 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -32,7 +32,6 @@ package org.opensearch.gateway; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import 
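The Gateway hunk still tallies index occurrences with hppc's ObjectFloatHashMap#addTo. If that dependency is trimmed later, Map.merge gives a plain-Java equivalent; the sketch below is only an analogue, not a change this patch makes.

import java.util.HashMap;
import java.util.Map;

final class IndexTally {
    public static void main(String[] args) {
        // merge() adds to the existing count or inserts the initial value,
        // the java.util analogue of ObjectFloatHashMap#addTo.
        Map<String, Integer> indices = new HashMap<>();
        for (String index : new String[] { "logs", "logs", "metrics" }) {
            indices.merge(index, 1, Integer::sum);
        }
        System.out.println(indices); // {logs=2, metrics=1}
    }
}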
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; @@ -55,7 +54,6 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -271,15 +269,15 @@ static Metadata upgradeMetadata( } private static boolean applyPluginUpgraders( - ImmutableOpenMap existingData, + final Map existingData, UnaryOperator> upgrader, Consumer removeData, BiConsumer putData ) { // collect current data Map existingMap = new HashMap<>(); - for (ObjectObjectCursor customCursor : existingData) { - existingMap.put(customCursor.key, customCursor.value); + for (Map.Entry customCursor : existingData.entrySet()) { + existingMap.put(customCursor.getKey(), customCursor.getValue()); } // upgrade global custom meta data Map upgradedCustoms = upgrader.apply(existingMap); diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java index d874b4a9375b8..54c5d88918e45 100644 --- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java @@ -31,7 +31,6 @@ package org.opensearch.gateway; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -745,8 +744,7 @@ private WriterStats updateMetadata(Metadata previouslyWrittenMetadata, Metadata } final Map indexMetadataVersionByUUID = new HashMap<>(previouslyWrittenMetadata.indices().size()); - for (ObjectCursor cursor : previouslyWrittenMetadata.indices().values()) { - final IndexMetadata indexMetadata = cursor.value; + for (final IndexMetadata indexMetadata : previouslyWrittenMetadata.indices().values()) { final Long previousValue = indexMetadataVersionByUUID.putIfAbsent( indexMetadata.getIndexUUID(), indexMetadata.getVersion() @@ -756,8 +754,7 @@ private WriterStats updateMetadata(Metadata previouslyWrittenMetadata, Metadata int numIndicesUpdated = 0; int numIndicesUnchanged = 0; - for (ObjectCursor cursor : metadata.indices().values()) { - final IndexMetadata indexMetadata = cursor.value; + for (final IndexMetadata indexMetadata : metadata.indices().values()) { final Long previousVersion = indexMetadataVersionByUUID.get(indexMetadata.getIndexUUID()); if (previousVersion == null || indexMetadata.getVersion() != previousVersion) { logger.trace( @@ -817,8 +814,7 @@ private WriterStats addMetadata(Metadata metadata) throws IOException { metadataIndexWriter.updateGlobalMetadata(globalMetadataDocument); } - for (ObjectCursor cursor : metadata.indices().values()) { - final IndexMetadata indexMetadata = cursor.value; + for (final IndexMetadata indexMetadata : metadata.indices().values()) { final Document indexMetadataDocument = makeIndexMetadataDocument(indexMetadata, documentBuffer); for (MetadataIndexWriter metadataIndexWriter : metadataIndexWriters) { metadataIndexWriter.updateIndexMetadataDocument(indexMetadataDocument, indexMetadata.getIndex()); diff --git a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java 
b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java index e2ff76e872a5e..bd4cbd6573961 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -144,8 +144,6 @@ protected void findAndProcessShardPath( final IndexMetadata indexMetadata; final int shardId; - final int fromNodeId; - final int toNodeId; if (options.has(folderOption)) { final Path path = getPath(folderOption.value(options)).getParent(); @@ -166,10 +164,7 @@ protected void findAndProcessShardPath( && NodeEnvironment.NODES_FOLDER.equals(shardParentParent.getParent().getParent().getFileName().toString()) // `nodes` check ) { shardId = Integer.parseInt(shardIdFileName); - fromNodeId = Integer.parseInt(nodeIdFileName); - toNodeId = fromNodeId + 1; indexMetadata = StreamSupport.stream(clusterState.metadata().indices().values().spliterator(), false) - .map(imd -> imd.value) .filter(imd -> imd.getIndexUUID().equals(indexUUIDFolderName)) .findFirst() .orElse(null); @@ -249,11 +244,9 @@ protected void dropCorruptMarkerFiles(Terminal terminal, Path path, Directory di ); } String[] files = directory.listAll(); - boolean found = false; for (String file : files) { if (file.startsWith(Store.CORRUPTED_MARKER_NAME_PREFIX)) { directory.deleteFile(file); - terminal.println("Deleted corrupt marker " + file + " from " + path); } } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java index 3e8c68391b9e3..c4d632f81cfcb 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTemplatesAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.cat; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.client.node.NodeClient; @@ -112,8 +111,7 @@ protected Table getTableWithHeader(RestRequest request) { private Table buildTable(RestRequest request, ClusterStateResponse clusterStateResponse, String patternString) { Table table = getTableWithHeader(request); Metadata metadata = clusterStateResponse.getState().metadata(); - for (ObjectObjectCursor entry : metadata.templates()) { - IndexTemplateMetadata indexData = entry.value; + for (final IndexTemplateMetadata indexData : metadata.templates().values()) { if (patternString == null || Regex.simpleMatch(patternString, indexData.name())) { table.startRow(); table.addCell(indexData.name()); diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 9e1fda3a06779..d4aef452a8cc2 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -646,18 +646,18 @@ public ClusterState execute(ClusterState currentState) { } if (metadata.templates() != null) { // TODO: Should all existing templates be deleted first? 
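The RemoveCorruptedShardDataCommand hunk keeps the stream pipeline but drops the .map(imd -> imd.value) unwrapping step, since values() of a java.util.Map already yields IndexMetadata directly; values().stream() would also avoid the StreamSupport wrapper altogether. A stand-alone sketch with an invented IndexMeta class in place of IndexMetadata:

import java.util.Map;
import java.util.Optional;

final class FindByUuid {
    static final class IndexMeta {
        final String name;
        final String uuid;
        IndexMeta(String name, String uuid) { this.name = name; this.uuid = uuid; }
    }

    // values().stream() on a java.util.Map replaces the
    // StreamSupport.stream(values().spliterator(), false) wrapper the hppc map required.
    static Optional<String> nameByUuid(Map<String, IndexMeta> indices, String uuid) {
        return indices.values().stream()
            .filter(imd -> imd.uuid.equals(uuid))
            .map(imd -> imd.name)
            .findFirst();
    }

    public static void main(String[] args) {
        Map<String, IndexMeta> indices = Map.of("logs", new IndexMeta("logs", "uuid-1"));
        System.out.println(nameByUuid(indices, "uuid-1").orElse("unknown")); // logs
    }
}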
- for (ObjectCursor cursor : metadata.templates().values()) { - mdBuilder.put(cursor.value); + for (final IndexTemplateMetadata cursor : metadata.templates().values()) { + mdBuilder.put(cursor); } } if (metadata.customs() != null) { - for (ObjectObjectCursor cursor : metadata.customs()) { - if (RepositoriesMetadata.TYPE.equals(cursor.key) == false - && DataStreamMetadata.TYPE.equals(cursor.key) == false) { + for (final Map.Entry cursor : metadata.customs().entrySet()) { + if (RepositoriesMetadata.TYPE.equals(cursor.getKey()) == false + && DataStreamMetadata.TYPE.equals(cursor.getKey()) == false) { // Don't restore repositories while we are working with them // TODO: Should we restore them at the end? // Also, don't restore data streams here, we already added them to the metadata builder above - mdBuilder.putCustom(cursor.key, cursor.value); + mdBuilder.putCustom(cursor.getKey(), cursor.getValue()); } } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java index 2be7cf9d4dbb3..e8115a97ac98a 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotUtils.java @@ -31,11 +31,9 @@ package org.opensearch.snapshots; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.regex.Regex; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; @@ -162,7 +160,7 @@ public static List filterIndices(List availableIndices, String[] * @param repoName repo name for which the verification is being done */ public static void validateSnapshotsBackingAnyIndex( - ImmutableOpenMap metadata, + final Map metadata, List snapshotIds, String repoName ) { @@ -170,8 +168,7 @@ public static void validateSnapshotsBackingAnyIndex( final Set snapshotsToBeNotDeleted = new HashSet<>(); snapshotIds.forEach(snapshotId -> uuidToSnapshotId.put(snapshotId.getUUID(), snapshotId)); - for (ObjectCursor cursor : metadata.values()) { - IndexMetadata indexMetadata = cursor.value; + for (final IndexMetadata indexMetadata : metadata.values()) { String storeType = indexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()); if (IndexModule.Type.REMOTE_SNAPSHOT.match(storeType)) { String snapshotId = indexMetadata.getSettings().get(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey()); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index d466e5bba4d20..5571eb020b9e0 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -44,7 +44,6 @@ import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Strings; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; @@ -73,9 +72,9 
@@ public void testToXContent() throws IOException { .build() ) .build(); - ImmutableOpenMap.Builder openMapBuilder = ImmutableOpenMap.builder(); + final HashMap openMapBuilder = new HashMap<>(); openMapBuilder.put("index", indexMetadata); - Metadata metadata = Metadata.builder().indices(openMapBuilder.build()).build(); + Metadata metadata = Metadata.builder().indices(openMapBuilder).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(nodes).metadata(metadata).build(); RoutingExplanations routingExplanations = new RoutingExplanations(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java index f712b93b409dc..225c435ee1106 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexResponseTests.java @@ -47,6 +47,8 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; import java.util.List; import java.util.Locale; @@ -60,7 +62,7 @@ protected Writeable.Reader instanceReader() { @Override protected GetIndexResponse createTestInstance() { String[] indices = generateRandomStringArray(5, 5, false, false); - ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); + final Map mappings = new HashMap<>(); ImmutableOpenMap.Builder> aliases = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder settings = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder defaultSettings = ImmutableOpenMap.builder(); @@ -90,13 +92,6 @@ protected GetIndexResponse createTestInstance() { dataStreams.put(index, randomAlphaOfLength(5).toLowerCase(Locale.ROOT)); } } - return new GetIndexResponse( - indices, - mappings.build(), - aliases.build(), - settings.build(), - defaultSettings.build(), - dataStreams.build() - ); + return new GetIndexResponse(indices, mappings, aliases.build(), settings.build(), defaultSettings.build(), dataStreams.build()); } } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java index 5dd05789429bf..99a4315b3acee 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.io.stream.Writeable; import org.opensearch.index.mapper.MapperService; import org.opensearch.test.AbstractWireSerializingTestCase; @@ -57,11 +56,11 @@ protected Writeable.Reader instanceReader() { } private static GetMappingsResponse mutate(GetMappingsResponse original) { - ImmutableOpenMap.Builder builder = ImmutableOpenMap.builder(original.mappings()); - String indexKey = original.mappings().keys().iterator().next().value; + final Map builder = new HashMap<>(original.mappings()); + String indexKey = original.mappings().keySet().iterator().next(); builder.put(indexKey + "1", createMappingsForIndex()); - return new GetMappingsResponse(builder.build()); + return new GetMappingsResponse(builder); } @Override @@ -84,9 +83,9 @@ 
public static MappingMetadata createMappingsForIndex() { @Override protected GetMappingsResponse createTestInstance() { - ImmutableOpenMap.Builder indexBuilder = ImmutableOpenMap.builder(); + final Map indexBuilder = new HashMap<>(); indexBuilder.put("index-" + randomAlphaOfLength(5), createMappingsForIndex()); - GetMappingsResponse resp = new GetMappingsResponse(indexBuilder.build()); + GetMappingsResponse resp = new GetMappingsResponse(indexBuilder); logger.debug("--> created: {}", resp); return resp; } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java index 5b96c2a71dbf8..5881c72587756 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java @@ -82,6 +82,7 @@ import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -245,32 +246,30 @@ public void setupAction() { when(state.getNodes()).thenReturn(nodes); Metadata metadata = Metadata.builder() .indices( - ImmutableOpenMap.builder() - .putAll( - MapBuilder.newMapBuilder() - .put( - WITH_DEFAULT_PIPELINE, - IndexMetadata.builder(WITH_DEFAULT_PIPELINE) - .settings( - settings(Version.CURRENT).put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline").build() - ) - .putAlias(AliasMetadata.builder(WITH_DEFAULT_PIPELINE_ALIAS).build()) - .numberOfShards(1) - .numberOfReplicas(1) - .build() - ) - .put( - ".system", - IndexMetadata.builder(".system") - .settings(settings(Version.CURRENT)) - .system(true) - .numberOfShards(1) - .numberOfReplicas(0) - .build() - ) - .map() - ) - .build() + new HashMap<>( + MapBuilder.newMapBuilder() + .put( + WITH_DEFAULT_PIPELINE, + IndexMetadata.builder(WITH_DEFAULT_PIPELINE) + .settings( + settings(Version.CURRENT).put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline").build() + ) + .putAlias(AliasMetadata.builder(WITH_DEFAULT_PIPELINE_ALIAS).build()) + .numberOfShards(1) + .numberOfReplicas(1) + .build() + ) + .put( + ".system", + IndexMetadata.builder(".system") + .settings(settings(Version.CURRENT)) + .system(true) + .numberOfShards(1) + .numberOfReplicas(0) + .build() + ) + .map() + ) ) .build(); when(state.getMetadata()).thenReturn(metadata); @@ -659,7 +658,7 @@ public void testFindDefaultPipelineFromTemplateMatch() { Exception exception = new Exception("fake exception"); ClusterState state = clusterService.state(); - ImmutableOpenMap.Builder templateMetadataBuilder = ImmutableOpenMap.builder(); + final Map templateMetadataBuilder = new HashMap<>(); templateMetadataBuilder.put( "template1", IndexTemplateMetadata.builder("template1") @@ -692,9 +691,9 @@ public void testFindDefaultPipelineFromTemplateMatch() { Metadata metadata = mock(Metadata.class); when(state.metadata()).thenReturn(metadata); when(state.getMetadata()).thenReturn(metadata); - when(metadata.templates()).thenReturn(templateMetadataBuilder.build()); - when(metadata.getTemplates()).thenReturn(templateMetadataBuilder.build()); - when(metadata.indices()).thenReturn(ImmutableOpenMap.of()); + when(metadata.templates()).thenReturn(templateMetadataBuilder); + when(metadata.getTemplates()).thenReturn(templateMetadataBuilder); + when(metadata.indices()).thenReturn(Map.of()); IndexRequest indexRequest = new IndexRequest("missing_index").id("id"); 
         indexRequest.source(emptyMap());
diff --git a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java
index df7c10cd1acd8..9a5866a21dab6 100644
--- a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java
+++ b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java
@@ -32,8 +32,6 @@
 package org.opensearch.cluster;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.opensearch.Version;
 import org.opensearch.cluster.block.ClusterBlocks;
 import org.opensearch.cluster.metadata.IndexGraveyard;
@@ -57,6 +55,7 @@
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -411,9 +410,9 @@ private static ClusterState nextState(final ClusterState previousState, List
-        for (ObjectObjectCursor<String, Metadata.Custom> customMetadata : previousState.metadata().customs()) {
-            if (customMetadata.value instanceof TestCustomMetadata) {
-                metadataBuilder.removeCustom(customMetadata.key);
+        for (Map.Entry<String, Metadata.Custom> customMetadata : previousState.metadata().customs().entrySet()) {
+            if (customMetadata.getValue() instanceof TestCustomMetadata) {
+                metadataBuilder.removeCustom(customMetadata.getKey());
             }
         }
         for (TestCustomMetadata testCustomMetadata : customMetadataList) {
@@ -550,8 +549,8 @@ private static IndexMetadata createIndexMetadata(final Index index, final long v
     // Create the routing table for a cluster state.
     private static RoutingTable createRoutingTable(final long version, final Metadata metadata) {
         final RoutingTable.Builder builder = RoutingTable.builder().version(version);
-        for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) {
-            builder.addAsNew(cursor.value);
+        for (final IndexMetadata cursor : metadata.indices().values()) {
+            builder.addAsNew(cursor);
         }
         return builder.build();
     }
@@ -582,7 +581,7 @@ private static ClusterState executeIndicesChangesTest(
     ) {
         final int numAdd = randomIntBetween(0, 5); // add random # of indices to the next cluster state
         final List<Index> stateIndices = new ArrayList<>();
-        for (Iterator<IndexMetadata> iter = previousState.metadata().indices().valuesIt(); iter.hasNext();) {
+        for (Iterator<IndexMetadata> iter = previousState.metadata().indices().values().iterator(); iter.hasNext();) {
             stateIndices.add(iter.next().getIndex());
         }
         final int numDel;
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index 21fab839fe5c6..b9b5a08096619 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -59,7 +59,6 @@
 import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Strings;
-import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.compress.CompressedXContent;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.IndexScopedSettings;
@@ -812,9 +811,9 @@ public void testAggregateSettingsAppliesSettingsFromTemplatesAndRequest() {
         IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> {
             builder.settings(Settings.builder().put("template_setting", "value1"));
         });
-        ImmutableOpenMap.Builder<String, IndexTemplateMetadata> templatesBuilder = ImmutableOpenMap.builder();
+        final Map<String, IndexTemplateMetadata> templatesBuilder = new HashMap<>();
         templatesBuilder.put("template_1", templateMetadata);
-        Metadata metadata = new Metadata.Builder().templates(templatesBuilder.build()).build();
+        Metadata metadata = new Metadata.Builder().templates(templatesBuilder).build();
         ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .metadata(metadata)
             .build();
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java
index cb675cb9308af..8c2422c7dcf3b 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java
@@ -659,19 +659,16 @@ public void testFindMappings() throws IOException {
             .build();
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(Strings.EMPTY_ARRAY, MapperPlugin.NOOP_FIELD_FILTER);
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(Strings.EMPTY_ARRAY, MapperPlugin.NOOP_FIELD_FILTER);
             assertEquals(0, mappings.size());
         }
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
-                new String[] { "index1" },
-                MapperPlugin.NOOP_FIELD_FILTER
-            );
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(new String[] { "index1" }, MapperPlugin.NOOP_FIELD_FILTER);
             assertEquals(1, mappings.size());
             assertIndexMappingsNotFiltered(mappings, "index1");
         }
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(
                 new String[] { "index1", "index2" },
                 MapperPlugin.NOOP_FIELD_FILTER
             );
@@ -701,15 +698,12 @@ public void testFindMappingsNoOpFilters() throws IOException {
             .build();
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
-                new String[] { "index1" },
-                MapperPlugin.NOOP_FIELD_FILTER
-            );
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(new String[] { "index1" }, MapperPlugin.NOOP_FIELD_FILTER);
             MappingMetadata mappingMetadata = mappings.get("index1");
             assertSame(originalMappingMetadata, mappingMetadata);
         }
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(
                 new String[] { "index1" },
                 index -> field -> randomBoolean()
             );
@@ -764,21 +758,18 @@ public void testFindMappingsWithFilters() throws IOException {
             .build();
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
-                new String[] { "index1", "index2", "index3" },
-                index -> {
-                    if (index.equals("index1")) {
-                        return field -> field.startsWith("name.") == false
-                            && field.startsWith("properties.key.") == false
-                            && field.equals("age") == false
-                            && field.equals("address.location") == false;
-                    }
-                    if (index.equals("index2")) {
-                        return field -> false;
-                    }
-                    return MapperPlugin.NOOP_FIELD_PREDICATE;
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(new String[] { "index1", "index2", "index3" }, index -> {
+                if (index.equals("index1")) {
+                    return field -> field.startsWith("name.") == false
+                        && field.startsWith("properties.key.") == false
+                        && field.equals("age") == false
+                        && field.equals("address.location") == false;
                 }
-            );
+                if (index.equals("index2")) {
+                    return field -> false;
+                }
+                return MapperPlugin.NOOP_FIELD_PREDICATE;
+            });
             assertIndexMappingsNoFields(mappings, "index2");
             assertIndexMappingsNotFiltered(mappings, "index3");
@@ -825,7 +816,7 @@ public void testFindMappingsWithFilters() throws IOException {
         }
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(
                 new String[] { "index1", "index2", "index3" },
                 index -> field -> (index.equals("index3") && field.endsWith("keyword"))
             );
@@ -860,7 +851,7 @@ public void testFindMappingsWithFilters() throws IOException {
         }
         {
-            ImmutableOpenMap<String, MappingMetadata> mappings = metadata.findMappings(
+            final Map<String, MappingMetadata> mappings = metadata.findMappings(
                 new String[] { "index1", "index2", "index3" },
                 index -> field -> (index.equals("index2"))
             );
@@ -881,7 +872,7 @@ private static IndexMetadata.Builder buildIndexMetadata(String name, String alia
     }
     @SuppressWarnings("unchecked")
-    private static void assertIndexMappingsNoFields(ImmutableOpenMap<String, MappingMetadata> mappings, String index) {
+    private static void assertIndexMappingsNoFields(final Map<String, MappingMetadata> mappings, String index) {
         MappingMetadata docMapping = mappings.get(index);
         assertNotNull(docMapping);
         Map<String, Object> sourceAsMap = docMapping.getSourceAsMap();
@@ -893,7 +884,7 @@ private static void assertIndexMappingsNoFields(ImmutableOpenMap
-    private static void assertIndexMappingsNotFiltered(ImmutableOpenMap<String, MappingMetadata> mappings, String index) {
+    private static void assertIndexMappingsNotFiltered(final Map<String, MappingMetadata> mappings, String index) {
         MappingMetadata docMapping = mappings.get(index);
         assertNotNull(docMapping);
@@ -1050,10 +1041,9 @@ public void testBuilderRejectsNullCustom() {
     public void testBuilderRejectsNullInCustoms() {
         final Metadata.Builder builder = Metadata.builder();
         final String key = randomAlphaOfLength(10);
-        final ImmutableOpenMap.Builder<String, Metadata.Custom> mapBuilder = ImmutableOpenMap.builder();
+        final Map<String, Metadata.Custom> mapBuilder = new HashMap<>();
         mapBuilder.put(key, null);
-        final ImmutableOpenMap<String, Metadata.Custom> map = mapBuilder.build();
-        assertThat(expectThrows(NullPointerException.class, () -> builder.customs(map)).getMessage(), containsString(key));
+        assertThat(expectThrows(NullPointerException.class, () -> builder.customs(mapBuilder)).getMessage(), containsString(key));
     }
     public void testBuilderRejectsDataStreamThatConflictsWithIndex() {
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java
index d3e47c3e42f25..1219e684ce5ac 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java
@@ -48,7 +48,6 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.bytes.BytesArray;
 import org.opensearch.common.bytes.BytesReference;
-import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.test.OpenSearchTestCase;
@@ -345,7 +344,7 @@ void upgradeTemplates(Map<String, BytesReference> changes, Set<String> deletions
         @Override
         Optional<Tuple<Map<String, BytesReference>, Set<String>>> calculateTemplateChanges(
-            ImmutableOpenMap<String, IndexTemplateMetadata> templates
+            final Map<String, IndexTemplateMetadata> templates
         ) {
             final Optional<Tuple<Map<String, BytesReference>, Set<String>>> ans = super.calculateTemplateChanges(templates);
             calculateInvocation.release();
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java
index c1959a96bffcd..c2be6dfa60b51 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -294,8 +294,8 @@ private ClusterState initCluster(
         Metadata metadata = metadataBuilder.build();
-        for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) {
-            routingTableBuilder.addAsNew(cursor.value);
+        for (final IndexMetadata cursor : metadata.indices().values()) {
+            routingTableBuilder.addAsNew(cursor);
         }
         RoutingTable initialRoutingTable = routingTableBuilder.build();
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java
index a163897bf508d..7a08f0e75faf0 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -655,8 +655,8 @@ private ClusterState initCluster(
         }
         Metadata metadata = metadataBuilder.build();
-        for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) {
-            routingTableBuilder.addAsNew(cursor.value);
+        for (final IndexMetadata cursor : metadata.indices().values()) {
+            routingTableBuilder.addAsNew(cursor);
         }
         RoutingTable initialRoutingTable = routingTableBuilder.build();
@@ -910,8 +910,8 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing
             .numberOfReplicas(1);
         metadataBuilder = metadataBuilder.put(indexMeta);
         Metadata metadata = metadataBuilder.build();
-        for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) {
-            routingTableBuilder.addAsNew(cursor.value);
+        for (final IndexMetadata cursor : metadata.indices().values()) {
+            routingTableBuilder.addAsNew(cursor);
         }
         RoutingTable routingTable = routingTableBuilder.build();
         DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java
index 47111fae31d28..ab6042513af94 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/ThrottlingAllocationTests.java
@@ -33,7 +33,6 @@
 package org.opensearch.cluster.routing.allocation;
 import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.Version;
@@ -774,9 +773,9 @@ private ClusterState createRecoveryStateAndInitializeAllocations(
         Snapshot snapshot = new Snapshot("repo", new SnapshotId("snap", "randomId"));
         Set<String> snapshotIndices = new HashSet<>();
         String restoreUUID = UUIDs.randomBase64UUID();
-        for (ObjectCursor<IndexMetadata> cursor : metadata.indices().values()) {
-            Index index = cursor.value.getIndex();
-            IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(cursor.value);
+        for (final IndexMetadata cursor : metadata.indices().values()) {
+            Index index = cursor.getIndex();
+            IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(cursor);
             final int recoveryType = inputRecoveryType == null ? randomInt(5) : inputRecoveryType.intValue();
             if (recoveryType <= 4) {
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java
index 6a1b3c912ad4a..d688c5a7edfb3 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/EnableAllocationShortCircuitTests.java
@@ -135,7 +135,7 @@ public void testRebalancingSkippedIfDisabled() {
     public void testRebalancingSkippedIfDisabledIncludingOnSpecificIndices() {
         ClusterState clusterState = createClusterStateWithAllShardsAssigned();
-        final IndexMetadata indexMetadata = randomFrom(clusterState.metadata().indices().values().toArray(IndexMetadata.class));
+        final IndexMetadata indexMetadata = randomFrom(clusterState.metadata().indices().values().toArray(new IndexMetadata[0]));
         clusterState = ClusterState.builder(clusterState)
             .metadata(
                 Metadata.builder(clusterState.metadata())
@@ -162,7 +162,7 @@ public void testRebalancingSkippedIfDisabledIncludingOnSpecificIndices() {
     public void testRebalancingAttemptedIfDisabledButOverridenOnSpecificIndices() {
         ClusterState clusterState = createClusterStateWithAllShardsAssigned();
-        final IndexMetadata indexMetadata = randomFrom(clusterState.metadata().indices().values().toArray(IndexMetadata.class));
+        final IndexMetadata indexMetadata = randomFrom(clusterState.metadata().indices().values().toArray(new IndexMetadata[0]));
         clusterState = ClusterState.builder(clusterState)
             .metadata(
                 Metadata.builder(clusterState.metadata())
diff --git a/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java b/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java
index 9534a53dbe77f..1934307f88792 100644
--- a/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java
+++ b/server/src/test/java/org/opensearch/gateway/DanglingIndicesStateTests.java
@@ -37,7 +37,6 @@
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.env.NodeEnvironment;
 import org.opensearch.index.Index;
@@ -47,6 +46,7 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -233,9 +233,8 @@ public void testDanglingIndicesNotImportedWhenIndexNameIsAlreadyUsed() throws Ex
         IndexMetadata existingIndex = IndexMetadata.builder("test_index").settings(existingSettings).build();
         metaStateService.writeIndex("test_write", existingIndex);
-        final ImmutableOpenMap<String, IndexMetadata> indices = ImmutableOpenMap.builder()
-            .fPut(dangledIndex.getIndex().getName(), existingIndex)
-            .build();
+        final Map<String, IndexMetadata> indices = new HashMap<>();
+        indices.put(dangledIndex.getIndex().getName(), existingIndex);
         final Metadata metadata = Metadata.builder().indices(indices).build();
         // All dangling indices should be found...
diff --git a/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java
index 1e5a212c59825..2c825401ab94a 100644
--- a/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/FieldFilterMapperPluginTests.java
@@ -40,7 +40,6 @@
 import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest;
 import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse;
 import org.opensearch.cluster.metadata.MappingMetadata;
-import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.indices.IndicesModule;
 import org.opensearch.plugins.MapperPlugin;
@@ -166,7 +165,7 @@ private static void assertFieldMappings(
         assertEquals("Some unexpected fields were returned: " + fields.keySet(), 0, fields.size());
     }
-    private void assertExpectedMappings(ImmutableOpenMap<String, MappingMetadata> mappings) {
+    private void assertExpectedMappings(Map<String, MappingMetadata> mappings) {
         assertEquals(2, mappings.size());
         assertNotFiltered(mappings.get("index1"));
         MappingMetadata filtered = mappings.get("filtered");
diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java
index 8edd62438fb1d..ef3e85b0da775 100644
--- a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java
+++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java
@@ -331,7 +331,7 @@ public void testNonSystemIndexOpeningFails() {
             counts.getFailingIndexReplicas()
         );
-        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class))
+        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(new IndexMetadata[0]))
             .map(IndexMetadata::getIndex)
             .collect(Collectors.toList())
             .toArray(new Index[2]);
@@ -373,7 +373,7 @@ public void testSystemIndexOpeningSucceeds() {
             counts.getFailingIndexReplicas()
         );
-        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class))
+        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(new IndexMetadata[0]))
            .map(IndexMetadata::getIndex)
            .collect(Collectors.toList())
            .toArray(new Index[2]);
@@ -401,7 +401,7 @@ public void testDotIndexOpeningSucceeds() {
             counts.getFailingIndexReplicas()
         );
-        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class))
+        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(new IndexMetadata[0]))
            .map(IndexMetadata::getIndex)
            .collect(Collectors.toList())
            .toArray(new Index[2]);
@@ -429,7 +429,7 @@ public void testDotIndexOpeningFails() {
             counts.getFailingIndexReplicas()
         );
-        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class))
+        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(new IndexMetadata[0]))
            .map(IndexMetadata::getIndex)
            .collect(Collectors.toList())
            .toArray(new Index[2]);
@@ -472,7 +472,7 @@ public void testDataStreamIndexOpeningFails() {
             counts.getFailingIndexReplicas()
         );
-        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class))
+        Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(new IndexMetadata[0]))
            .map(IndexMetadata::getIndex)
            .collect(Collectors.toList())
            .toArray(new Index[2]);
diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
index 6506bff9ad60e..6080233c3a759 100644
--- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
+++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java
@@ -416,7 +416,7 @@ public ClusterState randomlyUpdateClusterState(
         // randomly delete indices
         Set<String> indicesToDelete = new HashSet<>();
         int numberOfIndicesToDelete = randomInt(Math.min(2, state.metadata().indices().size()));
-        for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metadata().indices().keys().toArray(String.class))) {
+        for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metadata().indices().keySet().toArray(new String[0]))) {
             indicesToDelete.add(state.metadata().index(index).getIndex().getName());
         }
         if (indicesToDelete.isEmpty() == false) {
@@ -429,14 +429,14 @@ public ClusterState randomlyUpdateClusterState(
         // randomly close indices
         int numberOfIndicesToClose = randomInt(Math.min(1, state.metadata().indices().size()));
-        for (String index : randomSubsetOf(numberOfIndicesToClose, state.metadata().indices().keys().toArray(String.class))) {
+        for (String index : randomSubsetOf(numberOfIndicesToClose, state.metadata().indices().keySet().toArray(new String[0]))) {
             CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metadata().index(index).getIndex().getName());
             state = cluster.closeIndices(state, closeIndexRequest);
         }
         // randomly open indices
         int numberOfIndicesToOpen = randomInt(Math.min(1, state.metadata().indices().size()));
-        for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metadata().indices().keys().toArray(String.class))) {
+        for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metadata().indices().keySet().toArray(new String[0]))) {
             OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metadata().index(index).getIndex().getName());
             state = cluster.openIndices(state, openIndexRequest);
         }
@@ -445,7 +445,7 @@ public ClusterState randomlyUpdateClusterState(
         Set<String> indicesToUpdate = new HashSet<>();
         boolean containsClosedIndex = false;
         int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metadata().indices().size()));
-        for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metadata().indices().keys().toArray(String.class))) {
+        for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metadata().indices().keySet().toArray(new String[0]))) {
             indicesToUpdate.add(state.metadata().index(index).getIndex().getName());
             if (state.metadata().index(index).getState() == IndexMetadata.State.CLOSE) {
                 containsClosedIndex = true;
diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java
index 4a378acc39b6d..417bc4d0421a0 100644
--- a/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/SnapshotUtilsTests.java
@@ -35,7 +35,6 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Metadata;
-import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.index.Index;
 import org.opensearch.index.IndexModule;
@@ -44,6 +43,7 @@
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED;
@@ -114,7 +114,7 @@ public void testValidateSnapshotsBackingAnyIndexThrowsException() {
         );
     }
-    private static ImmutableOpenMap<String, IndexMetadata> getIndexMetadata(SnapshotId snapshotId, String repoName) {
+    private static Map<String, IndexMetadata> getIndexMetadata(SnapshotId snapshotId, String repoName) {
         final String index = "test-index";
         Snapshot snapshot = new Snapshot(repoName, snapshotId);
         final Metadata.Builder builder = Metadata.builder();

From ba756e7400c80a7b061a957f2129235046420dbe Mon Sep 17 00:00:00 2001
From: Nicholas Walter Knize
Date: Fri, 23 Jun 2023 17:03:44 -0500
Subject: [PATCH 3/3] merge cleanup

Signed-off-by: Nicholas Walter Knize
---
 .../cluster/routing/allocation/DiskThresholdMonitor.java | 1 -
 .../metadata/MetadataCreateIndexServiceTests.java | 9 ---------
 .../org/opensearch/test/OpenSearchIntegTestCase.java | 2 +-
 3 files changed, 1 insertion(+), 11 deletions(-)

diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java
index 5bf1a3b199919..aa4acc135c279 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java
@@ -52,7 +52,6 @@
 import org.opensearch.cluster.routing.allocation.decider.DiskThresholdDecider;
 import org.opensearch.common.Priority;
 import org.opensearch.common.Strings;
-import org.opensearch.common.collect.ImmutableOpenMap;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
index b9b5a08096619..b2027c5484bbe 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java
@@ -32,7 +32,6 @@
 package org.opensearch.cluster.metadata;
-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.hamcrest.Matchers;
 import org.junit.Before;
 import org.opensearch.ExceptionsHelper;
@@ -1732,14 +1731,6 @@ private CompressedXContent createMapping(String fieldName, String fieldType) {
         }
     }
-    private static Map convertMappings(ImmutableOpenMap mappings) {
-        Map converted = new HashMap<>(mappings.size());
-        for (ObjectObjectCursor cursor : mappings) {
-            converted.put(cursor.key, cursor.value);
-        }
-        return converted;
-    }
-
     private ShardLimitValidator randomShardLimitService() {
         return createTestShardLimitService(randomIntBetween(10, 10000), false);
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 0252a08bd1d87..57fd9f6d28e23 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -1309,7 +1309,7 @@ private ClusterState removePluginCustoms(final ClusterState clusterState) {
             }
         });
         final Metadata.Builder mdBuilder = Metadata.builder(clusterState.metadata());
-        clusterState.metadata().customs().keysIt().forEachRemaining(key -> {
+        clusterState.metadata().customs().keySet().iterator().forEachRemaining(key -> {
             if (SAFE_METADATA_CUSTOMS.contains(key) == false) {
                 mdBuilder.removeCustom(key);
             }