From 1c8481aa1969e7c06641793d7e7e4e97b899f4d4 Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Tue, 4 Apr 2023 13:13:34 +0530 Subject: [PATCH 01/28] Enhance search preference based routing for WRR (#6834) (#6894) Signed-off-by: Varun Bansal (cherry picked from commit d8d6e7324adb4be088cab8e1fa5833e18ef19109) --- CHANGELOG.md | 1 + .../search/SearchWeightedRoutingIT.java | 473 +++++++++++++++++- .../routing/FailAwareWeightedRouting.java | 21 +- .../routing/IndexShardRoutingTable.java | 34 +- .../cluster/routing/OperationRouting.java | 56 ++- .../cluster/routing/WeightedRoutingUtils.java | 12 + .../common/settings/ClusterSettings.java | 1 + .../FailAwareWeightedRoutingTests.java | 124 ++++- .../structure/RoutingIteratorTests.java | 95 +++- 9 files changed, 771 insertions(+), 46 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dcdd4d739ab77..c4d347d30dab7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Segment Replication] Add new cluster setting to set replication strategy by default for all indices in cluster. ([#6791](https://github.com/opensearch-project/OpenSearch/pull/6791)) - Enable sort optimization for all NumericTypes ([#6464](https://github.com/opensearch-project/OpenSearch/pull/6464) - Remove 'cluster_manager' role attachment when using 'node.master' deprecated setting ([#6331](https://github.com/opensearch-project/OpenSearch/pull/6331)) +- Add new cluster settings to ignore weighted round-robin routing and fallback to default behaviour. ([#6834](https://github.com/opensearch-project/OpenSearch/pull/6834)) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.18.0 to 2.20.0 ([#6490](https://github.com/opensearch-project/OpenSearch/pull/6490)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index 02b51f9e625d3..a4cf5ebb028e3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -49,6 +49,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.Future; @@ -513,6 +514,292 @@ public void testShardRoutingWithNetworkDisruption_FailOpenEnabled() throws Excep assertNoSearchInAZ("a"); } + public void testStrictWeightedRoutingWithCustomString_FailOpenEnabled() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", true) + .put("cluster.routing.weighted.strict", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 1; + setUpIndexing(numShards, numReplicas); + + logger.info("--> creating network partition disruption"); + final String clusterManagerNode1 = internalCluster().getClusterManagerName(); + Set nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + + 
logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.UNRESPONSIVE + ); + internalCluster().setDisruptionScheme(networkDisruption); + + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + + Set hitNodes = new HashSet<>(); + Future[] responses = new Future[50]; + String customPreference = randomAlphaOfLength(10); + logger.info("--> making search requests"); + for (int i = 0; i < 50; i++) { + responses[i] = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch("test") + .setPreference(customPreference) + .setSize(100) + .setQuery(QueryBuilders.matchAllQuery()) + .execute(); + } + + logger.info("--> network disruption is stopped"); + networkDisruption.stopDisrupting(); + + logger.info("--> shards should fail due to network disruption"); + for (int i = 0; i < 50; i++) { + try { + SearchResponse searchResponse = responses[i].get(); + assertEquals(searchResponse.getFailedShards(), 0); + for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { + hitNodes.add(searchResponse.getHits().getAt(j).getShard().getNodeId()); + } + } catch (Exception t) { + fail("search should not fail"); + } + } + + try { + assertSearchInAZ("b"); + } catch (AssertionError ae) { + assertSearchInAZ("c"); + } + assertNoSearchInAZ("a"); + } + + public void testStrictWeightedRoutingWithCustomString_FailOpenDisabled() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", false) + .put("cluster.routing.weighted.strict", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 1; + setUpIndexing(numShards, numReplicas); + + logger.info("--> creating network partition disruption"); + final String clusterManagerNode1 = internalCluster().getClusterManagerName(); + Set nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("b").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.UNRESPONSIVE + ); + internalCluster().setDisruptionScheme(networkDisruption); + + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + + Set hitNodes = new HashSet<>(); + Future[] responses = new Future[50]; + String customPreference = randomAlphaOfLength(10); + logger.info("--> making search requests"); + for (int i = 0; i < 50; i++) { + responses[i] = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch("test") + .setPreference(customPreference) + .setSize(100) + .setQuery(QueryBuilders.matchAllQuery()) + .execute(); + } + + logger.info("--> network disruption is stopped"); + networkDisruption.stopDisrupting(); + + logger.info("--> shards should fail due to 
network disruption"); + for (int i = 0; i < 50; i++) { + try { + SearchResponse searchResponse = responses[i].get(); + assertNotEquals(searchResponse.getFailedShards(), 0); + for (int j = 0; j < searchResponse.getHits().getHits().length; j++) { + hitNodes.add(searchResponse.getHits().getAt(j).getShard().getNodeId()); + } + } catch (Exception t) { + fail("search should not fail"); + } + } + + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + Set expectedHotNodes = new HashSet<>(); + for (DiscoveryNode node : dataNodes) { + if (node.getAttributes().getOrDefault("zone", "").equals("b")) { + expectedHotNodes.add(node.getId()); + } + } + + assertEquals(expectedHotNodes, hitNodes); + + assertSearchInAZ("b"); + assertNoSearchInAZ("c"); + assertNoSearchInAZ("a"); + } + + /** + * Should failopen shards even if failopen enabled with custom search preference. + * @throws Exception + */ + public void testStrictWeightedRoutingWithShardPrefNetworkDisruption_FailOpenEnabled() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", true) + .put("cluster.routing.weighted.strict", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 1; + setUpIndexing(numShards, numReplicas); + + logger.info("--> creating network partition disruption"); + final String clusterManagerNode1 = internalCluster().getClusterManagerName(); + Set nodesInOneSide = Stream.of(clusterManagerNode1, nodeMap.get("c").get(0)).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(nodeMap.get("a").get(0)).collect(Collectors.toCollection(HashSet::new)); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.UNRESPONSIVE + ); + internalCluster().setDisruptionScheme(networkDisruption); + + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + + Future[] responses = new Future[50]; + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + ShardId shardId = internalCluster().clusterService() + .state() + .getRoutingTable() + .index("test") + .randomAllActiveShardsIt() + .getShardRoutings() + .stream() + .filter(shard -> { + return dataNodes.get(shard.currentNodeId()).getAttributes().getOrDefault("zone", "").equals("c"); + }) + .findFirst() + .get() + .shardId(); + + for (int i = 0; i < 50; i++) { + responses[i] = internalCluster().client(nodeMap.get("c").get(0)) + .prepareSearch("test") + .setPreference(String.format(Locale.ROOT, "_shards:%s", shardId.getId())) + .setSize(100) + .setQuery(QueryBuilders.matchAllQuery()) + .execute(); + } + + logger.info("--> network disruption is stopped"); + networkDisruption.stopDisrupting(); + + for (int i = 0; i < 50; i++) { + try { + SearchResponse searchResponse = responses[i].get(); + assertEquals(searchResponse.getFailedShards(), 0); + } catch (Exception t) { + fail("search should not fail"); + } + } + + assertNoSearchInAZ("a"); + try { + assertSearchInAZ("c"); + } catch (AssertionError ae) { + assertSearchInAZ("b"); + } + } + + 
public void testStrictWeightedRoutingWithShardPref() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", true) + .put("cluster.routing.weighted.strict", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 1; + setUpIndexing(numShards, numReplicas); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + ShardId shardId = internalCluster().clusterService() + .state() + .getRoutingTable() + .index("test") + .randomAllActiveShardsIt() + .getShardRoutings() + .stream() + .filter(shard -> { + return dataNodes.get(shard.currentNodeId()).getAttributes().getOrDefault("zone", "").equals("c"); + }) + .findFirst() + .get() + .shardId(); + + Future[] responses = new Future[50]; + logger.info("--> making search requests"); + for (int i = 0; i < 50; i++) { + responses[i] = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch("test") + .setPreference(String.format(Locale.ROOT, "_shards:%s", shardId.getId())) + .setSize(100) + .setQuery(QueryBuilders.matchAllQuery()) + .execute(); + } + + for (int i = 0; i < 50; i++) { + try { + SearchResponse searchResponse = responses[i].get(); + assertEquals(searchResponse.getFailedShards(), 0); + assertNotEquals(searchResponse.getHits().getTotalHits().value, 0); + } catch (Exception t) { + fail("search should not fail"); + } + } + assertNoSearchInAZ("c"); + } + private void assertNoSearchInAZ(String az) { ImmutableOpenMap dataNodes = internalCluster().clusterService().state().nodes().getDataNodes(); String dataNodeId = null; @@ -805,8 +1092,7 @@ public void testMultiGetWithNetworkDisruption_FailOpenDisabled() throws Exceptio /** * Assert that preference based search is not allowed with strict weighted shard routing */ - public void testStrictWeightedRouting() { - + public void testStrictWeightedRoutingWithCustomString() { Settings commonSettings = Settings.builder() .put("cluster.routing.allocation.awareness.attributes", "zone") .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") @@ -827,15 +1113,36 @@ public void testStrictWeightedRouting() { String nodeInZoneA = nodeMap.get("a").get(0); String customPreference = randomAlphaOfLength(10); - assertThrows( - PreferenceBasedSearchNotAllowedException.class, - () -> internalCluster().client(nodeMap.get("b").get(0)) - .prepareSearch() - .setSize(0) - .setPreference(randomFrom("_local", "_only_nodes:" + nodeInZoneA, "_prefer_nodes:" + nodeInZoneA, customPreference)) - .get() - ); + SearchResponse searchResponse = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch() + .setSize(20) + .setPreference(customPreference) + .get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + assertNoSearchInAZ("c"); + assertSearchInAZ("a"); + assertSearchInAZ("b"); + // disable strict weighed routing + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.weighted.strict", false)) + .get(); + + // make search requests with custom string + internalCluster().client(nodeMap.get("a").get(0)) + .prepareSearch() + 
.setSize(20) + .setPreference(customPreference) + .setQuery(QueryBuilders.matchAllQuery()) + .get(); + // assert search on data nodes on az c (weighed away az) + try { + assertSearchInAZ("c"); + } catch (AssertionError ae) { + assertSearchInAZ("a"); + } } /** @@ -862,13 +1169,157 @@ public void testPreferenceSearchWithWeightedRouting() { String customPreference = randomAlphaOfLength(10); String nodeInZoneA = nodeMap.get("a").get(0); + String nodeInZoneB = nodeMap.get("b").get(0); + String nodeInZoneC = nodeMap.get("c").get(0); + + Map nodeIDMap = new HashMap<>(); + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + for (DiscoveryNode node : dataNodes) { + nodeIDMap.put(node.getName(), node.getId()); + } + SearchResponse searchResponse = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch() + .setPreference(randomFrom("_local", "_prefer_nodes:" + "zone:a", customPreference)) + .get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + + searchResponse = internalCluster().client(nodeMap.get("a").get(0)) + .prepareSearch() + .setPreference( + "_only_nodes:" + nodeIDMap.get(nodeInZoneA) + "," + nodeIDMap.get(nodeInZoneB) + "," + nodeIDMap.get(nodeInZoneC) + ) + .get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + } + + public void testPreferenceSearchWithIgnoreWeightedRouting() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", true) + .put("cluster.routing.weighted.strict", false) + .put("cluster.routing.ignore_weighted_routing", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 2; + setUpIndexing(numShards, numReplicas); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + + String customPreference = randomAlphaOfLength(10); + String nodeInZoneA = nodeMap.get("a").get(0); + String nodeInZoneB = nodeMap.get("b").get(0); + String nodeInZoneC = nodeMap.get("c").get(0); + + Map nodeIDMap = new HashMap<>(); + DiscoveryNodes dataNodes = internalCluster().clusterService().state().nodes(); + for (DiscoveryNode node : dataNodes) { + nodeIDMap.put(node.getName(), node.getId()); + } + + SearchResponse searchResponse = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch() + .setPreference(randomFrom("_local", "_prefer_nodes:" + "zone:a", customPreference)) + .get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + + searchResponse = internalCluster().client(nodeMap.get("a").get(0)) + .prepareSearch() + .setPreference( + "_only_nodes:" + nodeIDMap.get(nodeInZoneA) + "," + nodeIDMap.get(nodeInZoneB) + "," + nodeIDMap.get(nodeInZoneC) + ) + .get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + } + + /** + * Assert that preference based search with preference type is not allowed with strict weighted shard routing + */ + public void testStrictWeightedRouting() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", true) + 
.put("cluster.routing.weighted.strict", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 1; + setUpIndexing(numShards, numReplicas); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + String nodeInZoneA = nodeMap.get("a").get(0); + + assertThrows( + PreferenceBasedSearchNotAllowedException.class, + () -> internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch() + .setSize(0) + .setPreference("_only_nodes:" + nodeInZoneA) + .get() + ); + + assertThrows( + PreferenceBasedSearchNotAllowedException.class, + () -> internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch() + .setSize(0) + .setPreference("_prefer_nodes:" + nodeInZoneA) + .get() + ); + } + + public void testStrictWeightedRoutingAllowedForSomeSearchPrefs() { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.weighted.fail_open", true) + .put("cluster.routing.weighted.strict", true) + .build(); + + int nodeCountPerAZ = 1; + Map> nodeMap = setupCluster(nodeCountPerAZ, commonSettings); + + int numShards = 10; + int numReplicas = 1; + setUpIndexing(numShards, numReplicas); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + setShardRoutingWeights(weights); + String nodeInZoneA = nodeMap.get("a").get(0); + String customPreference = randomAlphaOfLength(10); SearchResponse searchResponse = internalCluster().client(nodeMap.get("b").get(0)) .prepareSearch() .setSize(0) - .setPreference(randomFrom("_local", "_only_nodes:" + nodeInZoneA, "_prefer_nodes:" + nodeInZoneA, customPreference)) + .setPreference("_only_local:" + nodeInZoneA) .get(); assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + + searchResponse = internalCluster().client(nodeMap.get("b").get(0)) + .prepareSearch() + .setSize(0) + .setPreference("_local:" + nodeInZoneA) + .get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + + searchResponse = internalCluster().client(nodeMap.get("b").get(0)).prepareSearch().setSize(0).setPreference("_shards:1").get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); + + searchResponse = internalCluster().client(nodeMap.get("b").get(0)).prepareSearch().setSize(0).setPreference(customPreference).get(); + assertEquals(RestStatus.OK.getStatus(), searchResponse.status().getStatus()); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java index dbef876c9a258..72c189f20eaf6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/FailAwareWeightedRouting.java @@ -20,6 +20,8 @@ import java.util.List; +import static org.opensearch.cluster.routing.OperationRouting.IGNORE_WEIGHTED_SHARD_ROUTING; + /** * This class contains logic to find next shard to retry search request in case of failure from other shard copy. 
* This decides if retryable shard search requests can be tried on shard copies present in data @@ -72,9 +74,13 @@ public SearchShardTarget findNext( Runnable onShardSkipped ) { SearchShardTarget next = shardIt.nextOrNull(); + if (ignoreWeightedRouting(clusterState)) { + return next; + } + while (next != null && WeightedRoutingUtils.isWeighedAway(next.getNodeId(), clusterState)) { SearchShardTarget nextShard = next; - if (canFailOpen(nextShard.getShardId(), exception, clusterState)) { + if (canFailOpen(nextShard.getShardId(), shardIt.size(), exception, clusterState)) { logger.info(() -> new ParameterizedMessage("{}: Fail open executed due to exception", nextShard.getShardId()), exception); getWeightedRoutingStats().updateFailOpenCount(); break; @@ -98,10 +104,13 @@ public SearchShardTarget findNext( */ public ShardRouting findNext(final ShardsIterator shardsIt, ClusterState clusterState, Exception exception, Runnable onShardSkipped) { ShardRouting next = shardsIt.nextOrNull(); + if (ignoreWeightedRouting(clusterState)) { + return next; + } while (next != null && WeightedRoutingUtils.isWeighedAway(next.currentNodeId(), clusterState)) { ShardRouting nextShard = next; - if (canFailOpen(nextShard.shardId(), exception, clusterState)) { + if (canFailOpen(nextShard.shardId(), shardsIt.size(), exception, clusterState)) { logger.info(() -> new ParameterizedMessage("{}: Fail open executed due to exception", nextShard.shardId()), exception); getWeightedRoutingStats().updateFailOpenCount(); break; @@ -117,8 +126,8 @@ public ShardRouting findNext(final ShardsIterator shardsIt, ClusterState cluster * @return true if can fail open ie request shard copies present in nodes with weighted shard * routing weight set to zero */ - private boolean canFailOpen(ShardId shardId, Exception exception, ClusterState clusterState) { - return isInternalFailure(exception) || hasInActiveShardCopies(clusterState, shardId); + private boolean canFailOpen(ShardId shardId, int shardItSize, Exception exception, ClusterState clusterState) { + return shardItSize == 1 || isInternalFailure(exception) || hasInActiveShardCopies(clusterState, shardId); } private boolean hasInActiveShardCopies(ClusterState clusterState, ShardId shardId) { @@ -131,6 +140,10 @@ private boolean hasInActiveShardCopies(ClusterState clusterState, ShardId shardI return false; } + private boolean ignoreWeightedRouting(ClusterState clusterState) { + return IGNORE_WEIGHTED_SHARD_ROUTING.get(clusterState.getMetadata().settings()); + } + public WeightedRoutingStats getWeightedRoutingStats() { return WeightedRoutingStats.getInstance(); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index f730a2833fd02..711a750ade712 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -324,17 +324,13 @@ public ShardIterator activeInitializingShardsWeightedIt( WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight, - boolean isFailOpenEnabled + boolean isFailOpenEnabled, + @Nullable Integer seed ) { - final int seed = shufflerForWeightedRouting.nextSeed(); - List ordered = new ArrayList<>(); - List orderedActiveShards = getActiveShardsByWeight(weightedRouting, nodes, defaultWeight); - List orderedListWithDistinctShards; - ordered.addAll(shufflerForWeightedRouting.shuffle(orderedActiveShards, seed)); - if 
(!allInitializingShards.isEmpty()) { - List orderedInitializingShards = getInitializingShardsByWeight(weightedRouting, nodes, defaultWeight); - ordered.addAll(orderedInitializingShards); + if (seed == null) { + seed = shufflerForWeightedRouting.nextSeed(); } + List ordered = activeInitializingShardsWithWeights(weightedRouting, nodes, defaultWeight, seed); // append shards for attribute value with weight zero, so that shard search requests can be tried on // shard copies in case of request failure from other attribute values. @@ -357,8 +353,26 @@ public ShardIterator activeInitializingShardsWeightedIt( logger.debug("no shard copies found for shard id [{}] for node attribute with weight zero", shardId); } } + + return new PlainShardIterator(shardId, ordered); + } + + private List activeInitializingShardsWithWeights( + WeightedRouting weightedRouting, + DiscoveryNodes nodes, + double defaultWeight, + int seed + ) { + List ordered = new ArrayList<>(); + List orderedActiveShards = getActiveShardsByWeight(weightedRouting, nodes, defaultWeight); + ordered.addAll(shufflerForWeightedRouting.shuffle(orderedActiveShards, seed)); + if (!allInitializingShards.isEmpty()) { + List orderedInitializingShards = getInitializingShardsByWeight(weightedRouting, nodes, defaultWeight); + ordered.addAll(orderedInitializingShards); + } + List orderedListWithDistinctShards; orderedListWithDistinctShards = ordered.stream().distinct().collect(Collectors.toList()); - return new PlainShardIterator(shardId, orderedListWithDistinctShards); + return orderedListWithDistinctShards; } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index cb20e223c9e20..9e59e81921d96 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -93,16 +93,30 @@ public class OperationRouting { public static final Setting STRICT_WEIGHTED_SHARD_ROUTING_ENABLED = Setting.boolSetting( "cluster.routing.weighted.strict", + true, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting IGNORE_WEIGHTED_SHARD_ROUTING = Setting.boolSetting( + "cluster.routing.ignore_weighted_routing", false, Setting.Property.Dynamic, Setting.Property.NodeScope ); + + private static final List WEIGHTED_ROUTING_RESTRICTED_PREFERENCES = Arrays.asList( + Preference.ONLY_NODES, + Preference.PREFER_NODES + ); + private volatile List awarenessAttributes; private volatile boolean useAdaptiveReplicaSelection; private volatile boolean ignoreAwarenessAttr; private volatile double weightedRoutingDefaultWeight; private volatile boolean isFailOpenEnabled; private volatile boolean isStrictWeightedShardRouting; + private volatile boolean ignoreWeightedRouting; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { // whether to ignore awareness attributes when routing requests @@ -116,11 +130,13 @@ public OperationRouting(Settings settings, ClusterSettings clusterSettings) { this.weightedRoutingDefaultWeight = WEIGHTED_ROUTING_DEFAULT_WEIGHT.get(settings); this.isFailOpenEnabled = WEIGHTED_ROUTING_FAILOPEN_ENABLED.get(settings); this.isStrictWeightedShardRouting = STRICT_WEIGHTED_SHARD_ROUTING_ENABLED.get(settings); + this.ignoreWeightedRouting = IGNORE_WEIGHTED_SHARD_ROUTING.get(settings); clusterSettings.addSettingsUpdateConsumer(USE_ADAPTIVE_REPLICA_SELECTION_SETTING, 
this::setUseAdaptiveReplicaSelection); clusterSettings.addSettingsUpdateConsumer(IGNORE_AWARENESS_ATTRIBUTES_SETTING, this::setIgnoreAwarenessAttributes); clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_DEFAULT_WEIGHT, this::setWeightedRoutingDefaultWeight); clusterSettings.addSettingsUpdateConsumer(WEIGHTED_ROUTING_FAILOPEN_ENABLED, this::setFailOpenEnabled); clusterSettings.addSettingsUpdateConsumer(STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, this::setStrictWeightedShardRouting); + clusterSettings.addSettingsUpdateConsumer(IGNORE_WEIGHTED_SHARD_ROUTING, this::setIgnoreWeightedRouting); } void setUseAdaptiveReplicaSelection(boolean useAdaptiveReplicaSelection) { @@ -143,6 +159,10 @@ void setStrictWeightedShardRouting(boolean strictWeightedShardRouting) { this.isStrictWeightedShardRouting = strictWeightedShardRouting; } + void setIgnoreWeightedRouting(boolean isWeightedRoundRobinEnabled) { + this.ignoreWeightedRouting = isWeightedRoundRobinEnabled; + } + public boolean isIgnoreAwarenessAttr() { return ignoreAwarenessAttr; } @@ -281,11 +301,6 @@ private ShardIterator preferenceActiveShardIterator( if (preference == null || preference.isEmpty()) { return shardRoutings(indexShard, nodes, collectorService, nodeCounts, weightedRoutingMetadata); } - if (weightedRoutingMetadata != null && weightedRoutingMetadata.getWeightedRouting().isSet() && isStrictWeightedShardRouting) { - throw new PreferenceBasedSearchNotAllowedException( - "Preference based routing not allowed with strict weighted shard routing setting" - ); - } if (preference.charAt(0) == '_') { Preference preferenceType = Preference.parse(preference); if (preferenceType == Preference.SHARDS) { @@ -318,6 +333,7 @@ private ShardIterator preferenceActiveShardIterator( } } preferenceType = Preference.parse(preference); + checkPreferenceBasedRoutingAllowed(preferenceType, weightedRoutingMetadata); switch (preferenceType) { case PREFER_NODES: final Set nodesIds = Arrays.stream(preference.substring(Preference.PREFER_NODES.type().length() + 1).split(",")) @@ -343,7 +359,19 @@ private ShardIterator preferenceActiveShardIterator( // for a different element in the list by also incorporating the // shard ID into the hash of the user-supplied preference key. 
routingHash = 31 * routingHash + indexShard.shardId.hashCode(); - if (ignoreAwarenessAttributes()) { + if (WeightedRoutingUtils.shouldPerformStrictWeightedRouting( + isStrictWeightedShardRouting, + ignoreWeightedRouting, + weightedRoutingMetadata + )) { + return indexShard.activeInitializingShardsWeightedIt( + weightedRoutingMetadata.getWeightedRouting(), + nodes, + getWeightedRoutingDefaultWeight(), + isFailOpenEnabled, + routingHash + ); + } else if (ignoreAwarenessAttributes()) { return indexShard.activeInitializingShardsIt(routingHash); } else { return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, routingHash); @@ -357,12 +385,13 @@ private ShardIterator shardRoutings( @Nullable Map nodeCounts, @Nullable WeightedRoutingMetadata weightedRoutingMetadata ) { - if (weightedRoutingMetadata != null && weightedRoutingMetadata.getWeightedRouting().isSet()) { + if (WeightedRoutingUtils.shouldPerformWeightedRouting(ignoreWeightedRouting, weightedRoutingMetadata)) { return indexShard.activeInitializingShardsWeightedIt( weightedRoutingMetadata.getWeightedRouting(), nodes, getWeightedRoutingDefaultWeight(), - isFailOpenEnabled + isFailOpenEnabled, + null ); } else if (ignoreAwarenessAttributes()) { if (useAdaptiveReplicaSelection) { @@ -430,4 +459,15 @@ private static int calculateScaledShardId(IndexMetadata indexMetadata, String ef return Math.floorMod(hash, indexMetadata.getRoutingNumShards()) / indexMetadata.getRoutingFactor(); } + private void checkPreferenceBasedRoutingAllowed(Preference preference, @Nullable WeightedRoutingMetadata weightedRoutingMetadata) { + if (WeightedRoutingUtils.shouldPerformStrictWeightedRouting( + isStrictWeightedShardRouting, + ignoreWeightedRouting, + weightedRoutingMetadata + ) && WEIGHTED_ROUTING_RESTRICTED_PREFERENCES.contains(preference)) { + throw new PreferenceBasedSearchNotAllowedException( + "Preference type based routing not allowed with strict weighted shard routing enabled" + ); + } + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingUtils.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingUtils.java index c7d40cbbbea61..72387aad0fa45 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingUtils.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingUtils.java @@ -53,4 +53,16 @@ public static boolean isWeighedAway(String nodeId, ClusterState clusterState) { } return false; } + + public static boolean shouldPerformWeightedRouting(boolean ignoreWeightedRouting, WeightedRoutingMetadata weightedRoutingMetadata) { + return !ignoreWeightedRouting && weightedRoutingMetadata != null && weightedRoutingMetadata.getWeightedRouting().isSet(); + } + + public static boolean shouldPerformStrictWeightedRouting( + boolean isStrictWeightedShardRouting, + boolean ignoreWeightedRouting, + WeightedRoutingMetadata weightedRoutingMetadata + ) { + return isStrictWeightedShardRouting && shouldPerformWeightedRouting(ignoreWeightedRouting, weightedRoutingMetadata); + } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 1b9e148fe6bf0..9e62b77de04ff 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -547,6 +547,7 @@ public void apply(Settings value, Settings current, Settings previous) { 
OperationRouting.WEIGHTED_ROUTING_DEFAULT_WEIGHT, OperationRouting.WEIGHTED_ROUTING_FAILOPEN_ENABLED, OperationRouting.STRICT_WEIGHTED_SHARD_ROUTING_ENABLED, + OperationRouting.IGNORE_WEIGHTED_SHARD_ROUTING, IndexGraveyard.SETTING_MAX_TOMBSTONES, PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java index 5c3a2454c4074..c0164f1afd924 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/FailAwareWeightedRoutingTests.java @@ -42,6 +42,10 @@ public class FailAwareWeightedRoutingTests extends OpenSearchTestCase { private ClusterState setUpCluster() { + return setUpCluster(Settings.EMPTY); + } + + private ClusterState setUpCluster(Settings transientSettings) { ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build(); // set up nodes @@ -78,7 +82,7 @@ private ClusterState setUpCluster() { Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); WeightedRouting weightedRouting = new WeightedRouting("zone", weights); WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting, 0); - Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()).transientSettings(transientSettings); metadataBuilder.putCustom(WeightedRoutingMetadata.TYPE, weightedRoutingMetadata); clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); @@ -143,6 +147,124 @@ public void testFindNextWithoutFailOpen() throws IOException { assertEquals(1, shardSkipped.get()); } + public void testFindNextWithJustOneShardInStandbyZone() throws IOException { + ClusterState clusterState = setUpCluster(); + + AtomicInteger shardSkipped = new AtomicInteger(); + // set up index + IndexMetadata indexMetadata = IndexMetadata.builder("test") + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 2) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .build(); + + ShardRouting shardRoutingA = TestShardRouting.newShardRouting("test", 0, "node_zone_a", true, ShardRoutingState.STARTED); + ShardRouting shardRoutingB = TestShardRouting.newShardRouting("test", 0, "node_zone_b", false, ShardRoutingState.STARTED); + ShardRouting shardRoutingC = TestShardRouting.newShardRouting("test", 0, "node_zone_c", false, ShardRoutingState.STARTED); + + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.put(indexMetadata, false).generateClusterUuidIfNeeded(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + + final ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard(shardRoutingA); + indexShardRoutingBuilder.addShard(shardRoutingB); + indexShardRoutingBuilder.addShard(shardRoutingC); + + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + 
routingTableBuilder.add(indexRoutingTableBuilder.build()); + clusterState = ClusterState.builder(clusterState).routingTable(routingTableBuilder.build()).build(); + + List shardRoutings = new ArrayList<>(); + shardRoutings.add(shardRoutingC); + + String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); + SearchShardIterator searchShardIterator = new SearchShardIterator( + clusterAlias, + shardId, + shardRoutings, + OriginalIndicesTests.randomOriginalIndices() + ); + + // fail open is not executed since fail open conditions don't met + SearchShardTarget next = FailAwareWeightedRouting.getInstance() + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); + assertNotNull(next); + next = FailAwareWeightedRouting.getInstance() + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); + assertNull(next); + assertEquals(0, shardSkipped.get()); + } + + public void testFindNextWithIgnoreWeightedRoutingTrue() throws IOException { + ClusterState clusterState = setUpCluster(Settings.builder().put("cluster.routing.ignore_weighted_routing", true).build()); + + AtomicInteger shardSkipped = new AtomicInteger(); + // set up index + IndexMetadata indexMetadata = IndexMetadata.builder("test") + .settings( + Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 2) + .put(SETTING_CREATION_DATE, System.currentTimeMillis()) + ) + .build(); + + ShardRouting shardRoutingA = TestShardRouting.newShardRouting("test", 0, "node_zone_a", true, ShardRoutingState.STARTED); + ShardRouting shardRoutingB = TestShardRouting.newShardRouting("test", 0, "node_zone_b", false, ShardRoutingState.STARTED); + ShardRouting shardRoutingC = TestShardRouting.newShardRouting("test", 0, "node_zone_c", false, ShardRoutingState.STARTED); + + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.put(indexMetadata, false).generateClusterUuidIfNeeded(); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + + final ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + indexShardRoutingBuilder.addShard(shardRoutingA); + indexShardRoutingBuilder.addShard(shardRoutingB); + indexShardRoutingBuilder.addShard(shardRoutingC); + + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + routingTableBuilder.add(indexRoutingTableBuilder.build()); + clusterState = ClusterState.builder(clusterState).routingTable(routingTableBuilder.build()).build(); + + List shardRoutings = new ArrayList<>(); + shardRoutings.add(shardRoutingA); + shardRoutings.add(shardRoutingB); + shardRoutings.add(shardRoutingC); + + String clusterAlias = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); + SearchShardIterator searchShardIterator = new SearchShardIterator( + clusterAlias, + shardId, + shardRoutings, + OriginalIndicesTests.randomOriginalIndices() + ); + + // fail open is not executed since fail open conditions don't met + SearchShardTarget next = FailAwareWeightedRouting.getInstance() + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); + assertNotNull(next); + next = FailAwareWeightedRouting.getInstance() + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); + assertNotNull(next); + next = FailAwareWeightedRouting.getInstance() + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); + assertNotNull(next); + next = FailAwareWeightedRouting.getInstance() + .findNext(searchShardIterator, clusterState, new OpenSearchRejectedExecutionException(), () -> shardSkipped.incrementAndGet()); + assertNull(next); + assertEquals(0, shardSkipped.get()); + } + public void testFindNextWithFailOpenDueTo5xx() throws IOException { ClusterState clusterState = setUpCluster(); diff --git a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java index 9715d3af09fc7..59ea0dfca559a 100644 --- a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java @@ -552,7 +552,7 @@ public void testWeightedRoutingWithDifferentWeights() { ShardIterator shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(2, shardIterator.size()); ShardRouting shardRouting; @@ -565,7 +565,7 @@ public void testWeightedRoutingWithDifferentWeights() { shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(3, shardIterator.size()); weights = Map.of("zone1", -1.0, "zone2", 0.0, "zone3", 1.0); @@ -573,7 +573,7 @@ public void testWeightedRoutingWithDifferentWeights() { shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(1, shardIterator.size()); shardRouting = shardIterator.nextOrNull(); assertNotNull(shardRouting); @@ -584,7 +584,7 @@ public void testWeightedRoutingWithDifferentWeights() { shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); assertEquals(3, shardIterator.size()); shardRouting = shardIterator.nextOrNull(); assertNotNull(shardRouting); @@ -646,7 +646,7 @@ public void testWeightedRoutingInMemoryStore() { ShardIterator shardIterator = clusterState.routingTable() .index("test") .shard(0) - 
.activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(2, shardIterator.size()); ShardRouting shardRouting; shardRouting = shardIterator.nextOrNull(); @@ -660,7 +660,7 @@ public void testWeightedRoutingInMemoryStore() { shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(2, shardIterator.size()); shardRouting = shardIterator.nextOrNull(); assertNotNull(shardRouting); @@ -675,7 +675,7 @@ public void testWeightedRoutingInMemoryStore() { shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(2, shardIterator.size()); shardRouting = shardIterator.nextOrNull(); assertNotNull(shardRouting); @@ -690,7 +690,7 @@ public void testWeightedRoutingInMemoryStore() { shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(2, shardIterator.size()); shardRouting = shardIterator.nextOrNull(); assertNotNull(shardRouting); @@ -755,7 +755,7 @@ public void testWeightedRoutingShardState() { ShardIterator shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); assertEquals(3, shardIterator.size()); ShardRouting shardRouting; @@ -834,21 +834,21 @@ public void testWeightedRoutingShardStateWithDifferentWeights() { ShardIterator shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); ShardRouting shardRouting1 = shardIterator.nextOrNull(); shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); ShardRouting shardRouting2 = shardIterator.nextOrNull(); shardIterator = clusterState.routingTable() .index("test") .shard(0) - .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true); + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); ShardRouting shardRouting3 = shardIterator.nextOrNull(); @@ -860,4 +860,75 @@ public void testWeightedRoutingShardStateWithDifferentWeights() { terminate(threadPool); } } + + /** + * Test to validate that simple weighted shard routing with seed return same shard routing on each call + */ + public void testActiveInitializingShardsWeightedItWithCustomSeed() { + TestThreadPool threadPool = new TestThreadPool("testActiveInitializingShardsWeightedItWithCustomSeed"); + try { + Settings.Builder settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + 
.put("cluster.routing.allocation.awareness.attributes", "zone"); + AllocationService strategy = createAllocationService(settings.build()); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .build(); + + RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + + Map node1Attributes = new HashMap<>(); + node1Attributes.put("zone", "zone1"); + Map node2Attributes = new HashMap<>(); + node2Attributes.put("zone", "zone2"); + Map node3Attributes = new HashMap<>(); + node3Attributes.put("zone", "zone3"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("node1", unmodifiableMap(node1Attributes))) + .add(newNode("node2", unmodifiableMap(node2Attributes))) + .add(newNode("node3", unmodifiableMap(node3Attributes))) + .localNodeId("node1") + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + List> weightsList = new ArrayList<>(); + Map weights1 = Map.of("zone1", 1.0, "zone2", 1.0, "zone3", 0.0); + weightsList.add(weights1); + + WeightedRouting weightedRouting = new WeightedRouting("zone", weights1); + ShardIterator shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, 1); + + ShardRouting shardRouting1 = shardIterator.nextOrNull(); + + for (int i = 0; i < 50; i++) { + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, 1); + + ShardRouting shardRouting2 = shardIterator.nextOrNull(); + + assertEquals(shardRouting1.currentNodeId(), shardRouting2.currentNodeId()); + } + + } finally { + terminate(threadPool); + } + } } From d8f459141660fa3028e152a784e2ebfe02ea27c1 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 19:11:16 +0530 Subject: [PATCH 02/28] Validate checksum in footer separately for segment metadata files (#6931) (#6973) (cherry picked from commit 1856090a87e86b64f87aff81f2be4bc3879fb4b9) Signed-off-by: Varun Bansal --- .../opensearch/common/io/VersionedCodecStreamWrapper.java | 8 +++----- .../index/store/RemoteSegmentStoreDirectory.java | 5 ++++- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java index 9a1a951b0796e..ff0af3954a3a3 100644 --- a/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java +++ b/server/src/main/java/org/opensearch/common/io/VersionedCodecStreamWrapper.java @@ -11,7 +11,6 @@ import java.io.IOException; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.BufferedChecksumIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; @@ 
-47,10 +46,9 @@ public VersionedCodecStreamWrapper(IndexIOStreamHandler indexIOStreamHandler, * @return stream content parsed into {@link T} */ public T readStream(IndexInput indexInput) throws IOException { - ChecksumIndexInput checksumIndexInput = new BufferedChecksumIndexInput(indexInput); - int readStreamVersion = checkHeader(checksumIndexInput); - T content = getHandlerForVersion(readStreamVersion).readContent(checksumIndexInput); - checkFooter(checksumIndexInput); + CodecUtil.checksumEntireFile(indexInput); + int readStreamVersion = checkHeader(indexInput); + T content = getHandlerForVersion(readStreamVersion).readContent(indexInput); return content; } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index a0bd32403bd39..095f1c01792e6 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -17,6 +17,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.opensearch.common.UUIDs; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; @@ -135,7 +136,9 @@ private Map readLatestMetadataFile() throws IOE private Map readMetadataFile(String metadataFilename) throws IOException { try (IndexInput indexInput = remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)) { - RemoteSegmentMetadata metadata = metadataStreamWrapper.readStream(indexInput); + byte[] metadataBytes = new byte[(int) indexInput.length()]; + indexInput.readBytes(metadataBytes, 0, (int) indexInput.length()); + RemoteSegmentMetadata metadata = metadataStreamWrapper.readStream(new ByteArrayIndexInput(metadataFilename, metadataBytes)); return metadata.getMetadata(); } } From 068ec12672310c986d36d8c3c98972689a1399fc Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 21:34:39 +0530 Subject: [PATCH 03/28] [Remote Store] Add file details to recoveryState while downloading segments from remote store (#6825) (#6976) * Use existing StatsDirectoryWrapper to record recovery stats (cherry picked from commit e12a5b92c9e1638685c684d8bd5a75964c23a150) Signed-off-by: Sachin Kale --- .../opensearch/index/shard/IndexShard.java | 15 ++++- .../opensearch/index/shard/StoreRecovery.java | 6 +- .../store/RemoteSegmentStoreDirectory.java | 17 +++-- .../RemoteSegmentStoreDirectoryTests.java | 66 ++++++++++++++----- .../RemoteSegmentMetadataHandlerTests.java | 20 +++++- 5 files changed, 99 insertions(+), 25 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f0e84aa6fc7d2..eaf26c933635f 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4409,12 +4409,25 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re ((RemoteSegmentStoreDirectory) remoteDirectory).init(); Map uploadedSegments = ((RemoteSegmentStoreDirectory) remoteDirectory) .getSegmentsUploadedToRemoteStore(); - final Directory 
storeDirectory = store.directory(); store.incRef(); remoteStore.incRef(); List downloadedSegments = new ArrayList<>(); List skippedSegments = new ArrayList<>(); try { + final Directory storeDirectory; + if (recoveryState.getStage() == RecoveryState.Stage.INDEX) { + storeDirectory = new StoreRecovery.StatsDirectoryWrapper(store.directory(), recoveryState.getIndex()); + for (String file : uploadedSegments.keySet()) { + long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); + if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { + recoveryState.getIndex().addFileDetail(file, uploadedSegments.get(file).getLength(), false); + } else { + recoveryState.getIndex().addFileDetail(file, uploadedSegments.get(file).getLength(), true); + } + } + } else { + storeDirectory = store.directory(); + } String segmentInfosSnapshotFilename = null; Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); for (String file : uploadedSegments.keySet()) { diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 972a76bc17eb5..31a863129cc8c 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -268,7 +268,9 @@ public void copyFrom(Directory from, String src, String dest, IOContext context) in.copyFrom(new FilterDirectory(from) { @Override public IndexInput openInput(String name, IOContext context) throws IOException { - index.addFileDetail(dest, l, false); + if (index.getFileDetails(dest) == null) { + index.addFileDetail(dest, l, false); + } copies.set(true); final IndexInput input = in.openInput(name, context); return new IndexInput("StatsDirectoryWrapper(" + input.toString() + ")") { @@ -311,7 +313,7 @@ public void readBytes(byte[] b, int offset, int len) throws IOException { }; } }, src, dest, context); - if (copies.get() == false) { + if (copies.get() == false && index.getFileDetails(dest) == null) { index.addFileDetail(dest, l, true); // hardlinked - we treat it as reused since the file was already somewhat there } else { assert index.getFileDetails(dest) != null : "File [" + dest + "] has no file details"; diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 095f1c01792e6..c385303813844 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -153,25 +153,31 @@ public static class UploadedSegmentMetadata { private final String originalFilename; private final String uploadedFilename; private final String checksum; + private final long length; - UploadedSegmentMetadata(String originalFilename, String uploadedFilename, String checksum) { + UploadedSegmentMetadata(String originalFilename, String uploadedFilename, String checksum, long length) { this.originalFilename = originalFilename; this.uploadedFilename = uploadedFilename; this.checksum = checksum; + this.length = length; } @Override public String toString() { - return String.join(SEPARATOR, originalFilename, uploadedFilename, checksum); + return String.join(SEPARATOR, originalFilename, uploadedFilename, checksum, String.valueOf(length)); } public String getChecksum() { return this.checksum; } + public long getLength() { + return this.length; + } + 
public static UploadedSegmentMetadata fromString(String uploadedFilename) { String[] values = uploadedFilename.split(SEPARATOR); - return new UploadedSegmentMetadata(values[0], values[1], values[2]); + return new UploadedSegmentMetadata(values[0], values[1], values[2], Long.parseLong(values[3])); } } @@ -273,6 +279,9 @@ public void deleteFile(String name) throws IOException { */ @Override public long fileLength(String name) throws IOException { + if (segmentsUploadedToRemoteStore.containsKey(name)) { + return segmentsUploadedToRemoteStore.get(name).getLength(); + } String remoteFilename = getExistingRemoteFilename(name); if (remoteFilename != null) { return remoteDataDirectory.fileLength(remoteFilename); @@ -317,7 +326,7 @@ public void copyFrom(Directory from, String src, String dest, IOContext context, } remoteDataDirectory.copyFrom(from, src, remoteFilename, context); String checksum = getChecksumOfLocalFile(from, src); - UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum); + UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum, from.fileLength(src)); segmentsUploadedToRemoteStore.put(src, segmentMetadata); } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 956279c3ea048..49a2d50dfae06 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -64,16 +64,17 @@ public void testUploadedSegmentMetadataToString() { RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = new RemoteSegmentStoreDirectory.UploadedSegmentMetadata( "abc", "pqr", - "123456" + "123456", + 1234 ); - assertEquals("abc::pqr::123456", metadata.toString()); + assertEquals("abc::pqr::123456::1234", metadata.toString()); } public void testUploadedSegmentMetadataFromString() { RemoteSegmentStoreDirectory.UploadedSegmentMetadata metadata = RemoteSegmentStoreDirectory.UploadedSegmentMetadata.fromString( - "_0.cfe::_0.cfe__uuidxyz::4567" + "_0.cfe::_0.cfe__uuidxyz::4567::372000" ); - assertEquals("_0.cfe::_0.cfe__uuidxyz::4567", metadata.toString()); + assertEquals("_0.cfe::_0.cfe__uuidxyz::4567::372000", metadata.toString()); } public void testGetMetadataFilename() { @@ -141,9 +142,42 @@ public void testInitNoMetadataFile() throws IOException { private Map getDummyMetadata(String prefix, int commitGeneration) { Map metadata = new HashMap<>(); - metadata.put(prefix + ".cfe", prefix + ".cfe::" + prefix + ".cfe__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); - metadata.put(prefix + ".cfs", prefix + ".cfs::" + prefix + ".cfs__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); - metadata.put(prefix + ".si", prefix + ".si::" + prefix + ".si__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000)); + metadata.put( + prefix + ".cfe", + prefix + + ".cfe::" + + prefix + + ".cfe__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(512000, 1024000) + ); + metadata.put( + prefix + ".cfs", + prefix + + ".cfs::" + + prefix + + ".cfs__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(512000, 1024000) + ); + metadata.put( + prefix + ".si", + prefix + + ".si::" + + prefix + + ".si__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 
5000) + + "::" + + randomIntBetween(512000, 1024000) + ); metadata.put( "segments_" + commitGeneration, "segments_" @@ -154,6 +188,8 @@ private Map getDummyMetadata(String prefix, int commitGeneration + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(1024, 5120) ); return metadata; } @@ -250,7 +286,7 @@ public void testDeleteFileException() throws IOException { assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteFile("_0.si")); } - public void testFileLenght() throws IOException { + public void testFileLength() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -259,9 +295,7 @@ public void testFileLenght() throws IOException { assertTrue(uploadedSegments.containsKey("_0.si")); - when(remoteDataDirectory.fileLength(startsWith("_0.si"))).thenReturn(1234L); - - assertEquals(1234L, remoteSegmentStoreDirectory.fileLength("_0.si")); + assertEquals(uploadedSegments.get("_0.si").getLength(), remoteSegmentStoreDirectory.fileLength("_0.si")); } public void testFileLenghtNoSuchFile() throws IOException { @@ -376,8 +410,8 @@ public void testContainsFile() throws IOException { ); Map metadata = new HashMap<>(); - metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); - metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345"); + metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512"); + metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024"); when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(createMetadataFileBytes(metadata)); @@ -390,7 +424,7 @@ public void testContainsFile() throws IOException { UnsupportedOperationException.class, () -> uploadedSegmentMetadataMap.put( "_100.si", - new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234") + new RemoteSegmentStoreDirectory.UploadedSegmentMetadata("_100.si", "_100.si__uuid1", "1234", 500) ) ); @@ -531,8 +565,8 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { ); Map metadata = new HashMap<>(); - metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); - metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345"); + metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512"); + metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024"); BytesStreamOutput output = new BytesStreamOutput(); IndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java index 2a30e58b8802c..3a73015c25589 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java @@ -59,11 +59,27 @@ private Map getDummyData() { String prefix = "_0"; expectedOutput.put( prefix + ".cfe", - prefix + ".cfe::" + prefix + ".cfe__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000) + prefix + + ".cfe::" + + prefix + + ".cfe__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(1024, 2048) ); expectedOutput.put( prefix + ".cfs", - prefix + ".cfs::" + prefix + 
".cfs__" + UUIDs.base64UUID() + "::" + randomIntBetween(1000, 5000) + prefix + + ".cfs::" + + prefix + + ".cfs__" + + UUIDs.base64UUID() + + "::" + + randomIntBetween(1000, 5000) + + "::" + + randomIntBetween(1024, 2048) ); return expectedOutput; } From 500108de8d3bfc46b2208216c7628dd1b6d5a298 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 13:39:09 -0400 Subject: [PATCH 04/28] Fixing SortField comparison to use equals instead of reference equality (#6901) (#6957) (cherry picked from commit 55936ac60a2f22d7a769901d8153b9af0c7fa79c) Signed-off-by: Andriy Redko Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../test/search/260_sort_mixed.yml | 71 +++++++++++++++++++ .../action/search/SearchPhaseController.java | 2 +- 2 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml new file mode 100644 index 0000000000000..321883a1063e7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_mixed.yml @@ -0,0 +1,71 @@ +"search across indices with mixed long and double numeric types": + - skip: + version: " - 2.6.99" + reason: relies on numeric sort optimization that landed in 2.7.0 only + + - do: + indices.create: + index: test_1 + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + mappings: + properties: + counter: + type: long + + - do: + indices.create: + index: test_2 + body: + settings: + number_of_shards: "1" + number_of_replicas: "0" + mappings: + properties: + counter: + type: double + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + - counter: 223372036854775800 + - index: + _index: test_2 + - counter: 1223372036854775800.23 + - index: + _index: test_2 + - counter: 184.4 + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: desc }] + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 1223372036854775800.23 } + - match: { hits.hits.0.sort: [1223372036854775800.23] } + - match: { hits.hits.1._index: test_1 } + - match: { hits.hits.1._source.counter: 223372036854775800 } + - match: { hits.hits.1.sort: [223372036854775800] } + + - do: + search: + index: test_* + rest_total_hits_as_int: true + body: + sort: [{ counter: asc }] + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test_2 } + - match: { hits.hits.0._source.counter: 184.4 } + - match: { hits.hits.0.sort: [184.4] } + - match: { hits.hits.1._index: test_1 } + - match: { hits.hits.1._source.counter: 223372036854775800 } + - match: { hits.hits.1.sort: [223372036854775800] } diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index a141d2a0680d6..6199af11a565f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -635,7 +635,7 @@ private static Sort createSort(TopFieldDocs[] topFieldDocs) { */ private static boolean isSortWideningRequired(TopFieldDocs[] topFieldDocs, int sortFieldindex) { for 
(int i = 0; i < topFieldDocs.length - 1; i++) { - if (topFieldDocs[i].fields[sortFieldindex] != topFieldDocs[i + 1].fields[sortFieldindex]) { + if (!topFieldDocs[i].fields[sortFieldindex].equals(topFieldDocs[i + 1].fields[sortFieldindex])) { return true; } } From c656949ed9695680bc032a667752a4e87f5d1b8f Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 5 Apr 2023 10:51:43 +0530 Subject: [PATCH 05/28] Fix assertion failure in IndexShard.updateGlobalCheckpointOnReplica() when remote translog is enabled (#6975) (#6978) (cherry picked from commit 95c6ed9d7f14c10119967b3d107fde04bd89e7fd) Signed-off-by: Sachin Kale --- .../java/org/opensearch/remotestore/RemoteStoreIT.java | 4 ---- .../main/java/org/opensearch/index/shard/IndexShard.java | 6 ++++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 86e4e50a08a38..1644d1c3e63ba 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -282,22 +282,18 @@ public void testPeerRecoveryWithRemoteStoreNoRemoteTranslogRefresh() throws Exce testPeerRecovery(false, randomIntBetween(2, 5), false); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6193") public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataFlush() throws Exception { testPeerRecovery(true, 1, true); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6193") public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Exception { testPeerRecovery(true, randomIntBetween(2, 5), true); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6193") public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception { testPeerRecovery(true, 1, false); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6193") public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogRefresh() throws Exception { testPeerRecovery(true, randomIntBetween(2, 5), false); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index eaf26c933635f..9d06ce7c6a391 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3087,9 +3087,11 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * calculations of the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move * to recovery finalization, or even finished recovery before the update arrives here. + * When remote translog is enabled for an index, replication operation is limited to primary term validation and does not + * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint. 
*/ - assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED - : "supposedly in-sync shard copy received a global checkpoint [" + assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) + || indexSettings.isRemoteTranslogStoreEnabled() : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" From c89a2e3deeac0ff81db362498f4323b0b397f484 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 22:59:28 -0700 Subject: [PATCH 06/28] Use DirectoryFactory interface to create remote directory (#6970) (#6985) --- .../java/org/opensearch/index/IndexModule.java | 2 +- .../java/org/opensearch/index/IndexService.java | 10 +++------- .../RemoteSegmentStoreDirectoryFactory.java | 5 +++-- .../org/opensearch/indices/IndicesService.java | 6 +++--- .../src/main/java/org/opensearch/node/Node.java | 2 +- .../org/opensearch/plugins/IndexStorePlugin.java | 16 ---------------- .../RemoteSegmentStoreDirectoryFactoryTests.java | 14 +++++++------- 7 files changed, 18 insertions(+), 37 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 5e8f95acdfe39..417983c3b68d3 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -503,7 +503,7 @@ public IndexService newIndexService( NamedWriteableRegistry namedWriteableRegistry, BooleanSupplier idFieldDataEnabled, ValuesSourceRegistry valuesSourceRegistry, - IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, + IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier ) throws IOException { final IndexEventListener eventListener = freeze(); diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 2ced9f56d7a35..7d791ace44682 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -138,7 +138,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; private final IndexStorePlugin.DirectoryFactory directoryFactory; - private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; + private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final IndexStorePlugin.RecoveryStateFactory recoveryStateFactory; private final CheckedFunction readerWrapper; private final IndexCache indexCache; @@ -194,7 +194,7 @@ public IndexService( Client client, QueryCache queryCache, IndexStorePlugin.DirectoryFactory directoryFactory, - IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, + IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, IndexEventListener eventListener, Function> wrapperFactory, MapperRegistry mapperRegistry, @@ -470,11 +470,7 @@ public synchronized IndexShard createShard( Store remoteStore = null; if (this.indexSettings.isRemoteStoreEnabled()) { - Directory remoteDirectory = remoteDirectoryFactory.newDirectory( - this.indexSettings.getRemoteStoreRepository(), - this.indexSettings, - path - ); + Directory remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path); 
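With the separate RemoteDirectoryFactory interface removed in this patch, remote directories are created through the same IndexStorePlugin.DirectoryFactory contract as local ones, and the factory resolves the repository name from the index settings itself (see the RemoteSegmentStoreDirectoryFactory hunk below). A small sketch of that shape, using stand-in types rather than the real OpenSearch classes:

// Stand-in types; the real ones live in org.opensearch.plugins and org.opensearch.index.
interface IndexSettingsLike {
    String getRemoteStoreRepository();
}

interface DirectoryFactoryLike {
    String newDirectory(IndexSettingsLike indexSettings, String shardPath);
}

public class DirectoryFactorySketch {
    public static void main(String[] args) {
        // The caller no longer passes a repository name; the factory derives it from the settings.
        DirectoryFactoryLike remoteDirectoryFactory = (settings, shardPath) ->
            "remote directory in repository [" + settings.getRemoteStoreRepository() + "] for " + shardPath;

        IndexSettingsLike settings = () -> "remote_store_repository";
        System.out.println(remoteDirectoryFactory.newDirectory(settings, "indices/uuid_1/0"));
    }
}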
remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY); } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index e77eb52bd3891..cb5548167a577 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -27,7 +27,7 @@ * * @opensearch.internal */ -public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { +public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.DirectoryFactory { private final Supplier repositoriesService; @@ -36,7 +36,8 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito } @Override - public Directory newDirectory(String repositoryName, IndexSettings indexSettings, ShardPath path) throws IOException { + public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { + String repositoryName = indexSettings.getRemoteStoreRepository(); try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 629f7f7397f96..d26f99956ba48 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -290,7 +290,7 @@ public class IndicesService extends AbstractLifecycleComponent private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); private final boolean nodeWriteDanglingIndicesInfo; private final ValuesSourceRegistry valuesSourceRegistry; - private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; + private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; @Override @@ -320,7 +320,7 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, + IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier repositoriesServiceSupplier ) { this.settings = settings; @@ -435,7 +435,7 @@ public IndicesService( Map directoryFactories, ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, - IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, + IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier repositoriesServiceSupplier ) { this.settings = settings; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 05b394305fbdb..4c6c129794876 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -671,7 +671,7 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); - final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( 
repositoriesServiceReference::get ); diff --git a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java index 1dc90a21c2f70..2f549fec54759 100644 --- a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java @@ -66,22 +66,6 @@ interface DirectoryFactory { Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException; } - /** - * An interface that describes how to create a new remote directory instance per shard. - */ - @FunctionalInterface - interface RemoteDirectoryFactory { - /** - * Creates a new remote directory per shard. This method is called once per shard on shard creation. - * @param repositoryName repository name - * @param indexSettings the shards index settings - * @param shardPath the path the shard is using - * @return a new RemoteDirectory instance - * @throws IOException if an IOException occurs while opening the directory - */ - Directory newDirectory(String repositoryName, IndexSettings indexSettings, ShardPath shardPath) throws IOException; - } - /** * The {@link DirectoryFactory} mappings for this plugin. When an index is created the store type setting * {@link org.opensearch.index.IndexModule#INDEX_STORE_TYPE_SETTING} on the index will be examined and either use the default or a diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index 0105d0dc309c2..7be86aa0d96a4 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -52,7 +52,10 @@ public void setup() { } public void testNewDirectory() throws IOException { - Settings settings = Settings.builder().put(IndexMetadata.SETTING_INDEX_UUID, "uuid_1").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid_1") + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, "remote_store_repository") + .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0"); ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0)); @@ -66,7 +69,7 @@ public void testNewDirectory() throws IOException { when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); - try (Directory directory = remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath)) { + try (Directory directory = remoteSegmentStoreDirectoryFactory.newDirectory(indexSettings, shardPath)) { assertTrue(directory instanceof RemoteSegmentStoreDirectory); ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); verify(blobStore, times(2)).blobContainer(blobPathCaptor.capture()); @@ -80,17 +83,14 @@ public void testNewDirectory() throws IOException { } public void testNewDirectoryRepositoryDoesNotExist() { - Settings settings = Settings.builder().build(); + Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, "remote_store_repository").build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); Path tempDir = 
createTempDir().resolve(indexSettings.getUUID()).resolve("0"); ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0)); when(repositoriesService.repository("remote_store_repository")).thenThrow(new RepositoryMissingException("Missing")); - assertThrows( - IllegalArgumentException.class, - () -> remoteSegmentStoreDirectoryFactory.newDirectory("remote_store_repository", indexSettings, shardPath) - ); + assertThrows(IllegalArgumentException.class, () -> remoteSegmentStoreDirectoryFactory.newDirectory(indexSettings, shardPath)); } } From 3accec8cfc9cb3f03061b996ac2ee80b71ddf414 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 5 Apr 2023 08:42:28 -0400 Subject: [PATCH 07/28] Add experimental support for zstd compression. (#3577) (#6982) * Add experimental support for zstd and lz4 (native) compression. Add experimental support for zstd (with and without dictionary support) and lz4 (native) compressions as discussed in: https://github.com/opensearch-project/OpenSearch/issues/3354. Users would be able to set the index.codec setting with the values "lz4_native" (for lz4 native), "zstd" (for zstd with dictionary), and "zstd_no_dict" (for zstd without a dictionary). * Fix license issues, add tests for zstd, zstd_no_dict, and lz4_native compressions. * Fix DCO and and issues with CodecTests.java. * Fix forbidden api violation error for lz4-java. * Fix license headers. Remove and fix unnecessary fields. * Fix magic numbers. Use more restrictive access modifiers. * Use protected access modifier for Zstd and LZ4 compression mode classes. * Allow negative compression levels for zstd. Use more restrictive access modifiers. * Use a more restrictive permission for loading zstd-jni and lz4-java libraries. * Rename a file (follow a consistent version naming convention). * Refactor and create a new custom-codecs sandbox module. * Remove blank lines. * Restore Lucene92CustomCodec to extend from FilterCodec. * Make use of the compressionLevel argument. * Make Lucene92CustomCodec abstract and use a package-private access modifier. * Fix missing JavaDoc issues. Remove unused field in PerFieldMappingPostingFormatCodec. * Fix lint errors. * Fix the description for the custom-codecs plugin. * Fix wildcard import and improve documentation. * Access control exception fixed. Removed lz4-java support for now. - PRs were made to zstd-jni and lz4-java to use AccessController.doPrivileged. - The zstd-jni PR is merged since version 1.5.4-1. - The lz4-java support temporarily removed until the PR gets merged. * Upgrade plugin to use Lucene95Codec. Rename files accordingly. - Upgrade plugin to use Lucene95Codec. Rename files accordingly. - Fix lint issue with plugin-security. - Remove thridPartyAudit that was there for supporting lz4-java. * Add test cases for compression/decompression. Other minor changes. - add test cases for compression/decompression. - rename package. - add a CHANGELOG entry. - add more checks for signed integer arithmetic. * Remove ES grant in plugin-security.policy. Fix minor javadoc issue. - Remove ES grant in plugin-security.policy file. - Replace @link and @See to fix javadoc error. * Upgrade jettison version to 1.5.4. * Update SHA for jettison 1.5.4. --------- (cherry picked from commit f071c9badfb649bbe875af041b8b3cc955fd2865) Signed-off-by: Mulugeta Mammo Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] Co-authored-by: Daniel (dB.) 
Doubrovkine --- CHANGELOG.md | 2 + sandbox/modules/custom-codecs/build.gradle | 28 +++ .../licenses/zstd-jni-1.5.4-1.jar.sha1 | 1 + .../licenses/zstd-jni-LICENSE.txt | 29 +++ .../licenses/zstd-jni-NOTICE.txt | 1 + .../codec/customcodecs/CustomCodecPlugin.java | 40 ++++ .../customcodecs/CustomCodecService.java | 67 ++++++ .../CustomCodecServiceFactory.java | 27 +++ .../customcodecs/Lucene95CustomCodec.java | 45 ++++ .../Lucene95CustomStoredFieldsFormat.java | 107 +++++++++ .../PerFieldMappingPostingFormatCodec.java | 25 ++ .../index/codec/customcodecs/ZstdCodec.java | 36 +++ .../customcodecs/ZstdCompressionMode.java | 203 ++++++++++++++++ .../codec/customcodecs/ZstdNoDictCodec.java | 36 +++ .../ZstdNoDictCompressionMode.java | 178 ++++++++++++++ .../codec/customcodecs/package-info.java | 12 + .../plugin-metadata/plugin-security.policy | 11 + .../services/org.apache.lucene.codecs.Codec | 2 + .../customcodecs/AbstractCompressorTests.java | 219 ++++++++++++++++++ .../customcodecs/ZstdCompressorTests.java | 30 +++ .../ZstdNoDictCompressorTests.java | 30 +++ 21 files changed, 1129 insertions(+) create mode 100644 sandbox/modules/custom-codecs/build.gradle create mode 100644 sandbox/modules/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 create mode 100644 sandbox/modules/custom-codecs/licenses/zstd-jni-LICENSE.txt create mode 100644 sandbox/modules/custom-codecs/licenses/zstd-jni-NOTICE.txt create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java create mode 100644 sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java create mode 100644 sandbox/modules/custom-codecs/src/main/plugin-metadata/plugin-security.policy create mode 100644 sandbox/modules/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec create mode 100644 sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java create mode 100644 sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java create mode 100644 sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index c4d347d30dab7..3202547a55e67 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -19,6 +19,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Enable sort optimization for all NumericTypes ([#6464](https://github.com/opensearch-project/OpenSearch/pull/6464) - Remove 'cluster_manager' role attachment when using 'node.master' deprecated setting ([#6331](https://github.com/opensearch-project/OpenSearch/pull/6331)) - Add new cluster settings to ignore weighted round-robin routing and fallback to default behaviour. ([#6834](https://github.com/opensearch-project/OpenSearch/pull/6834)) +- Add experimental support for ZSTD compression. ([#3577](https://github.com/opensearch-project/OpenSearch/pull/3577)) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.18.0 to 2.20.0 ([#6490](https://github.com/opensearch-project/OpenSearch/pull/6490)) @@ -35,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `net.minidev:json-smart` from 2.4.7 to 2.4.10 - Bump `org.apache.maven:maven-model` from 3.6.2 to 3.9.1 - Bump `org.codehaus.jettison:jettison` from 1.5.3 to 1.5.4 ([#6878](https://github.com/opensearch-project/OpenSearch/pull/6878)) +- Add `com.github.luben:zstd-jni:1.5.4-1` ([#3577](https://github.com/opensearch-project/OpenSearch/pull/3577)) ### Changed - Require MediaType in Strings.toString API ([#6009](https://github.com/opensearch-project/OpenSearch/pull/6009)) diff --git a/sandbox/modules/custom-codecs/build.gradle b/sandbox/modules/custom-codecs/build.gradle new file mode 100644 index 0000000000000..bf1bc719b0ae6 --- /dev/null +++ b/sandbox/modules/custom-codecs/build.gradle @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +apply plugin: 'opensearch.opensearchplugin' +apply plugin: 'opensearch.yaml-rest-test' + +opensearchplugin { + name 'custom-codecs' + description 'A plugin that implements custom compression codecs.' + classname 'org.opensearch.index.codec.customcodecs.CustomCodecPlugin' + licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile rootProject.file('NOTICE.txt') +} + +dependencies { + api "com.github.luben:zstd-jni:1.5.4-1" +} + +yamlRestTest.enabled = false; +testingConventions.enabled = false; diff --git a/sandbox/modules/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 b/sandbox/modules/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 new file mode 100644 index 0000000000000..e95377f702a6c --- /dev/null +++ b/sandbox/modules/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 @@ -0,0 +1 @@ +291ccaacc039e41932de877303edb6af98a91c24 diff --git a/sandbox/modules/custom-codecs/licenses/zstd-jni-LICENSE.txt b/sandbox/modules/custom-codecs/licenses/zstd-jni-LICENSE.txt new file mode 100644 index 0000000000000..c4dd507c1c72f --- /dev/null +++ b/sandbox/modules/custom-codecs/licenses/zstd-jni-LICENSE.txt @@ -0,0 +1,29 @@ +----------------------------------------------------------------------------- +** Beginning of "BSD License" text. ** + +Zstd-jni: JNI bindings to Zstd Library + +Copyright (c) 2015-present, Luben Karavelov/ All rights reserved. 
+ +BSD License + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/sandbox/modules/custom-codecs/licenses/zstd-jni-NOTICE.txt b/sandbox/modules/custom-codecs/licenses/zstd-jni-NOTICE.txt new file mode 100644 index 0000000000000..389c97cbc892d --- /dev/null +++ b/sandbox/modules/custom-codecs/licenses/zstd-jni-NOTICE.txt @@ -0,0 +1 @@ +The code for the JNI bindings to Zstd library was originally authored by Luben Karavelov diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java new file mode 100644 index 0000000000000..1e0245f3c8c6b --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.EnginePlugin; +import org.opensearch.index.codec.CodecServiceFactory; +import org.opensearch.index.IndexSettings; + +import java.util.Optional; + +/** + * A plugin that implements custom codecs. Supports these codecs: + *
<ul> + *     <li>ZSTD + *     <li>ZSTDNODICT + * </ul>
+ * + * @opensearch.internal + */ +public final class CustomCodecPlugin extends Plugin implements EnginePlugin { + + /** Creates a new instance */ + public CustomCodecPlugin() {} + + /** + * @param indexSettings is the default indexSettings + * @return the engine factory + */ + @Override + public Optional getCustomCodecServiceFactory(final IndexSettings indexSettings) { + return Optional.of(new CustomCodecServiceFactory()); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java new file mode 100644 index 0000000000000..4dd25caa86d94 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.mapper.MapperService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Map; + +/** + * CustomCodecService provides ZSTD and ZSTDNODICT compression codecs. + */ +public class CustomCodecService extends CodecService { + private final Map codecs; + + /** + * Creates a new CustomCodecService. + * + * @param mapperService A mapper service. + * @param logger A logger. + */ + public CustomCodecService(MapperService mapperService, Logger logger) { + super(mapperService, logger); + final MapBuilder codecs = MapBuilder.newMapBuilder(); + if (mapperService == null) { + codecs.put(Lucene95CustomCodec.Mode.ZSTD.name(), new ZstdCodec()); + codecs.put(Lucene95CustomCodec.Mode.ZSTDNODICT.name(), new ZstdNoDictCodec()); + } else { + codecs.put( + Lucene95CustomCodec.Mode.ZSTD.name(), + new PerFieldMappingPostingFormatCodec(Lucene95CustomCodec.Mode.ZSTD, mapperService) + ); + codecs.put( + Lucene95CustomCodec.Mode.ZSTDNODICT.name(), + new PerFieldMappingPostingFormatCodec(Lucene95CustomCodec.Mode.ZSTDNODICT, mapperService) + ); + } + this.codecs = codecs.immutableMap(); + } + + @Override + public Codec codec(String name) { + Codec codec = codecs.get(name); + if (codec == null) { + return super.codec(name); + } + return codec; + } + + @Override + public String[] availableCodecs() { + ArrayList ac = new ArrayList(Arrays.asList(super.availableCodecs())); + ac.addAll(codecs.keySet()); + return ac.toArray(new String[0]); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java new file mode 100644 index 0000000000000..9a1872abfcbd7 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.customcodecs; + +import org.opensearch.index.codec.CodecService; +import org.opensearch.index.codec.CodecServiceConfig; +import org.opensearch.index.codec.CodecServiceFactory; + +/** + * A factory for creating new {@link CodecService} instance + */ +public class CustomCodecServiceFactory implements CodecServiceFactory { + + /** Creates a new instance. */ + public CustomCodecServiceFactory() {} + + @Override + public CodecService createCodecService(CodecServiceConfig config) { + return new CustomCodecService(config.getMapperService(), config.getLogger()); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java new file mode 100644 index 0000000000000..652306e59559b --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.apache.lucene.codecs.StoredFieldsFormat; +import org.apache.lucene.codecs.FilterCodec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; + +abstract class Lucene95CustomCodec extends FilterCodec { + public static final int DEFAULT_COMPRESSION_LEVEL = 6; + + /** Each mode represents a compression algorithm. */ + public enum Mode { + ZSTD, + ZSTDNODICT + } + + private final StoredFieldsFormat storedFieldsFormat; + + /** new codec for a given compression algorithm and default compression level */ + public Lucene95CustomCodec(Mode mode) { + this(mode, DEFAULT_COMPRESSION_LEVEL); + } + + public Lucene95CustomCodec(Mode mode, int compressionLevel) { + super(mode.name(), new Lucene95Codec()); + this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); + } + + @Override + public StoredFieldsFormat storedFieldsFormat() { + return storedFieldsFormat; + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java new file mode 100644 index 0000000000000..e0253516b6d0a --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.customcodecs; + +import java.io.IOException; +import java.util.Objects; +import org.apache.lucene.codecs.StoredFieldsFormat; +import org.apache.lucene.codecs.StoredFieldsReader; +import org.apache.lucene.codecs.StoredFieldsWriter; +import org.apache.lucene.codecs.compressing.CompressionMode; +import org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; + +/** Stored field format used by pluggable codec */ +public class Lucene95CustomStoredFieldsFormat extends StoredFieldsFormat { + + /** A key that we use to map to a mode */ + public static final String MODE_KEY = Lucene95CustomStoredFieldsFormat.class.getSimpleName() + ".mode"; + + private static final int ZSTD_BLOCK_LENGTH = 10 * 48 * 1024; + private static final int ZSTD_MAX_DOCS_PER_BLOCK = 4096; + private static final int ZSTD_BLOCK_SHIFT = 10; + + private final CompressionMode zstdCompressionMode; + private final CompressionMode zstdNoDictCompressionMode; + + private final Lucene95CustomCodec.Mode mode; + + /** default constructor */ + public Lucene95CustomStoredFieldsFormat() { + this(Lucene95CustomCodec.Mode.ZSTD, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); + } + + /** + * Creates a new instance. + * + * @param mode The mode represents ZSTD or ZSTDNODICT + */ + public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode) { + this(mode, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); + } + + /** + * Creates a new instance with the specified mode and compression level. + * + * @param mode The mode represents ZSTD or ZSTDNODICT + * @param compressionLevel The compression level for the mode. 
+ */ + public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode, int compressionLevel) { + this.mode = Objects.requireNonNull(mode); + zstdCompressionMode = new ZstdCompressionMode(compressionLevel); + zstdNoDictCompressionMode = new ZstdNoDictCompressionMode(compressionLevel); + } + + @Override + public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { + String value = si.getAttribute(MODE_KEY); + if (value == null) { + throw new IllegalStateException("missing value for " + MODE_KEY + " for segment: " + si.name); + } + Lucene95CustomCodec.Mode mode = Lucene95CustomCodec.Mode.valueOf(value); + return impl(mode).fieldsReader(directory, si, fn, context); + } + + @Override + public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { + String previous = si.putAttribute(MODE_KEY, mode.name()); + if (previous != null && previous.equals(mode.name()) == false) { + throw new IllegalStateException( + "found existing value for " + MODE_KEY + " for segment: " + si.name + " old = " + previous + ", new = " + mode.name() + ); + } + return impl(mode).fieldsWriter(directory, si, context); + } + + private StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { + switch (mode) { + case ZSTD: + return new Lucene90CompressingStoredFieldsFormat( + "CustomStoredFieldsZstd", + zstdCompressionMode, + ZSTD_BLOCK_LENGTH, + ZSTD_MAX_DOCS_PER_BLOCK, + ZSTD_BLOCK_SHIFT + ); + case ZSTDNODICT: + return new Lucene90CompressingStoredFieldsFormat( + "CustomStoredFieldsZstdNoDict", + zstdNoDictCompressionMode, + ZSTD_BLOCK_LENGTH, + ZSTD_MAX_DOCS_PER_BLOCK, + ZSTD_BLOCK_SHIFT + ); + default: + throw new AssertionError(); + } + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java new file mode 100644 index 0000000000000..f1c64853bca40 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.opensearch.index.mapper.MapperService; + +/** PerFieldMappingPostingFormatCodec. {@link org.opensearch.index.codec.PerFieldMappingPostingFormatCodec} */ +public class PerFieldMappingPostingFormatCodec extends Lucene95CustomCodec { + + /** + * Creates a new instance. + * + * @param compressionMode The compression mode (ZSTD or ZSTDNODICT). + * @param mapperService The mapper service. 
+ */ + public PerFieldMappingPostingFormatCodec(Lucene95CustomCodec.Mode compressionMode, MapperService mapperService) { + super(compressionMode); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java new file mode 100644 index 0000000000000..086e2461b1f6a --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +/** + * ZstdCodec provides ZSTD compressor using the zstd-jni library. + */ +public class ZstdCodec extends Lucene95CustomCodec { + + /** + * Creates a new ZstdCodec instance with the default compression level. + */ + public ZstdCodec() { + this(DEFAULT_COMPRESSION_LEVEL); + } + + /** + * Creates a new ZstdCodec instance. + * + * @param compressionLevel The compression level. + */ + public ZstdCodec(int compressionLevel) { + super(Mode.ZSTD, compressionLevel); + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java new file mode 100644 index 0000000000000..795ddf3ab2d17 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java @@ -0,0 +1,203 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdCompressCtx; +import com.github.luben.zstd.ZstdDecompressCtx; +import com.github.luben.zstd.ZstdDictCompress; +import com.github.luben.zstd.ZstdDictDecompress; +import java.io.IOException; +import org.apache.lucene.codecs.compressing.CompressionMode; +import org.apache.lucene.codecs.compressing.Compressor; +import org.apache.lucene.codecs.compressing.Decompressor; +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.ByteBuffersDataInput; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BytesRef; + +/** Zstandard Compression Mode */ +public class ZstdCompressionMode extends CompressionMode { + + private static final int NUM_SUB_BLOCKS = 10; + private static final int DICT_SIZE_FACTOR = 6; + private static final int DEFAULT_COMPRESSION_LEVEL = 6; + + private final int compressionLevel; + + /** default constructor */ + protected ZstdCompressionMode() { + this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; + } + + /** + * Creates a new instance. + * + * @param compressionLevel The compression level to use. 
+ */ + protected ZstdCompressionMode(int compressionLevel) { + this.compressionLevel = compressionLevel; + } + + @Override + public Compressor newCompressor() { + return new ZstdCompressor(compressionLevel); + } + + @Override + public Decompressor newDecompressor() { + return new ZstdDecompressor(); + } + + /** zstandard compressor */ + private static final class ZstdCompressor extends Compressor { + + private final int compressionLevel; + private byte[] compressedBuffer; + + /** compressor with a given compresion level */ + public ZstdCompressor(int compressionLevel) { + this.compressionLevel = compressionLevel; + compressedBuffer = BytesRef.EMPTY_BYTES; + } + + /*resuable compress function*/ + private void doCompress(byte[] bytes, int offset, int length, ZstdCompressCtx cctx, DataOutput out) throws IOException { + if (length == 0) { + out.writeVInt(0); + return; + } + final int maxCompressedLength = (int) Zstd.compressBound(length); + compressedBuffer = ArrayUtil.grow(compressedBuffer, maxCompressedLength); + + int compressedSize = cctx.compressByteArray(compressedBuffer, 0, compressedBuffer.length, bytes, offset, length); + + out.writeVInt(compressedSize); + out.writeBytes(compressedBuffer, compressedSize); + } + + private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { + assert offset >= 0 : "offset value must be greater than 0"; + + final int dictLength = length / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR); + final int blockLength = (length - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; + out.writeVInt(dictLength); + out.writeVInt(blockLength); + + final int end = offset + length; + assert end >= 0 : "buffer read size must be greater than 0"; + + try (ZstdCompressCtx cctx = new ZstdCompressCtx()) { + cctx.setLevel(compressionLevel); + + // dictionary compression first + doCompress(bytes, offset, dictLength, cctx, out); + cctx.loadDict(new ZstdDictCompress(bytes, offset, dictLength, compressionLevel)); + + for (int start = offset + dictLength; start < end; start += blockLength) { + int l = Math.min(blockLength, end - start); + doCompress(bytes, start, l, cctx, out); + } + } + } + + @Override + public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { + final int length = (int) buffersInput.size(); + byte[] bytes = new byte[length]; + buffersInput.readBytes(bytes, 0, length); + compress(bytes, 0, length, out); + } + + @Override + public void close() throws IOException {} + } + + /** zstandard decompressor */ + private static final class ZstdDecompressor extends Decompressor { + + private byte[] compressedBuffer; + + /** default decompressor */ + public ZstdDecompressor() { + compressedBuffer = BytesRef.EMPTY_BYTES; + } + + /*resuable decompress function*/ + private void doDecompress(DataInput in, ZstdDecompressCtx dctx, BytesRef bytes, int decompressedLen) throws IOException { + final int compressedLength = in.readVInt(); + if (compressedLength == 0) { + return; + } + + compressedBuffer = ArrayUtil.grow(compressedBuffer, compressedLength); + in.readBytes(compressedBuffer, 0, compressedLength); + + bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + decompressedLen); + int uncompressed = dctx.decompressByteArray(bytes.bytes, bytes.length, decompressedLen, compressedBuffer, 0, compressedLength); + + if (decompressedLen != uncompressed) { + throw new IllegalStateException(decompressedLen + " " + uncompressed); + } + bytes.length += uncompressed; + } + + @Override + public void decompress(DataInput in, int 
originalLength, int offset, int length, BytesRef bytes) throws IOException { + assert offset + length <= originalLength : "buffer read size must be within limit"; + + if (length == 0) { + bytes.length = 0; + return; + } + final int dictLength = in.readVInt(); + final int blockLength = in.readVInt(); + bytes.bytes = ArrayUtil.grow(bytes.bytes, dictLength); + bytes.offset = bytes.length = 0; + + try (ZstdDecompressCtx dctx = new ZstdDecompressCtx()) { + + // decompress dictionary first + doDecompress(in, dctx, bytes, dictLength); + + dctx.loadDict(new ZstdDictDecompress(bytes.bytes, 0, dictLength)); + + int offsetInBlock = dictLength; + int offsetInBytesRef = offset; + + // Skip unneeded blocks + while (offsetInBlock + blockLength < offset) { + final int compressedLength = in.readVInt(); + in.skipBytes(compressedLength); + offsetInBlock += blockLength; + offsetInBytesRef -= blockLength; + } + + // Read blocks that intersect with the interval we need + while (offsetInBlock < offset + length) { + bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); + int l = Math.min(blockLength, originalLength - offsetInBlock); + doDecompress(in, dctx, bytes, l); + offsetInBlock += blockLength; + } + + bytes.offset = offsetInBytesRef; + bytes.length = length; + + assert bytes.isValid() : "decompression output is corrupted"; + } + } + + @Override + public Decompressor clone() { + return new ZstdDecompressor(); + } + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java new file mode 100644 index 0000000000000..c33ca1f4ff6e7 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +/** + * ZstdNoDictCodec provides ZSTD compressor without a dictionary support. + */ +public class ZstdNoDictCodec extends Lucene95CustomCodec { + + /** + * Creates a new ZstdNoDictCodec instance with the default compression level. + */ + public ZstdNoDictCodec() { + this(DEFAULT_COMPRESSION_LEVEL); + } + + /** + * Creates a new ZstdNoDictCodec instance. + * + * @param compressionLevel The compression level. + */ + public ZstdNoDictCodec(int compressionLevel) { + super(Mode.ZSTDNODICT, compressionLevel); + } + + @Override + public String toString() { + return getClass().getSimpleName(); + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java new file mode 100644 index 0000000000000..61808191556f0 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java @@ -0,0 +1,178 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.customcodecs; + +import com.github.luben.zstd.Zstd; +import java.io.IOException; +import org.apache.lucene.codecs.compressing.CompressionMode; +import org.apache.lucene.codecs.compressing.Compressor; +import org.apache.lucene.codecs.compressing.Decompressor; +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.ByteBuffersDataInput; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BytesRef; + +/** ZSTD Compression Mode (without a dictionary support). */ +public class ZstdNoDictCompressionMode extends CompressionMode { + + private static final int NUM_SUB_BLOCKS = 10; + private static final int DEFAULT_COMPRESSION_LEVEL = 6; + + private final int compressionLevel; + + /** default constructor */ + protected ZstdNoDictCompressionMode() { + this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; + } + + /** + * Creates a new instance with the given compression level. + * + * @param compressionLevel The compression level. + */ + protected ZstdNoDictCompressionMode(int compressionLevel) { + this.compressionLevel = compressionLevel; + } + + @Override + public Compressor newCompressor() { + return new ZstdCompressor(compressionLevel); + } + + @Override + public Decompressor newDecompressor() { + return new ZstdDecompressor(); + } + + /** zstandard compressor */ + private static final class ZstdCompressor extends Compressor { + + private final int compressionLevel; + private byte[] compressedBuffer; + + /** compressor with a given compression level */ + public ZstdCompressor(int compressionLevel) { + this.compressionLevel = compressionLevel; + compressedBuffer = BytesRef.EMPTY_BYTES; + } + + private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { + assert offset >= 0 : "offset value must be greater than 0"; + + int blockLength = (length + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; + out.writeVInt(blockLength); + + final int end = offset + length; + assert end >= 0 : "buffer read size must be greater than 0"; + + for (int start = offset; start < end; start += blockLength) { + int l = Math.min(blockLength, end - start); + + if (l == 0) { + out.writeVInt(0); + return; + } + + final int maxCompressedLength = (int) Zstd.compressBound(l); + compressedBuffer = ArrayUtil.grow(compressedBuffer, maxCompressedLength); + + int compressedSize = (int) Zstd.compressByteArray( + compressedBuffer, + 0, + compressedBuffer.length, + bytes, + start, + l, + compressionLevel + ); + + out.writeVInt(compressedSize); + out.writeBytes(compressedBuffer, compressedSize); + } + } + + @Override + public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { + final int length = (int) buffersInput.size(); + byte[] bytes = new byte[length]; + buffersInput.readBytes(bytes, 0, length); + compress(bytes, 0, length, out); + } + + @Override + public void close() throws IOException {} + } + + /** zstandard decompressor */ + private static final class ZstdDecompressor extends Decompressor { + + private byte[] compressed; + + /** default decompressor */ + public ZstdDecompressor() { + compressed = BytesRef.EMPTY_BYTES; + } + + @Override + public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { + assert offset + length <= originalLength : "buffer read size must be within limit"; + + if (length == 0) { + bytes.length = 0; + return; + } + + final int blockLength = in.readVInt(); +
bytes.offset = bytes.length = 0; + int offsetInBlock = 0; + int offsetInBytesRef = offset; + + // Skip unneeded blocks + while (offsetInBlock + blockLength < offset) { + final int compressedLength = in.readVInt(); + in.skipBytes(compressedLength); + offsetInBlock += blockLength; + offsetInBytesRef -= blockLength; + } + + // Read blocks that intersect with the interval we need + while (offsetInBlock < offset + length) { + bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); + final int compressedLength = in.readVInt(); + if (compressedLength == 0) { + return; + } + compressed = ArrayUtil.grow(compressed, compressedLength); + in.readBytes(compressed, 0, compressedLength); + + int l = Math.min(blockLength, originalLength - offsetInBlock); + bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + l); + + byte[] output = new byte[l]; + + final int uncompressed = (int) Zstd.decompressByteArray(output, 0, l, compressed, 0, compressedLength); + System.arraycopy(output, 0, bytes.bytes, bytes.length, uncompressed); + + bytes.length += uncompressed; + offsetInBlock += blockLength; + } + + bytes.offset = offsetInBytesRef; + bytes.length = length; + + assert bytes.isValid() : "decompression output is corrupted."; + } + + @Override + public Decompressor clone() { + return new ZstdDecompressor(); + } + } +} diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java new file mode 100644 index 0000000000000..e996873963b1b --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * A plugin that implements compression codecs with native implementation. + */ +package org.opensearch.index.codec.customcodecs; diff --git a/sandbox/modules/custom-codecs/src/main/plugin-metadata/plugin-security.policy b/sandbox/modules/custom-codecs/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..8161010cfa897 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +grant codeBase "${codebase.zstd-jni}" { + permission java.lang.RuntimePermission "loadLibrary.*"; +}; diff --git a/sandbox/modules/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/sandbox/modules/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec new file mode 100644 index 0000000000000..8b37d91cd8bc4 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -0,0 +1,2 @@ +org.opensearch.index.codec.customcodecs.ZstdCodec +org.opensearch.index.codec.customcodecs.ZstdNoDictCodec diff --git a/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java new file mode 100644 index 0000000000000..fcfb06ca6b050 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java @@ -0,0 +1,219 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.customcodecs; + +import org.apache.lucene.tests.util.LineFileDocs; +import org.apache.lucene.tests.util.TestUtil; +import org.opensearch.test.OpenSearchTestCase; +import org.apache.lucene.codecs.compressing.Compressor; +import org.apache.lucene.codecs.compressing.Decompressor; +import org.apache.lucene.store.ByteArrayDataInput; +import org.apache.lucene.store.ByteBuffersDataInput; +import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.util.BytesRef; + +import java.util.List; +import java.nio.ByteBuffer; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Random; + +/** + * Test cases for compressors (based on {@See org.opensearch.common.compress.DeflateCompressTests}). 
+ */ +public abstract class AbstractCompressorTests extends OpenSearchTestCase { + + abstract Compressor compressor(); + + abstract Decompressor decompressor(); + + public void testEmpty() throws IOException { + final byte[] bytes = "".getBytes(StandardCharsets.UTF_8); + doTest(bytes); + } + + public void testShortLiterals() throws IOException { + final byte[] bytes = "1234567345673456745608910123".getBytes(StandardCharsets.UTF_8); + doTest(bytes); + } + + public void testRandom() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + final byte[] bytes = new byte[TestUtil.nextInt(r, 1, 100000)]; + r.nextBytes(bytes); + doTest(bytes); + } + } + + public void testLineDocs() throws IOException { + Random r = random(); + LineFileDocs lineFileDocs = new LineFileDocs(r); + for (int i = 0; i < 10; i++) { + int numDocs = TestUtil.nextInt(r, 1, 200); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + for (int j = 0; j < numDocs; j++) { + String s = lineFileDocs.nextDoc().get("body"); + bos.write(s.getBytes(StandardCharsets.UTF_8)); + } + doTest(bos.toByteArray()); + } + lineFileDocs.close(); + } + + public void testRepetitionsL() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + int numLongs = TestUtil.nextInt(r, 1, 10000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + long theValue = r.nextLong(); + for (int j = 0; j < numLongs; j++) { + if (r.nextInt(10) == 0) { + theValue = r.nextLong(); + } + bos.write((byte) (theValue >>> 56)); + bos.write((byte) (theValue >>> 48)); + bos.write((byte) (theValue >>> 40)); + bos.write((byte) (theValue >>> 32)); + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } + + public void testRepetitionsI() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + int numInts = TestUtil.nextInt(r, 1, 20000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int theValue = r.nextInt(); + for (int j = 0; j < numInts; j++) { + if (r.nextInt(10) == 0) { + theValue = r.nextInt(); + } + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } + + public void testRepetitionsS() throws IOException { + Random r = random(); + for (int i = 0; i < 10; i++) { + int numShorts = TestUtil.nextInt(r, 1, 40000); + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + short theValue = (short) r.nextInt(65535); + for (int j = 0; j < numShorts; j++) { + if (r.nextInt(10) == 0) { + theValue = (short) r.nextInt(65535); + } + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + doTest(bos.toByteArray()); + } + } + + public void testMixed() throws IOException { + Random r = random(); + LineFileDocs lineFileDocs = new LineFileDocs(r); + for (int i = 0; i < 2; ++i) { + ByteArrayOutputStream bos = new ByteArrayOutputStream(); + int prevInt = r.nextInt(); + long prevLong = r.nextLong(); + while (bos.size() < 400000) { + switch (r.nextInt(4)) { + case 0: + addInt(r, prevInt, bos); + break; + case 1: + addLong(r, prevLong, bos); + break; + case 2: + addString(lineFileDocs, bos); + break; + case 3: + addBytes(r, bos); + break; + default: + throw new IllegalStateException("Random is broken"); + } + } + doTest(bos.toByteArray()); + } + } + + private void addLong(Random r, long prev, ByteArrayOutputStream bos) { + 
long theValue = prev; + if (r.nextInt(10) != 0) { + theValue = r.nextLong(); + } + bos.write((byte) (theValue >>> 56)); + bos.write((byte) (theValue >>> 48)); + bos.write((byte) (theValue >>> 40)); + bos.write((byte) (theValue >>> 32)); + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + + private void addInt(Random r, int prev, ByteArrayOutputStream bos) { + int theValue = prev; + if (r.nextInt(10) != 0) { + theValue = r.nextInt(); + } + bos.write((byte) (theValue >>> 24)); + bos.write((byte) (theValue >>> 16)); + bos.write((byte) (theValue >>> 8)); + bos.write((byte) theValue); + } + + private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException { + String s = lineFileDocs.nextDoc().get("body"); + bos.write(s.getBytes(StandardCharsets.UTF_8)); + } + + private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException { + byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)]; + r.nextBytes(bytes); + bos.write(bytes); + } + + private void doTest(byte[] bytes) throws IOException { + final int length = bytes.length; + + ByteBuffersDataInput in = new ByteBuffersDataInput(List.of(ByteBuffer.wrap(bytes))); + ByteBuffersDataOutput out = new ByteBuffersDataOutput(); + + // let's compress + Compressor compressor = compressor(); + compressor.compress(in, out); + byte[] compressed = out.toArrayCopy(); + + // let's decompress + BytesRef outbytes = new BytesRef(); + Decompressor decompressor = decompressor(); + decompressor.decompress(new ByteArrayDataInput(compressed), length, 0, length, outbytes); + + // get the uncompressed array out of outbytes + byte[] restored = new byte[outbytes.length]; + System.arraycopy(outbytes.bytes, 0, restored, 0, outbytes.length); + + assertArrayEquals(bytes, restored); + } + +} diff --git a/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java new file mode 100644 index 0000000000000..78cf62c08f889 --- /dev/null +++ b/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.index.codec.customcodecs; + +import org.apache.lucene.codecs.compressing.Compressor; +import org.apache.lucene.codecs.compressing.Decompressor; + +/** + * Test ZSTD compression (with dictionary enabled) + */ +public class ZstdCompressorTests extends AbstractCompressorTests { + + private final Compressor compressor = new ZstdCompressionMode().newCompressor(); + private final Decompressor decompressor = new ZstdCompressionMode().newDecompressor(); + + @Override + Compressor compressor() { + return compressor; + } + + @Override + Decompressor decompressor() { + return decompressor; + } +} diff --git a/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java new file mode 100644 index 0000000000000..2eda81a6af2ab --- /dev/null +++ b/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.codec.customcodecs; + +import org.apache.lucene.codecs.compressing.Compressor; +import org.apache.lucene.codecs.compressing.Decompressor; + +/** + * Test ZSTD compression (with no dictionary). + */ +public class ZstdNoDictCompressorTests extends AbstractCompressorTests { + + private final Compressor compressor = new ZstdNoDictCompressionMode().newCompressor(); + private final Decompressor decompressor = new ZstdNoDictCompressionMode().newDecompressor(); + + @Override + Compressor compressor() { + return compressor; + } + + @Override + Decompressor decompressor() { + return decompressor; + } +} From 13d3f118e5010ddead891ad7371529dfa25d859c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 5 Apr 2023 10:35:14 -0400 Subject: [PATCH 08/28] Routine dependency updates: Netty 4.1.91.Final, ASM 9.5, ByteBuddy 1.14.3 (#6981) (#6986) (cherry picked from commit 509454169778bb8b26a1fe8441975f7c344ec4be) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 6 +++--- modules/lang-expression/licenses/asm-9.4.jar.sha1 | 1 - modules/lang-expression/licenses/asm-9.5.jar.sha1 | 1 + modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 | 1 - modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 | 1 + modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 | 1 - modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 | 1 + modules/lang-painless/licenses/asm-9.4.jar.sha1 | 1 - modules/lang-painless/licenses/asm-9.5.jar.sha1 | 1 + modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 | 1 - modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 | 1 + modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 | 1 - modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 | 1 + modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 | 1 + modules/lang-painless/licenses/asm-util-9.4.jar.sha1 | 1 - modules/lang-painless/licenses/asm-util-9.5.jar.sha1 | 1 + modules/transport-netty4/build.gradle | 7 ++++++- .../licenses/netty-buffer-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.91.Final.jar.sha1 | 1 + 
.../licenses/netty-codec-http-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.91.Final.jar.sha1 | 1 + ...etty-transport-native-unix-common-4.1.90.Final.jar.sha1 | 1 - ...etty-transport-native-unix-common-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.91.Final.jar.sha1 | 1 + ...etty-transport-native-unix-common-4.1.90.Final.jar.sha1 | 1 - ...etty-transport-native-unix-common-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-all-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-all-4.1.91.Final.jar.sha1 | 1 + plugins/transport-nio/build.gradle | 7 ++++++- .../licenses/netty-buffer-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.91.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.90.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.91.Final.jar.sha1 | 1 + 64 files changed, 46 insertions(+), 35 deletions(-) delete mode 100644 modules/lang-expression/licenses/asm-9.4.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.5.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.4.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.5.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.4.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.5.jar.sha1 
delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.91.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.91.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.91.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.91.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.91.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.91.Final.jar.sha1 delete mode 100644 
plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.91.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.91.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.91.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 3202547a55e67..0041b6df93708 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.maven:maven-model` from 3.6.2 to 3.9.1 - Bump `org.codehaus.jettison:jettison` from 1.5.3 to 1.5.4 ([#6878](https://github.com/opensearch-project/OpenSearch/pull/6878)) - Add `com.github.luben:zstd-jni:1.5.4-1` ([#3577](https://github.com/opensearch-project/OpenSearch/pull/3577)) +- Bump: Netty from 4.1.90.Final to 4.1.91.Final , ASM 9.4 to ASM 9.5, ByteBuddy 1.14.2 to 1.14.3 ([#6981](https://github.com/opensearch-project/OpenSearch/pull/6981)) ### Changed - Require MediaType in Strings.toString API ([#6009](https://github.com/opensearch-project/OpenSearch/pull/6009)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7143c151decfd..06957a509d4de 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -15,7 +15,7 @@ supercsv = 2.4.0 # Update to 2.17.2+ is breaking OpenSearchJsonLayout (see https://issues.apache.org/jira/browse/LOG4J2-3562) log4j = 2.17.1 slf4j = 1.7.36 -asm = 9.4 +asm = 9.5 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 @@ -25,7 +25,7 @@ guava = 31.1-jre # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.90.Final +netty = 4.1.91.Final joda = 2.12.2 # client dependencies @@ -48,7 +48,7 @@ junit = 4.13.2 hamcrest = 2.1 mockito = 5.2.0 objenesis = 3.2 -bytebuddy = 1.14.2 +bytebuddy = 1.14.3 # benchmark dependencies jmh = 1.35 diff --git a/modules/lang-expression/licenses/asm-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-9.4.jar.sha1 deleted file mode 100644 index 75f2b0fe9a112..0000000000000 --- a/modules/lang-expression/licenses/asm-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4e0e2d2e023aa317b7cfcfc916377ea348e07d1 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-9.5.jar.sha1 new file mode 100644 index 0000000000000..ea4aa3581dc87 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.5.jar.sha1 @@ -0,0 +1 @@ +dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 deleted file mode 100644 index e0e2a2f4e63e9..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fc2810ddbcbbec0a8bbccb3f8eda58321839912 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 new file mode 100644 index 0000000000000..5be792660c19f --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 @@ -0,0 +1 @@ +19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git 
a/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 deleted file mode 100644 index 50ce6d740aab7..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a99175a17d7fdc18cbcbd0e8ea6a5d276844190a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 new file mode 100644 index 0000000000000..fb42db6a9d15c --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 @@ -0,0 +1 @@ +fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-9.4.jar.sha1 deleted file mode 100644 index 75f2b0fe9a112..0000000000000 --- a/modules/lang-painless/licenses/asm-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4e0e2d2e023aa317b7cfcfc916377ea348e07d1 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-9.5.jar.sha1 new file mode 100644 index 0000000000000..ea4aa3581dc87 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.5.jar.sha1 @@ -0,0 +1 @@ +dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 deleted file mode 100644 index 850a070775e4d..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a5fec9dfc039448d4fd098fbaffcaf55373b223 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 new file mode 100644 index 0000000000000..9e87d3ce7d719 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 @@ -0,0 +1 @@ +490bacc77de7cbc0be1a30bb3471072d705be4a4 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 deleted file mode 100644 index e0e2a2f4e63e9..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8fc2810ddbcbbec0a8bbccb3f8eda58321839912 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 new file mode 100644 index 0000000000000..5be792660c19f --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 @@ -0,0 +1 @@ +19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 deleted file mode 100644 index 50ce6d740aab7..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a99175a17d7fdc18cbcbd0e8ea6a5d276844190a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 new file mode 100644 index 0000000000000..fb42db6a9d15c --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 @@ -0,0 +1 @@ +fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 deleted file mode 100644 index 8c5854f41bcda..0000000000000 
--- a/modules/lang-painless/licenses/asm-util-9.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab1e0a84b72561dbaf1ee260321e72148ebf4b19 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 new file mode 100644 index 0000000000000..5fffbfe655deb --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 @@ -0,0 +1 @@ +64b5a1fc8c1b15ed2efd6a063e976bc8d3dc5ffe \ No newline at end of file diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 9d5048ba337c7..291c2982791db 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -252,6 +252,11 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', - 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator' + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5' ) } diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 deleted file mode 100644 index 67604d11c1eca..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -937eb60c19c5f5c1326b96123c9ec3d33238d4d5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..158024bc892d5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +d8f180291c3501e931968ca7e40ae0323c4eacee \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 deleted file mode 100644 index c8fb04a021807..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9992a22c82e18b8fd4f34989535f3e504e55aa37 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..764a03d3d73d1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +3044b8e325e33f72c96ac1ea51dda85bef090cc0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 deleted file mode 100644 index 861599ce1d1d2..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19bbcd46f8ee0d118486f98eff22fe665b9689e5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.91.Final.jar.sha1 new file mode 100644 index 
0000000000000..ca956129d98c1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +4519d2ff470941f0086214b19c9acf992868112f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 deleted file mode 100644 index afb531805329e..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43597a09382c6ae2bef469a9b3a41e8a17850638 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..deaad405402f2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +93e5056462a242718e7689d81180d125c79d7723 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 deleted file mode 100644 index c98bfb52393d6..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64f6946ce4d9189cec5341d3f5f86ac5653099b5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..3e121e5de16b8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +444cf41e4fe28c47ffebba5e77b9458a12f938a1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 deleted file mode 100644 index b92177828aa56..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47c415d8c83f08b820ba00e6497a6cf19dd0155f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..bc57e2d01a2bf --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +04725d117d4b71ef0e743aa79062489b45472b26 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 deleted file mode 100644 index c7a77dbf6aaa8..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82d68da212f62b076c763f5efa9b072d2abc018f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..2562ece34790b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +c2f6bd7143194ca842b535546a405c06aa993934 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 deleted file mode 100644 index 
5f954b2595927..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e42282002cf22105e7e993651aead231246d0220 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..6f45d642c8c0d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +80990b5885b8b67be096d7090cba18f05c67120e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 deleted file mode 100644 index 3ef0c5df26b85..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebf5da8e6edf783d069d9aca346ff46c55772de6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.91.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..de151d86d4595 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +2c0242c69eee44ee559d02c564dbceee8bf0a5c7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 deleted file mode 100644 index 64caa309f2c05..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea3be877ea976b3d71e1a872958d32854b24db66 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.91.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..d57336af7f414 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +4ee7027e1653c6ee3f843191e0d932f29e8e14e1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 deleted file mode 100644 index 2738db8a6710a..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7397535a4e03d2f74c71aa2282eb7a2760ffc37b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.91.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..10d7478ce02ca --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +8f0a52677da411a8ab762c426d723c7f54471504 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 deleted file mode 100644 index 60bde875d0faf..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6ab526a43a14f7796434fa6a705c34201603235f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.91.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..116ed58f33a4d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +2e3e57eae1a61e4e5f558e39619186fec6c424d3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 deleted file mode 100644 index 6124f27a050e0..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9e6762805fe1bc854352dbc8020226f38674bce \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.91.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..5f96d34bab52c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +e1567967f5a85a469b10b7394e3e2b90ea5c0b12 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 deleted file mode 100644 index 5f954b2595927..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e42282002cf22105e7e993651aead231246d0220 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..6f45d642c8c0d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +80990b5885b8b67be096d7090cba18f05c67120e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 deleted file mode 100644 index 829204d91b994..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fb2bac7d106f8db84b111202bfb1c68a1aa89b8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.91.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..fd9d37b2f0c8d --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +d96d417b6c6b4a786d54418e09593c4b2292f437 \ No newline at end of file diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 8a6a6a334e1a9..7f7c75b8e2142 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -193,6 +193,11 @@ thirdPartyAudit { 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', - 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator' + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 
'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5' ) } diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 deleted file mode 100644 index 67604d11c1eca..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -937eb60c19c5f5c1326b96123c9ec3d33238d4d5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.91.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..158024bc892d5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +d8f180291c3501e931968ca7e40ae0323c4eacee \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 deleted file mode 100644 index c8fb04a021807..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9992a22c82e18b8fd4f34989535f3e504e55aa37 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.91.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..764a03d3d73d1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +3044b8e325e33f72c96ac1ea51dda85bef090cc0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 deleted file mode 100644 index 861599ce1d1d2..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19bbcd46f8ee0d118486f98eff22fe665b9689e5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.91.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..ca956129d98c1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +4519d2ff470941f0086214b19c9acf992868112f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 deleted file mode 100644 index afb531805329e..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43597a09382c6ae2bef469a9b3a41e8a17850638 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.91.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..deaad405402f2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +93e5056462a242718e7689d81180d125c79d7723 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 deleted file mode 100644 index c98bfb52393d6..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64f6946ce4d9189cec5341d3f5f86ac5653099b5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.91.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-handler-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..3e121e5de16b8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +444cf41e4fe28c47ffebba5e77b9458a12f938a1 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 deleted file mode 100644 index b92177828aa56..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47c415d8c83f08b820ba00e6497a6cf19dd0155f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.91.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..bc57e2d01a2bf --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +04725d117d4b71ef0e743aa79062489b45472b26 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 deleted file mode 100644 index c7a77dbf6aaa8..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.90.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -82d68da212f62b076c763f5efa9b072d2abc018f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.91.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.91.Final.jar.sha1 new file mode 100644 index 0000000000000..2562ece34790b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.91.Final.jar.sha1 @@ -0,0 +1 @@ +c2f6bd7143194ca842b535546a405c06aa993934 \ No newline at end of file From 5cf523fd2b43e407e3a4deb73e35c2f48ec33aaf Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 5 Apr 2023 09:24:44 -0700 Subject: [PATCH 09/28] [Segment Replication] Fix flaky testReplicaHasDiffFilesThanPrimary test (#6979) (#6989) * [Segment Replication] Fix flaky testReplicaHasDiffFilesThanPrimary test * Use existing doc assertion methods --------- (cherry picked from commit 4511354122f8b423bbb78d95b7ed6d699450a9b1) Signed-off-by: Suraj Singh Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../opensearch/indices/replication/SegmentReplicationIT.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 51c0c8710d39d..59c1c45ccd3ea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -738,6 +738,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { final SegmentInfos segmentInfos = SegmentInfos.readLatestCommit(replicaShard.store().directory()); replicaShard.finalizeReplication(segmentInfos); + ensureYellow(INDEX_NAME); final int docCount = scaledRandomIntBetween(10, 200); for (int i = 0; i < docCount; i++) { @@ -745,7 +746,8 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { refresh(INDEX_NAME); } // Refresh, this should trigger round of segment replication - assertBusy(() 
-> { assertDocCounts(docCount, replicaNode); }); + waitForSearchableDocs(docCount, primaryNode, replicaNode); + verifyStoreContent(); final IndexShard replicaAfterFailure = getIndexShard(replicaNode, INDEX_NAME); assertNotEquals(replicaAfterFailure.routingEntry().allocationId().getId(), replicaShard.routingEntry().allocationId().getId()); } From 5f1944190d764bf8de16c3fa405122b75bda3921 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 5 Apr 2023 12:36:42 -0700 Subject: [PATCH 10/28] [Segment Replication] Add PIT/Scroll compatibility with Segment Replication (#7010) * [Segment Replication] Add PIT/Scroll compatibility with Segment Replication #6644 (#6765) * Segment Replication - PIT/Scroll compatibility. This change makes updates to make PIT/Scroll queries compatible with Segment Replication. It does this by refcounting files when a new reader is created, and discarding those files after a reader is closed. Signed-off-by: Marc Handalian * Fix broken test. Signed-off-by: Marc Handalian * Fix test bug with PIT where snapshotted segments are queried instead of current store state. Signed-off-by: Marc Handalian * Address review comments and prevent temp file deletion during reader close Signed-off-by: Suraj Singh * Fix precommit failure Signed-off-by: Suraj Singh * Use last committed segment infos reference from replication engine Signed-off-by: Suraj Singh * Clean up and prevent incref on segment info file copied from primary Signed-off-by: Suraj Singh * Fix failing test Signed-off-by: Suraj Singh --------- Signed-off-by: Marc Handalian Signed-off-by: Suraj Singh Co-authored-by: Marc Handalian * Add param definition causing precommit failure Signed-off-by: Suraj Singh * Remove unnecessary override annotation Signed-off-by: Suraj Singh --------- Signed-off-by: Marc Handalian Signed-off-by: Suraj Singh Co-authored-by: Marc Handalian --- CHANGELOG.md | 1 + .../replication/SegmentReplicationIT.java | 369 ++++++++++++++++++ .../allocator/BalancedShardsAllocator.java | 2 +- .../index/engine/NRTReplicationEngine.java | 24 +- .../engine/NRTReplicationReaderManager.java | 27 +- .../index/store/ReplicaFileTracker.java | 51 +++ .../org/opensearch/index/store/Store.java | 64 ++- .../replication/SegmentReplicationTarget.java | 4 +- 8 files changed, 530 insertions(+), 12 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 0041b6df93708..4fbb5fc1bac2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove 'cluster_manager' role attachment when using 'node.master' deprecated setting ([#6331](https://github.com/opensearch-project/OpenSearch/pull/6331)) - Add new cluster settings to ignore weighted round-robin routing and fallback to default behaviour. ([#6834](https://github.com/opensearch-project/OpenSearch/pull/6834)) - Add experimental support for ZSTD compression. ([#3577](https://github.com/opensearch-project/OpenSearch/pull/3577)) +- [Segment Replication] Add point in time and scroll query compatibility.
([#6644](https://github.com/opensearch-project/OpenSearch/pull/6644)) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.18.0 to 2.20.0 ([#6490](https://github.com/opensearch-project/OpenSearch/pull/6490)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 59c1c45ccd3ea..59713cf0642f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -17,8 +17,21 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.PitTestsUtil; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; @@ -29,15 +42,24 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexModule; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.NRTReplicationReaderManager; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.search.SearchService; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.search.internal.PitReaderContext; +import org.opensearch.search.sort.SortOrder; import org.opensearch.node.NodeClosedException; import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; @@ -46,14 +68,23 @@ import org.opensearch.transport.TransportService; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import static java.util.Arrays.asList; +import static 
org.opensearch.action.search.PitTestsUtil.assertSegments; +import static org.opensearch.action.search.SearchContextId.decode; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.indices.replication.SegmentReplicationTarget.REPLICATION_PREFIX; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits; @@ -836,4 +867,342 @@ public void testPressureServiceStats() throws Exception { }); } } + + /** + * Tests a scroll query on the replica + * @throws Exception + */ + public void testScrollCreatedOnReplica() throws Exception { + // create the cluster with one primary node containing primary shard and replica node containing replica shard + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + // index 100 docs + for (int i = 0; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + } + assertBusy( + () -> assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ) + ); + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + final SegmentInfos segmentInfos = replicaShard.getLatestSegmentInfosAndCheckpoint().v1().get(); + final Collection snapshottedSegments = segmentInfos.files(false); + // opens a scrolled query before a flush is called. 
+ // this is for testing scroll segment consistency between refresh and flush + SearchResponse searchResponse = client(replica).prepareSearch() + .setQuery(matchAllQuery()) + .setIndices(INDEX_NAME) + .setRequestCache(false) + .setPreference("_only_local") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .addSort("field", SortOrder.ASC) + .setSize(10) + .setScroll(TimeValue.timeValueDays(1)) + .get(); + + // force call flush + flush(INDEX_NAME); + + for (int i = 3; i < 50; i++) { + client().prepareDelete(INDEX_NAME, String.valueOf(i)).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + refresh(INDEX_NAME); + if (randomBoolean()) { + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + flush(INDEX_NAME); + } + } + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + // Test stats + logger.info("--> Collect all scroll query hits"); + long scrollHits = 0; + do { + scrollHits += searchResponse.getHits().getHits().length; + searchResponse = client(replica).prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueDays(1)).get(); + assertAllSuccessful(searchResponse); + } while (searchResponse.getHits().getHits().length > 0); + + List currentFiles = List.of(replicaShard.store().directory().listAll()); + assertTrue("Files should be preserved", currentFiles.containsAll(snapshottedSegments)); + + client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); + + currentFiles = List.of(replicaShard.store().directory().listAll()); + assertFalse("Files should be cleaned up post scroll clear request", currentFiles.containsAll(snapshottedSegments)); + assertEquals(100, scrollHits); + } + + /** + * Tests that when scroll query is cleared, it does not delete the temporary replication files, which are part of + * ongoing round of segment replication + * + * @throws Exception + */ + public void testScrollWithOngoingSegmentReplication() throws Exception { + // create the cluster with one primary node containing primary shard and replica node containing replica shard + final String primary = internalCluster().startNode(); + prepareCreate( + INDEX_NAME, + Settings.builder() + // we want to control refreshes + .put("index.refresh_interval", -1) + ).get(); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startNode(); + ensureGreen(INDEX_NAME); + + final int initialDocCount = 10; + final int finalDocCount = 20; + for (int i = 0; i < initialDocCount; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + // catch up replica with primary + refresh(INDEX_NAME); + assertBusy( + () -> assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ) + ); + logger.info("--> Create scroll query"); + // opens 
a scrolled query before a flush is called. + SearchResponse searchResponse = client(replica).prepareSearch() + .setQuery(matchAllQuery()) + .setIndices(INDEX_NAME) + .setRequestCache(false) + .setPreference("_only_local") + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .addSort("field", SortOrder.ASC) + .setSize(10) + .setScroll(TimeValue.timeValueDays(1)) + .get(); + + // force call flush + flush(INDEX_NAME); + + // Index more documents + for (int i = initialDocCount; i < finalDocCount; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + // Block file copy operation to ensure replica has few temporary replication files + CountDownLatch blockFileCopy = new CountDownLatch(1); + CountDownLatch waitForFileCopy = new CountDownLatch(1); + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primary + )); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replica), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) { + FileChunkRequest req = (FileChunkRequest) request; + logger.debug("file chunk [{}] lastChunk: {}", req, req.lastChunk()); + if (req.name().endsWith("cfs") && req.lastChunk()) { + try { + waitForFileCopy.countDown(); + logger.info("--> Waiting for file copy"); + blockFileCopy.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + connection.sendRequest(requestId, action, request, options); + } + ); + + // perform refresh to start round of segment replication + refresh(INDEX_NAME); + + // wait for segrep to start and copy temporary files + waitForFileCopy.await(); + + // verify replica contains temporary files + IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + List temporaryFiles = Arrays.stream(replicaShard.store().directory().listAll()) + .filter(fileName -> fileName.startsWith(REPLICATION_PREFIX)) + .collect(Collectors.toList()); + logger.info("--> temporaryFiles {}", temporaryFiles); + assertTrue(temporaryFiles.size() > 0); + + // Clear scroll query, this should clean up files on replica + client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); + + // verify temporary files still exist + replicaShard = getIndexShard(replica, INDEX_NAME); + List temporaryFilesPostClear = Arrays.stream(replicaShard.store().directory().listAll()) + .filter(fileName -> fileName.startsWith(REPLICATION_PREFIX)) + .collect(Collectors.toList()); + logger.info("--> temporaryFilesPostClear {}", temporaryFilesPostClear); + + // Unblock segment replication + blockFileCopy.countDown(); + + assertEquals(temporaryFiles.size(), temporaryFilesPostClear.size()); + assertTrue(temporaryFilesPostClear.containsAll(temporaryFiles)); + + // wait for replica to catch up and verify doc count + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + verifyStoreContent(); + waitForSearchableDocs(finalDocCount, primary, replica); + } + + public void testPitCreatedOnReplica() throws Exception { + final String primary = internalCluster().startNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = 
internalCluster().startNode(); + ensureGreen(INDEX_NAME); + client().prepareIndex(INDEX_NAME) + .setId("1") + .setSource("foo", randomInt()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + + client().prepareIndex(INDEX_NAME) + .setId("2") + .setSource("foo", randomInt()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + for (int i = 3; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource("foo", randomInt()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + } + // wait until replication finishes, then make the pit request. + assertBusy( + () -> assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ) + ); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), false); + request.setPreference("_only_local"); + request.setIndices(new String[] { INDEX_NAME }); + ActionFuture execute = client(replica).execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchResponse searchResponse = client(replica).prepareSearch(INDEX_NAME) + .setSize(10) + .setPreference("_only_local") + .setRequestCache(false) + .addSort("foo", SortOrder.ASC) + .searchAfter(new Object[] { 30 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getTotalShards()); + FlushRequest flushRequest = Requests.flushRequest(INDEX_NAME); + client().admin().indices().flush(flushRequest).get(); + final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); + + // fetch the segments snapshotted when the reader context was created. 
+ Collection snapshottedSegments; + SearchService searchService = internalCluster().getInstance(SearchService.class, replica); + NamedWriteableRegistry registry = internalCluster().getInstance(NamedWriteableRegistry.class, replica); + final PitReaderContext pitReaderContext = searchService.getPitReaderContext( + decode(registry, pitResponse.getId()).shards().get(replicaShard.routingEntry().shardId()).getSearchContextId() + ); + try (final Engine.Searcher searcher = pitReaderContext.acquireSearcher("test")) { + final StandardDirectoryReader standardDirectoryReader = NRTReplicationReaderManager.unwrapStandardReader( + (OpenSearchDirectoryReader) searcher.getDirectoryReader() + ); + final SegmentInfos infos = standardDirectoryReader.getSegmentInfos(); + snapshottedSegments = infos.files(false); + } + + flush(INDEX_NAME); + for (int i = 101; i < 200; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource("foo", randomInt()) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + refresh(INDEX_NAME); + if (randomBoolean()) { + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + flush(INDEX_NAME); + } + } + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(true).get(); + assertBusy(() -> { + assertEquals( + getIndexShard(primary, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion(), + getIndexShard(replica, INDEX_NAME).getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + // Test stats + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + indicesStatsRequest.indices(INDEX_NAME); + indicesStatsRequest.all(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); + long pitCurrent = indicesStatsResponse.getIndex(INDEX_NAME).getTotal().search.getTotal().getPitCurrent(); + long openContexts = indicesStatsResponse.getIndex(INDEX_NAME).getTotal().search.getOpenContexts(); + assertEquals(1, pitCurrent); + assertEquals(1, openContexts); + SearchResponse resp = client(replica).prepareSearch(INDEX_NAME) + .setSize(10) + .setPreference("_only_local") + .addSort("foo", SortOrder.ASC) + .searchAfter(new Object[] { 30 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .setRequestCache(false) + .get(); + PitTestsUtil.assertUsingGetAllPits(client(replica), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, INDEX_NAME, 1, client(replica), pitResponse.getId()); + + List currentFiles = List.of(replicaShard.store().directory().listAll()); + assertTrue("Files should be preserved", currentFiles.containsAll(snapshottedSegments)); + + // delete the PIT + DeletePitRequest deletePITRequest = new DeletePitRequest(pitResponse.getId()); + client().execute(DeletePitAction.INSTANCE, deletePITRequest).actionGet(); + + currentFiles = List.of(replicaShard.store().directory().listAll()); + assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments)); + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java 
b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 0ff0eeba7d394..6ba8e5d893bc0 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -162,7 +162,7 @@ private void setWeightFunction(float indexBalance, float shardBalanceFactor) { /** * When primary shards balance is desired, enable primary shard balancing constraints - * @param preferPrimaryShardBalance + * @param preferPrimaryShardBalance boolean to prefer balancing by primary shard */ private void setPreferPrimaryShardBalance(boolean preferPrimaryShardBalance) { this.preferPrimaryShardBalance = preferPrimaryShardBalance; diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 8071e94d1426d..da3f914d8bd7e 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -70,7 +70,7 @@ public NRTReplicationEngine(EngineConfig engineConfig) { WriteOnlyTranslogManager translogManagerRef = null; try { lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - readerManager = new NRTReplicationReaderManager(OpenSearchDirectoryReader.wrap(getDirectoryReader(), shardId)); + readerManager = buildReaderManager(); final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( this.lastCommittedSegmentInfos.getUserData().entrySet() ); @@ -121,6 +121,28 @@ public void onAfterTranslogSync() { } } + private NRTReplicationReaderManager buildReaderManager() throws IOException { + return new NRTReplicationReaderManager( + OpenSearchDirectoryReader.wrap(getDirectoryReader(), shardId), + store::incRefFileDeleter, + (files) -> { + store.decRefFileDeleter(files); + try { + store.cleanupAndPreserveLatestCommitPoint( + "On reader closed", + getLatestSegmentInfos(), + getLastCommittedSegmentInfos(), + false + ); + } catch (IOException e) { + // Log but do not rethrow - we can try cleaning up again after next replication cycle. + // If that were to fail, the shard will as well. + logger.error("Unable to clean store after reader closed", e); + } + } + ); + } + public TranslogManager translogManager() { return translogManager; } diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java index 00748acb1d76d..9ec484ebfd383 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java @@ -22,8 +22,10 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.Objects; +import java.util.function.Consumer; /** * This is an extension of {@link OpenSearchReaderManager} for use with {@link NRTReplicationEngine}. 
@@ -35,17 +37,27 @@ public class NRTReplicationReaderManager extends OpenSearchReaderManager { private final static Logger logger = LogManager.getLogger(NRTReplicationReaderManager.class); private volatile SegmentInfos currentInfos; + private Consumer> onReaderClosed; + private Consumer> onNewReader; /** * Creates and returns a new SegmentReplicationReaderManager from the given * already-opened {@link OpenSearchDirectoryReader}, stealing * the incoming reference. * - * @param reader the SegmentReplicationReaderManager to use for future reopens + * @param reader - The SegmentReplicationReaderManager to use for future reopens. + * @param onNewReader - Called when a new reader is created. + * @param onReaderClosed - Called when a reader is closed. */ - NRTReplicationReaderManager(OpenSearchDirectoryReader reader) { + NRTReplicationReaderManager( + OpenSearchDirectoryReader reader, + Consumer> onNewReader, + Consumer> onReaderClosed + ) { super(reader); currentInfos = unwrapStandardReader(reader).getSegmentInfos(); + this.onNewReader = onNewReader; + this.onReaderClosed = onReaderClosed; } @Override @@ -60,6 +72,7 @@ protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader re for (LeafReaderContext ctx : standardDirectoryReader.leaves()) { subs.add(ctx.reader()); } + final Collection files = currentInfos.files(false); DirectoryReader innerReader = StandardDirectoryReader.open(referenceToRefresh.directory(), currentInfos, subs, null); final DirectoryReader softDeletesDirectoryReaderWrapper = new SoftDeletesDirectoryReaderWrapper( innerReader, @@ -68,7 +81,13 @@ protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader re logger.trace( () -> new ParameterizedMessage("updated to SegmentInfosVersion=" + currentInfos.getVersion() + " reader=" + innerReader) ); - return OpenSearchDirectoryReader.wrap(softDeletesDirectoryReaderWrapper, referenceToRefresh.shardId()); + final OpenSearchDirectoryReader reader = OpenSearchDirectoryReader.wrap( + softDeletesDirectoryReaderWrapper, + referenceToRefresh.shardId() + ); + onNewReader.accept(files); + OpenSearchDirectoryReader.addReaderCloseListener(reader, key -> onReaderClosed.accept(files)); + return reader; } /** @@ -89,7 +108,7 @@ public SegmentInfos getSegmentInfos() { return currentInfos; } - private StandardDirectoryReader unwrapStandardReader(OpenSearchDirectoryReader reader) { + public static StandardDirectoryReader unwrapStandardReader(OpenSearchDirectoryReader reader) { final DirectoryReader delegate = reader.getDelegate(); if (delegate instanceof SoftDeletesDirectoryReaderWrapper) { return (StandardDirectoryReader) ((SoftDeletesDirectoryReaderWrapper) delegate).getDelegate(); diff --git a/server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java new file mode 100644 index 0000000000000..0ec282619337c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/ReplicaFileTracker.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +/** + * This class is a version of Lucene's ReplicaFileDeleter class used to keep track of + * segment files that should be preserved on replicas between replication events. 
+ * The difference is this component does not actually perform any deletions, it only handles refcounts. + * Our deletions are made through Store.java. + * + * https://github.com/apache/lucene/blob/main/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java + * + * @opensearch.internal + */ +final class ReplicaFileTracker { + + private final Map refCounts = new HashMap<>(); + + public synchronized void incRef(Collection fileNames) { + for (String fileName : fileNames) { + refCounts.merge(fileName, 1, Integer::sum); + } + } + + public synchronized void decRef(Collection fileNames) { + for (String fileName : fileNames) { + Integer curCount = refCounts.get(fileName); + assert curCount != null : "fileName=" + fileName; + assert curCount > 0; + if (curCount == 1) { + refCounts.remove(fileName); + } else { + refCounts.put(fileName, curCount - 1); + } + } + } + + public synchronized boolean canDelete(String fileName) { + return refCounts.containsKey(fileName) == false; + } +} diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 69de85cd23820..f923532b3d9ad 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -124,6 +124,7 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.seqno.SequenceNumbers.LOCAL_CHECKPOINT_KEY; import static org.opensearch.index.store.Store.MetadataSnapshot.loadMetadata; +import static org.opensearch.indices.replication.SegmentReplicationTarget.REPLICATION_PREFIX; /** * A Store provides plain access to files written by an opensearch index shard. Each shard @@ -182,6 +183,10 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref private final ShardLock shardLock; private final OnClose onClose; + // used to ref count files when a new Reader is opened for PIT/Scroll queries + // prevents segment files deletion until the PIT/Scroll expires or is discarded + private final ReplicaFileTracker replicaFileTracker; + private final AbstractRefCounted refCounter = new AbstractRefCounted("store") { @Override protected void closeInternal() { @@ -202,6 +207,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); this.shardLock = shardLock; this.onClose = onClose; + this.replicaFileTracker = indexSettings.isSegRepEnabled() ? new ReplicaFileTracker() : null; assert onClose != null; assert shardLock != null; @@ -782,9 +788,10 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } /** - * Segment Replication method - + * Segment Replication method * This method deletes every file in this store that is not referenced by the passed in SegmentInfos or * part of the latest on-disk commit point. + * * This method is used for segment replication when the in memory SegmentInfos can be ahead of the on disk segment file. * In this case files from both snapshots must be preserved. Verification has been done that all files are present on disk. * @param reason the reason for this cleanup operation logged for each deleted file @@ -792,24 +799,59 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. 
*/ public void cleanupAndPreserveLatestCommitPoint(String reason, SegmentInfos infos) throws IOException { + this.cleanupAndPreserveLatestCommitPoint(reason, infos, readLastCommittedSegmentsInfo(), true); + } + + /** + * Segment Replication method + * + * Similar to {@link Store#cleanupAndPreserveLatestCommitPoint(String, SegmentInfos)} with extra parameters for cleanup + * + * This method deletes every file in this store. Except + * 1. Files referenced by the passed in SegmentInfos, usually in-memory segment infos copied from primary + * 2. Files part of the passed in segment infos, typically the last committed segment info + * 3. Files incremented by active reader for pit/scroll queries + * 4. Temporary replication file if passed in deleteTempFiles is true. + * + * @param reason the reason for this cleanup operation logged for each deleted file + * @param infos {@link SegmentInfos} Files from this infos will be preserved on disk if present. + * @param lastCommittedSegmentInfos {@link SegmentInfos} Last committed segment infos + * @param deleteTempFiles Does this clean up delete temporary replication files + * + * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. + */ + public void cleanupAndPreserveLatestCommitPoint( + String reason, + SegmentInfos infos, + SegmentInfos lastCommittedSegmentInfos, + boolean deleteTempFiles + ) throws IOException { assert indexSettings.isSegRepEnabled(); // fetch a snapshot from the latest on disk Segments_N file. This can be behind // the passed in local in memory snapshot, so we want to ensure files it references are not removed. metadataLock.writeLock().lock(); try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { - cleanupFiles(reason, getMetadata(readLastCommittedSegmentsInfo()), infos.files(true)); + cleanupFiles(reason, getMetadata(lastCommittedSegmentInfos), infos.files(true), deleteTempFiles); } finally { metadataLock.writeLock().unlock(); } } - private void cleanupFiles(String reason, MetadataSnapshot localSnapshot, @Nullable Collection additionalFiles) - throws IOException { + private void cleanupFiles( + String reason, + MetadataSnapshot localSnapshot, + @Nullable Collection additionalFiles, + boolean deleteTempFiles + ) throws IOException { assert metadataLock.isWriteLockedByCurrentThread(); for (String existingFile : directory.listAll()) { if (Store.isAutogenerated(existingFile) || localSnapshot.contains(existingFile) - || (additionalFiles != null && additionalFiles.contains(existingFile))) { + || (additionalFiles != null && additionalFiles.contains(existingFile)) + // also ensure we are not deleting a file referenced by an active reader. 
+ || replicaFileTracker != null && replicaFileTracker.canDelete(existingFile) == false + // prevent temporary file deletion during reader cleanup + || deleteTempFiles == false && existingFile.startsWith(REPLICATION_PREFIX)) { // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete // checksum) continue; @@ -1909,4 +1951,16 @@ private static IndexWriterConfig newIndexWriterConfig() { // we also don't specify a codec here and merges should use the engines for this index .setMergePolicy(NoMergePolicy.INSTANCE); } + + public void incRefFileDeleter(Collection files) { + if (this.indexSettings.isSegRepEnabled()) { + this.replicaFileTracker.incRef(files); + } + } + + public void decRefFileDeleter(Collection files) { + if (this.indexSettings.isSegRepEnabled()) { + this.replicaFileTracker.decRef(files); + } + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 03d67f4aa2313..995ec58d8768f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -51,6 +51,8 @@ public class SegmentReplicationTarget extends ReplicationTarget { private final SegmentReplicationState state; protected final MultiFileWriter multiFileWriter; + public final static String REPLICATION_PREFIX = "replication."; + public ReplicationCheckpoint getCheckpoint() { return this.checkpoint; } @@ -85,7 +87,7 @@ protected void closeInternal() { @Override protected String getPrefix() { - return "replication." + UUIDs.randomBase64UUID() + "."; + return REPLICATION_PREFIX + UUIDs.randomBase64UUID() + "."; } @Override From 28dff102a46f15047eb34e608809d32dc25dac0e Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 5 Apr 2023 14:09:36 -0700 Subject: [PATCH 11/28] Add circuit breaker support for file cache (#6591) (#7011) (cherry picked from commit 06128a904964ce43813ebc8164417a5633d414a2) Signed-off-by: Kunal Kotwani --- .../remote/filecache/FileCacheBenchmark.java | 5 +- .../snapshots/SearchableSnapshotIT.java | 10 +- .../org/opensearch/env/NodeEnvironment.java | 82 +------------ .../store/remote/filecache/FileCache.java | 52 ++++++++- .../remote/filecache/FileCacheCleaner.java | 4 +- .../remote/filecache/FileCacheFactory.java | 14 ++- .../opensearch/indices/IndicesService.java | 11 +- .../opensearch/monitor/MonitorService.java | 6 +- .../org/opensearch/monitor/fs/FsProbe.java | 14 +-- .../org/opensearch/monitor/fs/FsService.java | 5 +- .../main/java/org/opensearch/node/Node.java | 71 +++++++++++- .../java/org/opensearch/node/NodeService.java | 10 +- .../opensearch/env/NodeEnvironmentTests.java | 49 -------- .../remote/filecache/FileCacheTests.java | 109 +++++++----------- .../remote/utils/TransferManagerTests.java | 8 +- .../opensearch/monitor/fs/FsProbeTests.java | 15 ++- .../java/org/opensearch/node/NodeTests.java | 60 ++++++++++ .../snapshots/SnapshotResiliencyTests.java | 8 +- 18 files changed, 291 insertions(+), 242 deletions(-) diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java index 03d541dbb7de5..298de3a259346 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java +++ 
b/benchmarks/src/main/java/org/opensearch/benchmark/store/remote/filecache/FileCacheBenchmark.java @@ -27,6 +27,8 @@ import org.openjdk.jmh.annotations.Threads; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.infra.Blackhole; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.index.store.remote.filecache.CachedIndexInput; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheFactory; @@ -91,7 +93,8 @@ public static class CacheParameters { public void setup() { fileCache = FileCacheFactory.createConcurrentLRUFileCache( (long) maximumNumberOfEntries * INDEX_INPUT.length(), - concurrencyLevel + concurrencyLevel, + new NoopCircuitBreaker(CircuitBreaker.REQUEST) ); for (long i = 0; i < maximumNumberOfEntries; i++) { final Path key = Paths.get(Long.toString(i)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java index f4a34ddf847de..6a536a298da38 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -27,12 +27,12 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.util.FeatureFlags; -import org.opensearch.env.NodeEnvironment; import org.opensearch.index.Index; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.node.Node; import org.opensearch.repositories.fs.FsRepository; import java.io.IOException; @@ -582,13 +582,13 @@ public void testCacheIndexFilesClearedOnDelete() throws Exception { */ private void assertCacheDirectoryReplicaAndIndexCount(int numCacheFolderCount, int numIndexCount) throws IOException { // Get the available NodeEnvironment instances - Iterable nodeEnvironments = internalCluster().getInstances(NodeEnvironment.class); + Iterable nodes = internalCluster().getInstances(Node.class); // Filter out search NodeEnvironment(s) since FileCache is initialized only on search nodes and // collect the path for all the cache locations on search nodes. 
- List searchNodeFileCachePaths = StreamSupport.stream(nodeEnvironments.spliterator(), false) - .filter(nodeEnv -> nodeEnv.fileCache() != null) - .map(nodeEnv -> nodeEnv.fileCacheNodePath().fileCachePath) + List searchNodeFileCachePaths = StreamSupport.stream(nodes.spliterator(), false) + .filter(node -> node.fileCache() != null) + .map(node -> node.getNodeEnvironment().fileCacheNodePath().fileCachePath) .collect(Collectors.toList()); // Walk through the cache directory on nodes diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 9862508ff03a0..d96b8f55fbc59 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -44,7 +44,6 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.NativeFSLockFactory; -import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -60,8 +59,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -73,15 +70,9 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.FsDirectoryFactory; -import org.opensearch.index.store.remote.filecache.FileCache; -import org.opensearch.index.store.remote.filecache.FileCacheFactory; -import org.opensearch.index.store.remote.filecache.FileCacheStats; -import org.opensearch.index.store.remote.utils.cache.CacheUsage; -import org.opensearch.index.store.remote.utils.cache.stats.CacheStats; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; -import org.opensearch.node.Node; import java.io.Closeable; import java.io.IOException; @@ -113,7 +104,6 @@ import java.util.stream.Stream; import static java.util.Collections.unmodifiableSet; -import static org.opensearch.node.Node.NODE_SEARCH_CACHE_SIZE_SETTING; /** * A component that holds all data paths for a single node. @@ -206,8 +196,6 @@ public String toString() { private final NodeMetadata nodeMetadata; - private FileCache fileCache; - /** * Maximum number of data nodes that should run in an environment. */ @@ -365,8 +353,6 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce this.nodePaths = nodeLock.nodePaths; this.fileCacheNodePath = nodePaths[0]; - initializeFileCache(settings); - this.nodeLockId = nodeLock.nodeId; if (logger.isDebugEnabled()) { @@ -404,42 +390,6 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } } - /** - * Initializes the search cache with a defined capacity. - * The capacity of the cache is based on user configuration for {@link Node#NODE_SEARCH_CACHE_SIZE_SETTING}. - * If the user doesn't configure the cache size, it fails if the node is a data + search node. - * Else it configures the size to 80% of available capacity for a dedicated search node, if not explicitly defined. 
- */ - private void initializeFileCache(Settings settings) throws IOException { - if (DiscoveryNode.isSearchNode(settings)) { - long capacity = NODE_SEARCH_CACHE_SIZE_SETTING.get(settings).getBytes(); - FsInfo.Path info = ExceptionsHelper.catchAsRuntimeException(() -> FsProbe.getFSInfo(this.fileCacheNodePath)); - long availableCapacity = info.getAvailable().getBytes(); - - // Initialize default values for cache if NODE_SEARCH_CACHE_SIZE_SETTING is not set. - if (capacity == 0) { - // If node is not a dedicated search node without configuration, prevent cache initialization - if (DiscoveryNode.getRolesFromSettings(settings).stream().anyMatch(role -> !DiscoveryNodeRole.SEARCH_ROLE.equals(role))) { - throw new SettingsException( - "Unable to initialize the " - + DiscoveryNodeRole.SEARCH_ROLE.roleName() - + "-" - + DiscoveryNodeRole.DATA_ROLE.roleName() - + " node: Missing value for configuration " - + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() - ); - } else { - capacity = 80 * availableCapacity / 100; - } - } - capacity = Math.min(capacity, availableCapacity); - fileCacheNodePath.fileCacheReservedSize = new ByteSizeValue(capacity, ByteSizeUnit.BYTES); - this.fileCache = FileCacheFactory.createConcurrentLRUFileCache(capacity); - List fileCacheDataPaths = collectFileCacheDataPath(this.fileCacheNodePath); - this.fileCache.restoreFromDirectory(fileCacheDataPaths); - } - } - /** * Resolve a specific nodes/{node.id} path for the specified path and node lock id. * @@ -1296,7 +1246,7 @@ private static boolean isIndexMetadataPath(Path path) { * Collect the path containing cache data in the indicated cache node path. * The returned paths will point to the shard data folder. */ - static List collectFileCacheDataPath(NodePath fileCacheNodePath) throws IOException { + public static List collectFileCacheDataPath(NodePath fileCacheNodePath) throws IOException { List indexSubPaths = new ArrayList<>(); Path fileCachePath = fileCacheNodePath.fileCachePath; if (Files.isDirectory(fileCachePath)) { @@ -1440,34 +1390,4 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } - - /** - * Returns the {@link FileCache} instance for remote search node - */ - public FileCache fileCache() { - return this.fileCache; - } - - /** - * Returns the current {@link FileCacheStats} for remote search node - */ - public FileCacheStats fileCacheStats() { - if (fileCache == null) { - return null; - } - - CacheStats stats = fileCache.stats(); - CacheUsage usage = fileCache.usage(); - return new FileCacheStats( - System.currentTimeMillis(), - usage.activeUsage(), - fileCache.capacity(), - usage.usage(), - stats.evictionWeight(), - stats.removeWeight(), - stats.replaceCount(), - stats.hitCount(), - stats.missCount() - ); - } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java index 2f5693415216b..073ca850a2c64 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCache.java @@ -8,6 +8,8 @@ package org.opensearch.index.store.remote.filecache; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.index.store.remote.utils.cache.CacheUsage; import org.opensearch.index.store.remote.utils.cache.RefCountedCache; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; @@ -43,8 +45,11 @@ public 
class FileCache implements RefCountedCache { private final SegmentedCache theCache; - public FileCache(SegmentedCache cache) { + private final CircuitBreaker circuitBreaker; + + public FileCache(SegmentedCache cache, CircuitBreaker circuitBreaker) { this.theCache = cache; + this.circuitBreaker = circuitBreaker; } public long capacity() { @@ -53,7 +58,9 @@ public long capacity() { @Override public CachedIndexInput put(Path filePath, CachedIndexInput indexInput) { - return theCache.put(filePath, indexInput); + CachedIndexInput cachedIndexInput = theCache.put(filePath, indexInput); + checkParentBreaker(filePath); + return cachedIndexInput; } @Override @@ -61,7 +68,9 @@ public CachedIndexInput compute( Path key, BiFunction remappingFunction ) { - return theCache.compute(key, remappingFunction); + CachedIndexInput cachedIndexInput = theCache.compute(key, remappingFunction); + checkParentBreaker(key); + return cachedIndexInput; } /** @@ -121,6 +130,24 @@ public CacheStats stats() { return theCache.stats(); } + /** + * Ensures that the PARENT breaker is not tripped when an entry is added to the cache + * @param filePath the path key for which entry is added + */ + private void checkParentBreaker(Path filePath) { + try { + circuitBreaker.addEstimateBytesAndMaybeBreak(0, "filecache_entry"); + } catch (CircuitBreakingException ex) { + theCache.remove(filePath); + throw new CircuitBreakingException( + "Unable to create file cache entries", + ex.getBytesWanted(), + ex.getByteLimit(), + ex.getDurability() + ); + } + } + /** * Restores the file cache instance performing a folder scan of the * {@link org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory#LOCAL_STORE_LOCATION} @@ -153,4 +180,23 @@ public void restoreFromDirectory(List fileCacheDataPaths) { } }); } + + /** + * Returns the current {@link FileCacheStats} + */ + public FileCacheStats fileCacheStats() { + CacheStats stats = stats(); + CacheUsage usage = usage(); + return new FileCacheStats( + System.currentTimeMillis(), + usage.activeUsage(), + capacity(), + usage.usage(), + stats.evictionWeight(), + stats.removeWeight(), + stats.replaceCount(), + stats.hitCount(), + stats.missCount() + ); + } } diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java index 838e6f2bf2fd2..a1411f71c0761 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java @@ -39,9 +39,9 @@ public class FileCacheCleaner implements IndexEventListener { private final NodeEnvironment nodeEnvironment; private final FileCache fileCache; - public FileCacheCleaner(NodeEnvironment nodeEnvironment) { + public FileCacheCleaner(NodeEnvironment nodeEnvironment, FileCache fileCache) { this.nodeEnvironment = nodeEnvironment; - this.fileCache = nodeEnvironment.fileCache(); + this.fileCache = fileCache; } /** diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java index 291f479f766f5..4d132eeb75826 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java @@ -9,6 +9,7 @@ package org.opensearch.index.store.remote.filecache; import 
org.apache.lucene.store.IndexInput; +import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.cache.RemovalReason; import org.opensearch.index.store.remote.utils.cache.SegmentedCache; import org.opensearch.index.store.remote.file.OnDemandBlockSnapshotIndexInput; @@ -37,15 +38,16 @@ * @opensearch.internal */ public class FileCacheFactory { - public static FileCache createConcurrentLRUFileCache(long capacity) { - return createFileCache(createDefaultBuilder().capacity(capacity).build()); + + public static FileCache createConcurrentLRUFileCache(long capacity, CircuitBreaker circuitBreaker) { + return createFileCache(createDefaultBuilder().capacity(capacity).build(), circuitBreaker); } - public static FileCache createConcurrentLRUFileCache(long capacity, int concurrencyLevel) { - return createFileCache(createDefaultBuilder().capacity(capacity).concurrencyLevel(concurrencyLevel).build()); + public static FileCache createConcurrentLRUFileCache(long capacity, int concurrencyLevel, CircuitBreaker circuitBreaker) { + return createFileCache(createDefaultBuilder().capacity(capacity).concurrencyLevel(concurrencyLevel).build(), circuitBreaker); } - private static FileCache createFileCache(SegmentedCache segmentedCache) { + private static FileCache createFileCache(SegmentedCache segmentedCache, CircuitBreaker circuitBreaker) { /* * Since OnDemandBlockSnapshotIndexInput.Builder.DEFAULT_BLOCK_SIZE is not overridden then it will be upper bound for max IndexInput * size on disk. A single IndexInput size should always be more than a single segment in segmented cache. A FileCache capacity might @@ -55,7 +57,7 @@ private static FileCache createFileCache(SegmentedCache if (segmentedCache.getPerSegmentCapacity() <= OnDemandBlockSnapshotIndexInput.Builder.DEFAULT_BLOCK_SIZE) { throw new IllegalStateException("FileSystem Cache per segment capacity is less than single IndexInput default block size"); } - return new FileCache(segmentedCache); + return new FileCache(segmentedCache, circuitBreaker); } private static SegmentedCache.Builder createDefaultBuilder() { diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index d26f99956ba48..b0d488a5b2cf7 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -293,6 +293,8 @@ public class IndicesService extends AbstractLifecycleComponent private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; + private final FileCacheCleaner fileCacheCleaner; + @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically @@ -321,7 +323,8 @@ public IndicesService( ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, - Supplier repositoriesServiceSupplier + Supplier repositoriesServiceSupplier, + FileCacheCleaner fileCacheCleaner ) { this.settings = settings; this.threadPool = threadPool; @@ -368,6 +371,7 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; + this.fileCacheCleaner = fileCacheCleaner; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. 
In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -436,7 +440,8 @@ public IndicesService( ValuesSourceRegistry valuesSourceRegistry, Map recoveryStateFactories, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, - Supplier repositoriesServiceSupplier + Supplier repositoriesServiceSupplier, + FileCacheCleaner fileCacheCleaner ) { this.settings = settings; this.threadPool = threadPool; @@ -483,6 +488,7 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; + this.fileCacheCleaner = fileCacheCleaner; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -770,7 +776,6 @@ public void onStoreClosed(ShardId shardId) { } } }; - final FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnv); finalListeners.add(onStoreClose); finalListeners.add(oldShardsStats); finalListeners.add(fileCacheCleaner); diff --git a/server/src/main/java/org/opensearch/monitor/MonitorService.java b/server/src/main/java/org/opensearch/monitor/MonitorService.java index 0e24eb094cd4d..bed638484f7c0 100644 --- a/server/src/main/java/org/opensearch/monitor/MonitorService.java +++ b/server/src/main/java/org/opensearch/monitor/MonitorService.java @@ -35,6 +35,7 @@ import org.opensearch.common.component.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.monitor.fs.FsService; import org.opensearch.monitor.jvm.JvmGcMonitorService; import org.opensearch.monitor.jvm.JvmService; @@ -57,12 +58,13 @@ public class MonitorService extends AbstractLifecycleComponent { private final JvmService jvmService; private final FsService fsService; - public MonitorService(Settings settings, NodeEnvironment nodeEnvironment, ThreadPool threadPool) throws IOException { + public MonitorService(Settings settings, NodeEnvironment nodeEnvironment, ThreadPool threadPool, FileCache fileCache) + throws IOException { this.jvmGcMonitorService = new JvmGcMonitorService(settings, threadPool); this.osService = new OsService(settings); this.processService = new ProcessService(settings); this.jvmService = new JvmService(settings); - this.fsService = new FsService(settings, nodeEnvironment); + this.fsService = new FsService(settings, nodeEnvironment, fileCache); } public OsService osService() { diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index f225d6528965f..ddef715af6a60 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -36,14 +36,13 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; -import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.PathUtils; -import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.env.NodeEnvironment; import 
org.opensearch.env.NodeEnvironment.NodePath; +import org.opensearch.index.store.remote.filecache.FileCache; import java.io.IOException; import java.nio.file.Files; @@ -64,12 +63,11 @@ public class FsProbe { private static final Logger logger = LogManager.getLogger(FsProbe.class); private final NodeEnvironment nodeEnv; + private final FileCache fileCache; - private final Settings settings; - - public FsProbe(NodeEnvironment nodeEnv, Settings settings) { + public FsProbe(NodeEnvironment nodeEnv, FileCache fileCache) { this.nodeEnv = nodeEnv; - this.settings = settings; + this.fileCache = fileCache; } public FsInfo stats(FsInfo previous) throws IOException { @@ -80,9 +78,9 @@ public FsInfo stats(FsInfo previous) throws IOException { FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length]; for (int i = 0; i < dataLocations.length; i++) { paths[i] = getFSInfo(dataLocations[i]); - if (settings != null && DiscoveryNode.isSearchNode(settings) && dataLocations[i].fileCacheReservedSize != ByteSizeValue.ZERO) { + if (fileCache != null && dataLocations[i].fileCacheReservedSize != ByteSizeValue.ZERO) { paths[i].fileCacheReserved = adjustForHugeFilesystems(dataLocations[i].fileCacheReservedSize.getBytes()); - paths[i].fileCacheUtilized = adjustForHugeFilesystems(nodeEnv.fileCacheStats().getUsed().getBytes()); + paths[i].fileCacheUtilized = adjustForHugeFilesystems(fileCache.usage().usage()); paths[i].available -= (paths[i].fileCacheReserved - paths[i].fileCacheUtilized); } } diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsService.java b/server/src/main/java/org/opensearch/monitor/fs/FsService.java index f0cd1eb94c73b..20ea4bd1448ad 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsService.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsService.java @@ -40,6 +40,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.SingleObjectCache; import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.store.remote.filecache.FileCache; import java.io.IOException; import java.util.function.Supplier; @@ -69,8 +70,8 @@ public class FsService { Property.NodeScope ); - public FsService(final Settings settings, final NodeEnvironment nodeEnvironment) { - final FsProbe probe = new FsProbe(nodeEnvironment, settings); + public FsService(final Settings settings, final NodeEnvironment nodeEnvironment, FileCache fileCache) { + final FsProbe probe = new FsProbe(nodeEnvironment, fileCache); final FsInfo initialValue = stats(probe, null); if (ALWAYS_REFRESH_SETTING.get(settings)) { assert REFRESH_INTERVAL_SETTING.exists(settings) == false; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 4c6c129794876..2fa74c65b57e4 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -35,19 +35,27 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; +import org.opensearch.ExceptionsHelper; import org.opensearch.common.SetOnce; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; +import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexingPressureService; import org.opensearch.tasks.TaskResourceTrackingService; 
import org.opensearch.threadpool.RunnableTaskExecutionListener; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheCleaner; +import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.NoopExtensionsManager; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.fs.FsProbe; import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; @@ -237,6 +245,7 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.REPLICATION_TYPE; +import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; /** @@ -357,6 +366,7 @@ public static class DiscoverySettings { private final NodeService nodeService; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference runnableTaskListener; + private FileCache fileCache; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -551,7 +561,6 @@ protected Node( for (Module pluginModule : pluginsService.createGuiceModules()) { modules.add(pluginModule); } - final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool); final FsHealthService fsHealthService = new FsHealthService( settings, clusterService.getClusterSettings(), @@ -587,6 +596,11 @@ protected Node( pluginCircuitBreakers, settingsModule.getClusterSettings() ); + // File cache will be initialized by the node once circuit breakers are in place. 
+ initializeFileCache(settings, circuitBreakerService.getBreaker(CircuitBreaker.REQUEST)); + final FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnvironment, fileCache); + final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, fileCache); + pluginsService.filterPlugins(CircuitBreakerPlugin.class).forEach(plugin -> { CircuitBreaker breaker = circuitBreakerService.getBreaker(plugin.getCircuitBreaker(settings).getName()); plugin.setCircuitBreaker(breaker); @@ -632,7 +646,7 @@ protected Node( final Map builtInDirectoryFactories = IndexModule.createBuiltInDirectoryFactories( repositoriesServiceReference::get, threadPool, - nodeEnvironment.fileCache() + fileCache ); final Map directoryFactories = new HashMap<>(); @@ -700,7 +714,8 @@ protected Node( searchModule.getValuesSourceRegistry(), recoveryStateFactories, remoteDirectoryFactory, - repositoriesServiceReference::get + repositoriesServiceReference::get, + fileCacheCleaner ); } else { indicesService = new IndicesService( @@ -725,7 +740,8 @@ protected Node( searchModule.getValuesSourceRegistry(), recoveryStateFactories, remoteDirectoryFactory, - repositoriesServiceReference::get + repositoriesServiceReference::get, + fileCacheCleaner ); } @@ -970,7 +986,7 @@ protected Node( indexingPressureService, searchModule.getValuesSourceRegistry().getUsageService(), searchBackpressureService, - nodeEnvironment + fileCache ); final SearchService searchService = newSearchService( @@ -1667,4 +1683,49 @@ DiscoveryNode getNode() { return localNode.get(); } } + + /** + * Initializes the search cache with a defined capacity. + * The capacity of the cache is based on user configuration for {@link Node#NODE_SEARCH_CACHE_SIZE_SETTING}. + * If the user doesn't configure the cache size, it fails if the node is a data + search node. + * Else it configures the size to 80% of available capacity for a dedicated search node, if not explicitly defined. + */ + private void initializeFileCache(Settings settings, CircuitBreaker circuitBreaker) throws IOException { + if (DiscoveryNode.isSearchNode(settings)) { + NodeEnvironment.NodePath fileCacheNodePath = nodeEnvironment.fileCacheNodePath(); + long capacity = NODE_SEARCH_CACHE_SIZE_SETTING.get(settings).getBytes(); + FsInfo.Path info = ExceptionsHelper.catchAsRuntimeException(() -> FsProbe.getFSInfo(fileCacheNodePath)); + long availableCapacity = info.getAvailable().getBytes(); + + // Initialize default values for cache if NODE_SEARCH_CACHE_SIZE_SETTING is not set. 
+ if (capacity == 0) { + // If node is not a dedicated search node without configuration, prevent cache initialization + if (DiscoveryNode.getRolesFromSettings(settings).stream().anyMatch(role -> !DiscoveryNodeRole.SEARCH_ROLE.equals(role))) { + throw new SettingsException( + "Unable to initialize the " + + DiscoveryNodeRole.SEARCH_ROLE.roleName() + + "-" + + DiscoveryNodeRole.DATA_ROLE.roleName() + + " node: Missing value for configuration " + + NODE_SEARCH_CACHE_SIZE_SETTING.getKey() + ); + } else { + capacity = 80 * availableCapacity / 100; + } + } + capacity = Math.min(capacity, availableCapacity); + fileCacheNodePath.fileCacheReservedSize = new ByteSizeValue(capacity, ByteSizeUnit.BYTES); + this.fileCache = FileCacheFactory.createConcurrentLRUFileCache(capacity, circuitBreaker); + List fileCacheDataPaths = collectFileCacheDataPath(fileCacheNodePath); + this.fileCache.restoreFromDirectory(fileCacheDataPaths); + } + } + + /** + * Returns the {@link FileCache} instance for remote search node + * Note: Visible for testing + */ + public FileCache fileCache() { + return this.fileCache; + } } diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index b4446085243df..94653d6947ad4 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -45,9 +45,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsFilter; import org.opensearch.discovery.Discovery; -import org.opensearch.env.NodeEnvironment; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.ingest.IngestService; @@ -87,7 +87,7 @@ public class NodeService implements Closeable { private final SearchBackpressureService searchBackpressureService; private final ClusterService clusterService; private final Discovery discovery; - private final NodeEnvironment nodeEnvironment; + private final FileCache fileCache; NodeService( Settings settings, @@ -108,7 +108,7 @@ public class NodeService implements Closeable { IndexingPressureService indexingPressureService, AggregationUsageService aggregationUsageService, SearchBackpressureService searchBackpressureService, - NodeEnvironment nodeEnvironment + FileCache fileCache ) { this.settings = settings; this.threadPool = threadPool; @@ -128,7 +128,7 @@ public class NodeService implements Closeable { this.aggregationUsageService = aggregationUsageService; this.searchBackpressureService = searchBackpressureService; this.clusterService = clusterService; - this.nodeEnvironment = nodeEnvironment; + this.fileCache = fileCache; clusterService.addStateApplier(ingestService); } @@ -209,7 +209,7 @@ public NodeStats stats( searchBackpressure ? this.searchBackpressureService.nodeStats() : null, clusterManagerThrottling ? this.clusterService.getClusterManagerService().getThrottlingStats() : null, weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, - fileCacheStats ? nodeEnvironment.fileCacheStats() : null + fileCacheStats && fileCache != null ? 
fileCache.fileCacheStats() : null ); } diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index 808d3c0a7ffea..d886922d56882 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -38,9 +38,6 @@ import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.set.Sets; import org.opensearch.core.internal.io.IOUtils; @@ -48,8 +45,6 @@ import org.opensearch.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardId; -import org.opensearch.monitor.fs.FsInfo; -import org.opensearch.monitor.fs.FsProbe; import org.opensearch.node.Node; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; @@ -70,7 +65,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import static org.opensearch.test.NodeRoles.addRoles; import static org.opensearch.test.NodeRoles.nonDataNode; import static org.opensearch.test.NodeRoles.nonClusterManagerNode; import static org.hamcrest.CoreMatchers.equalTo; @@ -589,49 +583,6 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { verifyFailsOnShardData(noDataNoClusterManagerSettings, indexPath, shardDataDirName); } - public void testSearchFileCacheConfiguration() throws IOException { - Settings searchRoleSettings = addRoles(buildEnvSettings(Settings.EMPTY), Set.of(DiscoveryNodeRole.SEARCH_ROLE)); - ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); - Settings searchRoleSettingsWithConfig = Settings.builder() - .put(searchRoleSettings) - .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize) - .build(); - - Settings onlySearchRoleSettings = Settings.builder() - .put(searchRoleSettings) - .put( - NodeRoles.removeRoles( - searchRoleSettings, - Set.of( - DiscoveryNodeRole.DATA_ROLE, - DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, - DiscoveryNodeRole.INGEST_ROLE, - DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE - ) - ) - ) - .build(); - - // Test exception thrown with configuration missing - assertThrows(SettingsException.class, () -> newNodeEnvironment(searchRoleSettings)); - - // Test data + search node with defined cache size - try (NodeEnvironment env = newNodeEnvironment(searchRoleSettingsWithConfig)) { - NodeEnvironment.NodePath fileCacheNodePath = env.fileCacheNodePath(); - assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); - } - - // Test dedicated search node with no configuration - try (NodeEnvironment env = newNodeEnvironment(onlySearchRoleSettings)) { - NodeEnvironment.NodePath fileCacheNodePath = env.fileCacheNodePath(); - assertTrue(fileCacheNodePath.fileCacheReservedSize.getBytes() > 0); - FsProbe fsProbe = new FsProbe(env, onlySearchRoleSettings); - FsInfo fsInfo = fsProbe.stats(null); - FsInfo.Path cachePathInfo = fsInfo.iterator().next(); - assertEquals(cachePathInfo.getFileCacheReserved().getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); - } - } - private void verifyFailsOnShardData(Settings settings, Path indexPath, String 
shardDataDirName) { IllegalStateException ex = expectThrows( IllegalStateException.class, diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java index 72ac9837537e1..b7fe3999af6f3 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java @@ -8,11 +8,14 @@ package org.opensearch.index.store.remote.filecache; -import org.apache.lucene.store.IndexInput; import org.junit.Before; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.breaker.TestCircuitBreaker; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.index.store.remote.utils.cache.CacheUsage; import org.opensearch.test.OpenSearchTestCase; @@ -37,7 +40,13 @@ public void init() throws Exception { } private FileCache createFileCache(long capacity) { - return FileCacheFactory.createConcurrentLRUFileCache(capacity, CONCURRENCY_LEVEL); + return FileCacheFactory.createConcurrentLRUFileCache(capacity, CONCURRENCY_LEVEL, new NoopCircuitBreaker(CircuitBreaker.REQUEST)); + } + + private FileCache createCircuitBreakingFileCache(long capacity) { + TestCircuitBreaker testCircuitBreaker = new TestCircuitBreaker(); + testCircuitBreaker.startBreaking(); + return FileCacheFactory.createConcurrentLRUFileCache(capacity, CONCURRENCY_LEVEL, testCircuitBreaker); } private Path createPath(String middle) { @@ -58,14 +67,16 @@ private void createFile(String nodeId, String indexName, String shardId, String } public void testCreateCacheWithSmallSegments() { - assertThrows(IllegalStateException.class, () -> { FileCacheFactory.createConcurrentLRUFileCache(1000, CONCURRENCY_LEVEL); }); + assertThrows(IllegalStateException.class, () -> { + FileCacheFactory.createConcurrentLRUFileCache(1000, CONCURRENCY_LEVEL, new NoopCircuitBreaker(CircuitBreaker.REQUEST)); + }); } // test get method public void testGet() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + fileCache.put(createPath(Integer.toString(i)), new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)); } // verify all blocks are put into file cache for (int i = 0; i < 4; i++) { @@ -87,10 +98,17 @@ public void testPutThrowException() { }); } + public void testPutThrowCircuitBreakingException() { + FileCache fileCache = createCircuitBreakingFileCache(GIGA_BYTES); + Path path = createPath("0"); + assertThrows(CircuitBreakingException.class, () -> fileCache.put(path, new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES))); + assertNull(fileCache.get(path)); + } + public void testCompute() { FileCache fileCache = createFileCache(GIGA_BYTES); Path path = createPath("0"); - fileCache.put(path, new FakeIndexInput(8 * MEGA_BYTES)); + fileCache.put(path, new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)); fileCache.incRef(path); fileCache.compute(path, (p, i) -> null); // item will be removed @@ -104,10 +122,20 @@ public void testComputeThrowException() { }); } + public void testComputeThrowCircuitBreakingException() { + FileCache fileCache 
= createCircuitBreakingFileCache(GIGA_BYTES); + Path path = createPath("0"); + assertThrows( + CircuitBreakingException.class, + () -> fileCache.compute(path, (p, i) -> new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)) + ); + assertNull(fileCache.get(path)); + } + public void testRemove() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + fileCache.put(createPath(Integer.toString(i)), new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)); } fileCache.remove(createPath("0")); @@ -128,7 +156,7 @@ public void testRemoveThrowException() { public void testIncDecRef() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + fileCache.put(createPath(Integer.toString(i)), new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)); } // try to evict previous IndexInput @@ -181,7 +209,7 @@ public void testCapacity() { public void testSize() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + fileCache.put(createPath(Integer.toString(i)), new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)); } // test file cache size assertEquals(fileCache.size(), 4); @@ -201,7 +229,11 @@ public void testPrune() { } public void testUsage() { - FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(16 * MEGA_BYTES, 1); + FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache( + 16 * MEGA_BYTES, + 1, + new NoopCircuitBreaker(CircuitBreaker.REQUEST) + ); putAndDecRef(fileCache, 0, 16 * MEGA_BYTES); CacheUsage expectedCacheUsage = new CacheUsage(16 * MEGA_BYTES, 0); @@ -213,7 +245,7 @@ public void testUsage() { public void testStats() { FileCache fileCache = createFileCache(GIGA_BYTES); for (int i = 0; i < 4; i++) { - fileCache.put(createPath(Integer.toString(i)), new FakeIndexInput(8 * MEGA_BYTES)); + fileCache.put(createPath(Integer.toString(i)), new FileCachedIndexInput.ClosedIndexInput(8 * MEGA_BYTES)); } // cache hits fileCache.get(createPath("0")); @@ -247,62 +279,7 @@ public void testCacheRestore() throws IOException { private void putAndDecRef(FileCache cache, int path, long indexInputSize) { final Path key = createPath(Integer.toString(path)); - cache.put(key, new FakeIndexInput(indexInputSize)); + cache.put(key, new FileCachedIndexInput.ClosedIndexInput(indexInputSize)); cache.decRef(key); } - - final class FakeIndexInput extends CachedIndexInput { - - private final long length; - - public FakeIndexInput(long length) { - super("dummy"); - this.length = length; - } - - @Override - public void close() throws IOException { - // no-op - } - - @Override - public long getFilePointer() { - throw new UnsupportedOperationException("DummyIndexInput doesn't support getFilePointer()."); - } - - @Override - public void seek(long pos) throws IOException { - throw new UnsupportedOperationException("DummyIndexInput doesn't support seek()."); - } - - @Override - public long length() { - return length; - } - - @Override - public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { - throw new UnsupportedOperationException("DummyIndexInput couldn't be sliced."); - } - - @Override - public byte readByte() throws IOException { - throw new UnsupportedOperationException("DummyIndexInput doesn't support 
read."); - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - throw new UnsupportedOperationException("DummyIndexInput doesn't support read."); - } - - @Override - public IndexInput clone() { - throw new UnsupportedOperationException("DummyIndexInput couldn't be cloned."); - } - - @Override - public boolean isClosed() { - return true; - } - } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java index f3049c504f295..804101038fbed 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/utils/TransferManagerTests.java @@ -24,6 +24,8 @@ import org.junit.After; import org.junit.Before; import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheFactory; @@ -42,7 +44,11 @@ @ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class) public class TransferManagerTests extends OpenSearchTestCase { private static final int EIGHT_MB = 1024 * 1024 * 8; - private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(EIGHT_MB * 2, 1); + private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache( + EIGHT_MB * 2, + 1, + new NoopCircuitBreaker(CircuitBreaker.REQUEST) + ); private MMapDirectory directory; private BlobContainer blobContainer; private TransferManager transferManager; diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java index 25da974a9f1dc..216594f24e2ea 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java @@ -33,10 +33,16 @@ package org.opensearch.monitor.fs; import org.apache.lucene.util.Constants; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.NoopCircuitBreaker; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.env.NodeEnvironment; import org.opensearch.env.NodeEnvironment.NodePath; +import org.opensearch.index.store.remote.filecache.FileCache; +import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -113,7 +119,14 @@ public void testFsInfo() throws IOException { public void testFsCacheInfo() throws IOException { Settings settings = Settings.builder().put("node.roles", "search").build(); try (NodeEnvironment env = newNodeEnvironment(settings)) { - FsProbe probe = new FsProbe(env, settings); + ByteSizeValue gbByteSizeValue = new ByteSizeValue(1, ByteSizeUnit.GB); + env.fileCacheNodePath().fileCacheReservedSize = gbByteSizeValue; + FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache( + gbByteSizeValue.getBytes(), + 16, + new NoopCircuitBreaker(CircuitBreaker.REQUEST) + ); + FsProbe probe = new FsProbe(env, fileCache); FsInfo stats = probe.stats(null); 
assertNotNull(stats); assertTrue(stats.getTimestamp() > 0L); diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index d775a6f645e61..ebafaca1a8f98 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -35,20 +35,29 @@ import org.opensearch.bootstrap.BootstrapCheck; import org.opensearch.bootstrap.BootstrapContext; import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.SetOnce; import org.opensearch.common.breaker.CircuitBreaker; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.Environment; +import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine.Searcher; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.BreakerSettings; import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.monitor.fs.FsInfo; +import org.opensearch.monitor.fs.FsProbe; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.MockHttpTransport; @@ -59,6 +68,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; @@ -66,6 +76,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.test.NodeRoles.addRoles; import static org.opensearch.test.NodeRoles.dataNode; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -346,6 +357,55 @@ public void testCreateWithCircuitBreakerPlugins() throws IOException { } } + public void testCreateWithFileCache() throws Exception { + Settings searchRoleSettings = addRoles(baseSettings().build(), Set.of(DiscoveryNodeRole.SEARCH_ROLE)); + List> plugins = basePlugins(); + ByteSizeValue cacheSize = new ByteSizeValue(16, ByteSizeUnit.GB); + Settings searchRoleSettingsWithConfig = baseSettings().put(searchRoleSettings) + .put(Node.NODE_SEARCH_CACHE_SIZE_SETTING.getKey(), cacheSize) + .put(FeatureFlags.SEARCHABLE_SNAPSHOT, "true") + .build(); + Settings onlySearchRoleSettings = Settings.builder() + .put(searchRoleSettingsWithConfig) + .put( + NodeRoles.removeRoles( + searchRoleSettingsWithConfig, + Set.of( + DiscoveryNodeRole.DATA_ROLE, + DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, + DiscoveryNodeRole.INGEST_ROLE, + DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE + ) + ) + ) + .build(); + + // Test exception thrown with configuration missing + assertThrows(SettingsException.class, () -> new MockNode(searchRoleSettings, plugins)); + + // Test file cache is initialized 
+ try (MockNode mockNode = new MockNode(searchRoleSettingsWithConfig, plugins)) { + NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); + assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); + } + + // Test data + search node with defined cache size + try (MockNode mockNode = new MockNode(searchRoleSettingsWithConfig, plugins)) { + NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); + assertEquals(cacheSize.getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); + } + + // Test dedicated search node with no configuration + try (MockNode mockNode = new MockNode(onlySearchRoleSettings, plugins)) { + NodeEnvironment.NodePath fileCacheNodePath = mockNode.getNodeEnvironment().fileCacheNodePath(); + assertTrue(fileCacheNodePath.fileCacheReservedSize.getBytes() > 0); + FsProbe fsProbe = new FsProbe(mockNode.getNodeEnvironment(), mockNode.fileCache()); + FsInfo fsInfo = fsProbe.stats(null); + FsInfo.Path cachePathInfo = fsInfo.iterator().next(); + assertEquals(cachePathInfo.getFileCacheReserved().getBytes(), fileCacheNodePath.fileCacheReservedSize.getBytes()); + } + } + public static class MockCircuitBreakerPlugin extends Plugin implements CircuitBreakerPlugin { private SetOnce myCircuitBreaker = new SetOnce<>(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index efaab9e11d644..40cd924928541 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -176,6 +176,7 @@ import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; +import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -1801,6 +1802,7 @@ public void onFailure(final Exception e) { final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); final SetOnce repositoriesServiceReference = new SetOnce<>(); repositoriesServiceReference.set(repositoriesService); + FileCacheCleaner fileCacheCleaner = new FileCacheCleaner(nodeEnv, null); if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { indicesService = new IndicesService( settings, @@ -1836,7 +1838,8 @@ public void onFailure(final Exception e) { null, emptyMap(), new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), - repositoriesServiceReference::get + repositoriesServiceReference::get, + fileCacheCleaner ); } else { indicesService = new IndicesService( @@ -1872,7 +1875,8 @@ public void onFailure(final Exception e) { null, emptyMap(), new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), - repositoriesServiceReference::get + repositoriesServiceReference::get, + fileCacheCleaner ); } final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); From 0e2add2022891df9800db6e255f2e6018c51d4c3 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 5 Apr 2023 14:57:47 -0700 Subject: [PATCH 12/28] [BUG] Zstd new codec is a breaking change for kNN plugin: move custom-codecs from 
sandbox/modules to sandbox/plugins (so it won't be installed automatically) (#7014) (#7019) (cherry picked from commit c7d6cf5c2d91e18d2fd2a44e443529cee771cee1) Signed-off-by: Andriy Redko Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- sandbox/{modules => plugins}/custom-codecs/build.gradle | 0 .../custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 | 0 .../custom-codecs/licenses/zstd-jni-LICENSE.txt | 0 .../custom-codecs/licenses/zstd-jni-NOTICE.txt | 0 .../opensearch/index/codec/customcodecs/CustomCodecPlugin.java | 0 .../opensearch/index/codec/customcodecs/CustomCodecService.java | 0 .../index/codec/customcodecs/CustomCodecServiceFactory.java | 0 .../opensearch/index/codec/customcodecs/Lucene95CustomCodec.java | 0 .../codec/customcodecs/Lucene95CustomStoredFieldsFormat.java | 0 .../codec/customcodecs/PerFieldMappingPostingFormatCodec.java | 0 .../java/org/opensearch/index/codec/customcodecs/ZstdCodec.java | 0 .../opensearch/index/codec/customcodecs/ZstdCompressionMode.java | 0 .../org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java | 0 .../index/codec/customcodecs/ZstdNoDictCompressionMode.java | 0 .../org/opensearch/index/codec/customcodecs/package-info.java | 0 .../custom-codecs/src/main/plugin-metadata/plugin-security.policy | 0 .../resources/META-INF/services/org.apache.lucene.codecs.Codec | 0 .../index/codec/customcodecs/AbstractCompressorTests.java | 0 .../opensearch/index/codec/customcodecs/ZstdCompressorTests.java | 0 .../index/codec/customcodecs/ZstdNoDictCompressorTests.java | 0 20 files changed, 0 insertions(+), 0 deletions(-) rename sandbox/{modules => plugins}/custom-codecs/build.gradle (100%) rename sandbox/{modules => plugins}/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 (100%) rename sandbox/{modules => plugins}/custom-codecs/licenses/zstd-jni-LICENSE.txt (100%) rename sandbox/{modules => plugins}/custom-codecs/licenses/zstd-jni-NOTICE.txt (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/main/plugin-metadata/plugin-security.policy (100%) 
rename sandbox/{modules => plugins}/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec (100%) rename sandbox/{modules => plugins}/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java (100%) rename sandbox/{modules => plugins}/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java (100%) diff --git a/sandbox/modules/custom-codecs/build.gradle b/sandbox/plugins/custom-codecs/build.gradle similarity index 100% rename from sandbox/modules/custom-codecs/build.gradle rename to sandbox/plugins/custom-codecs/build.gradle diff --git a/sandbox/modules/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 b/sandbox/plugins/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 similarity index 100% rename from sandbox/modules/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 rename to sandbox/plugins/custom-codecs/licenses/zstd-jni-1.5.4-1.jar.sha1 diff --git a/sandbox/modules/custom-codecs/licenses/zstd-jni-LICENSE.txt b/sandbox/plugins/custom-codecs/licenses/zstd-jni-LICENSE.txt similarity index 100% rename from sandbox/modules/custom-codecs/licenses/zstd-jni-LICENSE.txt rename to sandbox/plugins/custom-codecs/licenses/zstd-jni-LICENSE.txt diff --git a/sandbox/modules/custom-codecs/licenses/zstd-jni-NOTICE.txt b/sandbox/plugins/custom-codecs/licenses/zstd-jni-NOTICE.txt similarity index 100% rename from sandbox/modules/custom-codecs/licenses/zstd-jni-NOTICE.txt rename to sandbox/plugins/custom-codecs/licenses/zstd-jni-NOTICE.txt diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecPlugin.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecService.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/CustomCodecServiceFactory.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java similarity index 100% rename from 
sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/PerFieldMappingPostingFormatCodec.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java similarity index 100% rename from sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java diff --git a/sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java similarity index 100% rename from 
sandbox/modules/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java rename to sandbox/plugins/custom-codecs/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java diff --git a/sandbox/modules/custom-codecs/src/main/plugin-metadata/plugin-security.policy b/sandbox/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy similarity index 100% rename from sandbox/modules/custom-codecs/src/main/plugin-metadata/plugin-security.policy rename to sandbox/plugins/custom-codecs/src/main/plugin-metadata/plugin-security.policy diff --git a/sandbox/modules/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/sandbox/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec similarity index 100% rename from sandbox/modules/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec rename to sandbox/plugins/custom-codecs/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec diff --git a/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java similarity index 100% rename from sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java rename to sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java diff --git a/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java similarity index 100% rename from sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java rename to sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java diff --git a/sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java similarity index 100% rename from sandbox/modules/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java rename to sandbox/plugins/custom-codecs/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java From 6ef2b31dd83b8201944b34844121a8597f4a987e Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 5 Apr 2023 18:47:50 -0400 Subject: [PATCH 13/28] Bump com.azure:azure-storage-blob from 12.20.0 to 12.21.1 in /plugins/repository-azure (#6942) (#7004) * Bump com.azure:azure-storage-blob in /plugins/repository-azure Bumps [com.azure:azure-storage-blob](https://github.com/Azure/azure-sdk-for-java) from 12.20.0 to 12.21.1. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.20.0...azure-storage-blob_12.21.1) --- updated-dependencies: - dependency-name: com.azure:azure-storage-blob dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] * Change the location of adding changelog from Unreleased 3.0 to 2.x Signed-off-by: Tianli Feng --------- Signed-off-by: dependabot[bot] Signed-off-by: Tianli Feng Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Tianli Feng (cherry picked from commit 65f1285e6e2a1b6676a175c969cad273eed591c3) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 3 ++- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-blob-12.20.0.jar.sha1 | 1 - .../licenses/azure-storage-blob-12.21.1.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-blob-12.21.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4fbb5fc1bac2c..910a5cd6c375a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.codehaus.jettison:jettison` from 1.5.3 to 1.5.4 ([#6878](https://github.com/opensearch-project/OpenSearch/pull/6878)) - Add `com.github.luben:zstd-jni:1.5.4-1` ([#3577](https://github.com/opensearch-project/OpenSearch/pull/3577)) - Bump: Netty from 4.1.90.Final to 4.1.91.Final , ASM 9.4 to ASM 9.5, ByteBuddy 1.14.2 to 1.14.3 ([#6981](https://github.com/opensearch-project/OpenSearch/pull/6981)) +- Bump `com.azure:azure-storage-blob` from 12.15.0 to 12.21.1 ### Changed - Require MediaType in Strings.toString API ([#6009](https://github.com/opensearch-project/OpenSearch/pull/6009)) @@ -58,4 +59,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.5...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.5...2.x \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 2f6e17ffe636a..897a017f7bdea 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -54,7 +54,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.20.0' + api 'com.azure:azure-storage-blob:12.21.1' api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.18' api 'io.projectreactor.netty:reactor-netty:1.1.4' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 deleted file mode 100644 index de86848c9fd06..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e682920b0e3115433f25d65b0718f8763035357e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.21.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.21.1.jar.sha1 new file mode 100644 index 0000000000000..e51bb4d4d9d53 --- /dev/null +++ 
b/plugins/repository-azure/licenses/azure-storage-blob-12.21.1.jar.sha1 @@ -0,0 +1 @@ +23fc5d2e3266246056a9735315911d200e892d4e \ No newline at end of file From d2873dcacb26c96776087e5245fefafbb6813ed2 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Wed, 5 Apr 2023 17:43:53 -0700 Subject: [PATCH 14/28] Fix flaky test by asserting not null. (#7013) (#7023) (cherry picked from commit f346d59f16d20d97930219dd1e4fa0c9bdef57eb) Signed-off-by: Rishikesh1159 Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../indices/replication/SegmentReplicationStatsIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index d162e51616831..159de1a681f53 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -72,6 +72,7 @@ public void testSegmentReplicationStatsResponse() throws Exception { assertEquals(segmentReplicationStatsResponse.getReplicationStats().size(), 1); assertEquals(segmentReplicationStatsResponse.getTotalShards(), numShards * 2); assertEquals(segmentReplicationStatsResponse.getSuccessfulShards(), numShards * 2); + assertNotNull(currentReplicationState); assertEquals(currentReplicationState.getStage(), SegmentReplicationState.Stage.DONE); assertTrue(currentReplicationState.getIndex().recoveredFileCount() > 0); }, 1, TimeUnit.MINUTES); From 2ce07f206d32e098a383fb6e75ce27555c067332 Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Wed, 5 Apr 2023 17:44:36 -0700 Subject: [PATCH 15/28] Add background async task to fail stale replica shards. 
(#7022) Signed-off-by: Rishikesh1159 --- .../index/SegmentReplicationPressureIT.java | 38 ++++++ .../SegmentReplicationPressureService.java | 128 +++++++++++++++++- ...egmentReplicationPressureServiceTests.java | 42 +++++- .../snapshots/SnapshotResiliencyTests.java | 8 +- 4 files changed, 212 insertions(+), 4 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java index ad6c396df69a1..35d6a9ef0ef1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -16,6 +16,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardState; import org.opensearch.indices.replication.SegmentReplicationBaseIT; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; @@ -29,6 +30,7 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Arrays.asList; @@ -200,6 +202,42 @@ public void testBelowReplicaLimit() throws Exception { verifyStoreContent(); } + public void testFailStaleReplica() throws Exception { + + Settings settings = Settings.builder().put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(500)).build(); + // Starts a primary and replica node. + final String primaryNode = internalCluster().startNode(settings); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startNode(settings); + ensureGreen(INDEX_NAME); + + final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME); + final List replicaNodes = asList(replicaNode); + assertEqualSegmentInfosVersion(replicaNodes, primaryShard); + IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME); + + final CountDownLatch latch = new CountDownLatch(1); + final AtomicInteger totalDocs = new AtomicInteger(0); + try (final Releasable ignored = blockReplication(replicaNodes, latch)) { + // Index docs until replicas are staled. + totalDocs.getAndSet(indexUntilCheckpointCount()); + latch.await(); + // index again while we are stale. + indexDoc(); + refresh(INDEX_NAME); + totalDocs.incrementAndGet(); + + // Verify that replica shard is closed. + assertBusy(() -> { assertTrue(replicaShard.state().equals(IndexShardState.CLOSED)); }, 1, TimeUnit.MINUTES); + } + ensureGreen(INDEX_NAME); + final IndexShard replicaAfterFailure = getIndexShard(replicaNode, INDEX_NAME); + + // Verify that new replica shard after failure is different from old replica shard. 
+ assertNotEquals(replicaAfterFailure.routingEntry().allocationId().getId(), replicaShard.routingEntry().allocationId().getId()); + } + public void testBulkWritesRejected() throws Exception { final String primaryNode = internalCluster().startNode(); createIndex(INDEX_NAME); diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index f31e236fb6184..7117836ce7873 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -10,17 +10,25 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractAsyncTask; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.IndicesService; +import org.opensearch.threadpool.ThreadPool; +import java.io.Closeable; +import java.io.IOException; +import java.util.Comparator; import java.util.Set; import java.util.stream.Collectors; @@ -29,7 +37,7 @@ * * @opensearch.internal */ -public class SegmentReplicationPressureService { +public class SegmentReplicationPressureService implements Closeable { private volatile boolean isSegmentReplicationBackpressureEnabled; private volatile int maxCheckpointsBehind; @@ -38,6 +46,10 @@ public class SegmentReplicationPressureService { private static final Logger logger = LogManager.getLogger(SegmentReplicationPressureService.class); + /** + * When enabled, writes will be rejected when a replica shard falls behind by both the MAX_REPLICATION_TIME_SETTING time value and MAX_INDEXING_CHECKPOINTS number of checkpoints. + * Once a shard falls behind double the MAX_REPLICATION_TIME_SETTING time value it will be marked as failed. 
+ */ public static final Setting SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED = Setting.boolSetting( "segrep.pressure.enabled", false, @@ -70,13 +82,28 @@ public class SegmentReplicationPressureService { ); private final IndicesService indicesService; + + private final ThreadPool threadPool; private final SegmentReplicationStatsTracker tracker; + private final ShardStateAction shardStateAction; + + private final AsyncFailStaleReplicaTask failStaleReplicaTask; + @Inject - public SegmentReplicationPressureService(Settings settings, ClusterService clusterService, IndicesService indicesService) { + public SegmentReplicationPressureService( + Settings settings, + ClusterService clusterService, + IndicesService indicesService, + ShardStateAction shardStateAction, + ThreadPool threadPool + ) { this.indicesService = indicesService; this.tracker = new SegmentReplicationStatsTracker(this.indicesService); + this.shardStateAction = shardStateAction; + this.threadPool = threadPool; + final ClusterSettings clusterSettings = clusterService.getClusterSettings(); this.isSegmentReplicationBackpressureEnabled = SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.get(settings); clusterSettings.addSettingsUpdateConsumer( @@ -92,6 +119,13 @@ public SegmentReplicationPressureService(Settings settings, ClusterService clust this.maxAllowedStaleReplicas = MAX_ALLOWED_STALE_SHARDS.get(settings); clusterSettings.addSettingsUpdateConsumer(MAX_ALLOWED_STALE_SHARDS, this::setMaxAllowedStaleReplicas); + + this.failStaleReplicaTask = new AsyncFailStaleReplicaTask(this); + } + + // visible for testing + AsyncFailStaleReplicaTask getFailStaleReplicaTask() { + return failStaleReplicaTask; } public void isSegrepLimitBreached(ShardId shardId) { @@ -154,4 +188,94 @@ public void setMaxAllowedStaleReplicas(double maxAllowedStaleReplicas) { public void setMaxReplicationTime(TimeValue maxReplicationTime) { this.maxReplicationTime = maxReplicationTime; } + + @Override + public void close() throws IOException { + failStaleReplicaTask.close(); + } + + // Background Task to fail replica shards if they are too far behind primary shard. + final static class AsyncFailStaleReplicaTask extends AbstractAsyncTask { + + final SegmentReplicationPressureService pressureService; + + static final TimeValue INTERVAL = TimeValue.timeValueSeconds(30); + + AsyncFailStaleReplicaTask(SegmentReplicationPressureService pressureService) { + super(logger, pressureService.threadPool, INTERVAL, true); + this.pressureService = pressureService; + rescheduleIfNecessary(); + } + + @Override + protected boolean mustReschedule() { + return true; + } + + @Override + protected void runInternal() { + if (pressureService.isSegmentReplicationBackpressureEnabled) { + final SegmentReplicationStats stats = pressureService.tracker.getStats(); + + // Find the shardId in node which is having stale replicas with highest current replication time. + // This way we only fail one shardId's stale replicas in every iteration of this background async task and there by decrease + // load gradually on node. 
+ stats.getShardStats() + .entrySet() + .stream() + .flatMap( + entry -> pressureService.getStaleReplicas(entry.getValue().getReplicaStats()) + .stream() + .map(r -> Tuple.tuple(entry.getKey(), r.getCurrentReplicationTimeMillis())) + ) + .max(Comparator.comparingLong(Tuple::v2)) + .map(Tuple::v1) + .ifPresent(shardId -> { + final Set staleReplicas = pressureService.getStaleReplicas( + stats.getShardStats().get(shardId).getReplicaStats() + ); + final IndexService indexService = pressureService.indicesService.indexService(shardId.getIndex()); + final IndexShard primaryShard = indexService.getShard(shardId.getId()); + for (SegmentReplicationShardStats staleReplica : staleReplicas) { + if (staleReplica.getCurrentReplicationTimeMillis() > 2 * pressureService.maxReplicationTime.millis()) { + pressureService.shardStateAction.remoteShardFailed( + shardId, + staleReplica.getAllocationId(), + primaryShard.getOperationPrimaryTerm(), + true, + "replica too far behind primary, marking as stale", + null, + new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.trace( + "Successfully failed remote shardId [{}] allocation id [{}]", + shardId, + staleReplica.getAllocationId() + ); + } + + @Override + public void onFailure(Exception e) { + logger.error("Failed to send remote shard failure", e); + } + } + ); + } + } + }); + } + } + + @Override + protected String getThreadPool() { + return ThreadPool.Names.GENERIC; + } + + @Override + public String toString() { + return "fail_stale_replica"; + } + + } } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index a050a4c2243db..3bc84c2c44be8 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -8,7 +8,9 @@ package org.opensearch.index; +import org.mockito.Mockito; import org.mockito.stubbing.Answer; +import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; @@ -21,6 +23,7 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.threadpool.ThreadPool; import java.util.Iterator; import java.util.List; @@ -29,13 +32,20 @@ import java.util.concurrent.TimeUnit; import static java.util.Arrays.asList; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; public class SegmentReplicationPressureServiceTests extends OpenSearchIndexLevelReplicationTestCase { + private static ShardStateAction shardStateAction = Mockito.mock(ShardStateAction.class); private static final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, 
ReplicationType.SEGMENT) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) @@ -181,6 +191,36 @@ public void testIsSegrepLimitBreached_underStaleNodeLimit() throws Exception { } } + public void testFailStaleReplicaTask() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + + // index docs in batches without refreshing + indexInBatches(5, shards, primaryShard); + + // assert that replica shard is few checkpoints behind primary + Set replicationStats = primaryShard.getReplicationStats(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertEquals(5, shardStats.getCheckpointsBehindCount()); + + // call the background task + service.getFailStaleReplicaTask().runInternal(); + + // verify that remote shard failed method is called which fails the replica shards falling behind. + verify(shardStateAction, times(1)).remoteShardFailed(any(), anyString(), anyLong(), anyBoolean(), anyString(), any(), any()); + replicateSegments(primaryShard, shards.getReplicas()); + } + } + private int indexInBatches(int count, ReplicationGroup shards, IndexShard primaryShard) throws Exception { int totalDocs = 0; for (int i = 0; i < count; i++) { @@ -202,6 +242,6 @@ private SegmentReplicationPressureService buildPressureService(Settings settings ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - return new SegmentReplicationPressureService(settings, clusterService, indicesService); + return new SegmentReplicationPressureService(settings, clusterService, indicesService, shardStateAction, mock(ThreadPool.class)); } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 40cd924928541..6c4b636e3c002 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1984,7 +1984,13 @@ public void onFailure(final Exception e) { new UpdateHelper(scriptService), actionFilters, new IndexingPressureService(settings, clusterService), - new SegmentReplicationPressureService(settings, clusterService, mock(IndicesService.class)), + new SegmentReplicationPressureService( + settings, + clusterService, + mock(IndicesService.class), + mock(ShardStateAction.class), + mock(ThreadPool.class) + ), new SystemIndices(emptyMap()) ); actions.put( From d95effb9c6389482c6a2e377b6fde8df96b29afe Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 5 Apr 2023 23:58:01 -0700 Subject: [PATCH 16/28] Bump version of protobuf-java to 3.22.2 (#6994) (#7024) (cherry picked from commit 53b128fc10f4b2ee5c95b55000dbfa89f110a661) Signed-off-by: Tianli Feng --- CHANGELOG.md | 3 +-- plugins/repository-gcs/build.gradle | 4 ++-- plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 | 1 - 
plugins/repository-gcs/licenses/protobuf-java-3.22.2.jar.sha1 | 1 + .../licenses/protobuf-java-util-3.20.0.jar.sha1 | 1 - .../licenses/protobuf-java-util-3.22.2.jar.sha1 | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 7 files changed, 6 insertions(+), 7 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/protobuf-java-3.22.2.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/protobuf-java-util-3.22.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 910a5cd6c375a..7c3229d8a3e3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,12 +25,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.18.0 to 2.20.0 ([#6490](https://github.com/opensearch-project/OpenSearch/pull/6490)) - Bump `com.azure:azure-storage-common` from 12.19.3 to 12.20.0 ([#6492](https://github.com/opensearch-project/OpenSearch/pull/6492) -- Bump `com.azure:azure-storage-blob` from 12.15.0 to 12.20.0 ([#6921](https://github.com/opensearch-project/OpenSearch/pull/6921) - Bump `snakeyaml` from 1.33 to 2.0 ([#6511](https://github.com/opensearch-project/OpenSearch/pull/6511)) - Bump `io.projectreactor.netty:reactor-netty` from 1.1.3 to 1.1.4 - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.15.2 to 0.16.11 - Bump `net.minidev:json-smart` from 2.4.8 to 2.4.9 -- Bump `com.google.protobuf:protobuf-java` from 3.22.0 to 3.22.2 +- Bump `com.google.protobuf:protobuf-java` to 3.22.2 ([#6994](https://github.com/opensearch-project/OpenSearch/pull/6994)) - Bump Netty to 4.1.90.Final ([#6677](https://github.com/opensearch-project/OpenSearch/pull/6677) - Bump `com.diffplug.spotless` from 6.15.0 to 6.17.0 - Bump `org.apache.zookeeper:zookeeper` from 3.8.0 to 3.8.1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1369588bf5e37..dbbaa8163925d 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -65,8 +65,8 @@ dependencies { api 'com.google.api:api-common:1.8.1' api 'com.google.api:gax:1.54.0' api 'org.threeten:threetenbp:1.4.4' - api 'com.google.protobuf:protobuf-java-util:3.20.0' - api 'com.google.protobuf:protobuf-java:3.21.7' + api 'com.google.protobuf:protobuf-java-util:3.22.2' + api 'com.google.protobuf:protobuf-java:3.22.2' api 'com.google.code.gson:gson:2.9.0' api 'com.google.api.grpc:proto-google-common-protos:2.10.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 deleted file mode 100644 index faa673a23ef41..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-3.21.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -96cfc7147192f1de72c3d7d06972155ffb7d180c \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-3.22.2.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-3.22.2.jar.sha1 new file mode 100644 index 0000000000000..80feeec023e7b --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-3.22.2.jar.sha1 @@ -0,0 +1 @@ +fdee98b8f6abab73f146a4edb4c09e56f8278d03 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 
b/plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 deleted file mode 100644 index 1e9d00d8d5c03..0000000000000 --- a/plugins/repository-gcs/licenses/protobuf-java-util-3.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ee4496b296418283cbe7ae784984347fc4717a9a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/protobuf-java-util-3.22.2.jar.sha1 b/plugins/repository-gcs/licenses/protobuf-java-util-3.22.2.jar.sha1 new file mode 100644 index 0000000000000..2a5707254b8cf --- /dev/null +++ b/plugins/repository-gcs/licenses/protobuf-java-util-3.22.2.jar.sha1 @@ -0,0 +1 @@ +749cd4fe8ab52f37bc186193802ba19f5b284647 \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 43f3838999080..d2d8555583b73 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -52,7 +52,7 @@ dependencies { api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" api 'net.minidev:json-smart:2.4.9' api "org.mockito:mockito-core:${versions.mockito}" - api "com.google.protobuf:protobuf-java:3.21.9" + api "com.google.protobuf:protobuf-java:3.22.2" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" api 'org.eclipse.jetty:jetty-server:9.4.49.v20220914' api 'org.apache.zookeeper:zookeeper:3.8.1' From c7e3b023a8f359c0d23be4cfdfa96d9106448bdd Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 10:03:38 -0700 Subject: [PATCH 17/28] [Segment Replication] Compatibility check for differing lucene codec versions (#6730) (#6991) This change aims to fail segment replications between the primary and replica if they are utilizing differing lucene codec versions. This is to avoid the current behavior of failing the replica shard in such situations. (cherry picked from commit c334bbde881c61e66b7b4768e2d43455f1c827d9) Signed-off-by: Poojita Raj --- .../opensearch/index/shard/IndexShard.java | 20 ++++++-- .../OngoingSegmentReplications.java | 7 +++ .../SegmentReplicationTargetService.java | 2 +- .../checkpoint/ReplicationCheckpoint.java | 43 +++++++++++++---- .../gateway/PrimaryShardAllocatorTests.java | 46 +++++++++++------- .../index/seqno/ReplicationTrackerTests.java | 28 +++++++++-- .../SegmentReplicationIndexShardTests.java | 7 +-- .../OngoingSegmentReplicationsTests.java | 47 ++++++++++++++++++- .../PrimaryShardReplicationSourceTests.java | 33 +++++++++++-- .../SegmentReplicationSourceServiceTests.java | 9 +++- .../SegmentReplicationTargetServiceTests.java | 11 +++-- .../SegmentReplicationTargetTests.java | 4 +- .../PublishCheckpointActionTests.java | 5 +- .../replication/common/CopyStateTests.java | 15 +++++- .../index/shard/IndexShardTestCase.java | 7 ++- 15 files changed, 232 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9d06ce7c6a391..82a080e7dc512 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -496,6 +496,13 @@ public boolean isSystem() { return indexSettings.getIndexMetadata().isSystem(); } + /** + * Returns the name of the default codec in codecService + */ + public String getDefaultCodecName() { + return codecService.codec(CodecService.DEFAULT_CODEC).getName(); + } + /** * USE THIS METHOD WITH CARE! 
* Returns the primary term the index shard is supposed to be on. In case of primary promotion or when a replica learns about @@ -1489,7 +1496,7 @@ public Tuple, ReplicationCheckpoint> getLatestSegme return null; } if (getEngineOrNull() == null) { - return new Tuple<>(new GatedCloseable<>(null, () -> {}), ReplicationCheckpoint.empty(shardId)); + return new Tuple<>(new GatedCloseable<>(null, () -> {}), ReplicationCheckpoint.empty(shardId, getDefaultCodecName())); } // do not close the snapshot - caller will close it. final GatedCloseable snapshot = getSegmentInfosSnapshot(); @@ -1506,13 +1513,14 @@ public Tuple, ReplicationCheckpoint> getLatestSegme // getSegmentInfosSnapshot, so computing length from SegmentInfos can cause issues. shardRouting.primary() ? store.getSegmentMetadataMap(segmentInfos).values().stream().mapToLong(StoreFileMetadata::length).sum() - : store.stats(StoreStats.UNKNOWN_RESERVED_BYTES).getSizeInBytes() + : store.stats(StoreStats.UNKNOWN_RESERVED_BYTES).getSizeInBytes(), + getEngine().config().getCodec().getName() ) ); } catch (IOException e) { throw new OpenSearchException("Error Fetching SegmentInfos and latest checkpoint", e); } - }).orElseGet(() -> new Tuple<>(new GatedCloseable<>(null, () -> {}), ReplicationCheckpoint.empty(shardId))); + }).orElseGet(() -> new Tuple<>(new GatedCloseable<>(null, () -> {}), ReplicationCheckpoint.empty(shardId, getDefaultCodecName()))); } /** @@ -1582,6 +1590,12 @@ public final boolean shouldProcessCheckpoint(ReplicationCheckpoint requestCheckp ); return false; } + if (localCheckpoint.getCodec().equals(requestCheckpoint.getCodec()) == false) { + logger.trace( + () -> new ParameterizedMessage("Shard does not support the received lucene codec version {}", requestCheckpoint.getCodec()) + ); + return false; + } return true; } diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index 6f04c6cf6f665..3ab0a7539fb06 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -14,6 +14,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; @@ -147,6 +148,12 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener { if (segrepHandler != null) { logger.warn("Override handler for allocation id {}", request.getTargetAllocationId()); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index bf626ff93760c..1858449e13ae8 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -401,7 +401,7 @@ public void messageReceived(final ForceSyncRequest request, TransportChannel cha return; } startReplication( - ReplicationCheckpoint.empty(request.getShardId()), + ReplicationCheckpoint.empty(request.getShardId(), indexShard.getDefaultCodecName()), indexShard, new 
SegmentReplicationTargetService.SegmentReplicationListener() { @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 57e667b06a223..32521fb0cd944 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication.checkpoint; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -30,29 +31,32 @@ public class ReplicationCheckpoint implements Writeable, Comparable tracker.shardAllocationId.equals(id) == false) .collect(Collectors.toSet()); - final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint(tracker.shardId(), 0L, 1, 1, 1L); - final ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint(tracker.shardId(), 0L, 2, 2, 50L); - final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint(tracker.shardId(), 0L, 2, 3, 100L); + final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint( + tracker.shardId(), + 0L, + 1, + 1, + 1L, + Codec.getDefault().getName() + ); + final ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint( + tracker.shardId(), + 0L, + 2, + 2, + 50L, + Codec.getDefault().getName() + ); + final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint( + tracker.shardId(), + 0L, + 2, + 3, + 100L, + Codec.getDefault().getName() + ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); tracker.setLatestReplicationCheckpoint(secondCheckpoint); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 014a37249612b..c4db88782638f 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -8,6 +8,7 @@ package org.opensearch.index.shard; +import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.SegmentInfos; import org.junit.Assert; import org.opensearch.ExceptionsHelper; @@ -306,7 +307,7 @@ public void testRejectCheckpointOnShardRoutingPrimary() throws IOException { assertEquals(false, primaryShard.getReplicationTracker().isPrimaryMode()); assertEquals(true, primaryShard.routingEntry().primary()); - spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L), spyShard); + spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L, Codec.getDefault().getName()), spyShard); // Verify that checkpoint is not processed as shard routing is primary. 
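// (The extra Codec.getDefault().getName() argument above reflects the codec name that every
// ReplicationCheckpoint now carries; independent of the routing check exercised here,
// shouldProcessCheckpoint also rejects checkpoints whose codec differs from the local checkpoint's codec.)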
verify(spy, times(0)).startReplication(any(), any(), any()); @@ -1020,7 +1021,7 @@ private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCoun private void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { try { - final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primary.shardId), primary); + final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primary.shardId, primary.getDefaultCodecName()), primary); listener.onResponse( new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); @@ -1034,7 +1035,7 @@ private void startReplicationAndAssertCancellation(IndexShard replica, SegmentRe throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); final SegmentReplicationTarget target = targetService.startReplication( - ReplicationCheckpoint.empty(replica.shardId), + ReplicationCheckpoint.empty(replica.shardId, replica.getDefaultCodecName()), replica, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java index 78767ee1dcf8c..6e27a4db6afec 100644 --- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java @@ -17,6 +17,7 @@ import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.IndexService; +import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -47,7 +48,7 @@ public class OngoingSegmentReplicationsTests extends IndexShardTestCase { private final IndicesService mockIndicesService = mock(IndicesService.class); - private ReplicationCheckpoint testCheckpoint; + private ReplicationCheckpoint testCheckpoint, olderCodecTestCheckpoint; private DiscoveryNode primaryDiscoveryNode; private DiscoveryNode replicaDiscoveryNode; private IndexShard primary; @@ -73,8 +74,12 @@ public void setUp() throws Exception { ShardId testShardId = primary.shardId(); + CodecService codecService = new CodecService(null, null); + String defaultCodecName = codecService.codec(CodecService.DEFAULT_CODEC).getName(); + // This mirrors the creation of the ReplicationCheckpoint inside CopyState - testCheckpoint = new ReplicationCheckpoint(testShardId, primary.getOperationPrimaryTerm(), 0L, 0L); + testCheckpoint = new ReplicationCheckpoint(testShardId, primary.getOperationPrimaryTerm(), 0L, 0L, defaultCodecName); + olderCodecTestCheckpoint = new ReplicationCheckpoint(testShardId, primary.getOperationPrimaryTerm(), 0L, 0L, "Lucene94"); IndexService mockIndexService = mock(IndexService.class); when(mockIndicesService.indexServiceSafe(testShardId.getIndex())).thenReturn(mockIndexService); when(mockIndexService.getShard(testShardId.id())).thenReturn(primary); @@ -89,6 +94,44 @@ public void tearDown() throws Exception { super.tearDown(); } + public void testSuccessfulCodecCompatibilityCheck() throws Exception { + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); + OngoingSegmentReplications replications = spy(new 
OngoingSegmentReplications(mockIndicesService, recoverySettings)); + // replica checkpoint is on same/higher lucene codec than primary + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + testCheckpoint + ); + final FileChunkWriter segmentSegmentFileChunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> { + listener.onResponse(null); + }; + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + } + + public void testFailCodecCompatibilityCheck() throws Exception { + indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + primary.refresh("Test"); + OngoingSegmentReplications replications = spy(new OngoingSegmentReplications(mockIndicesService, recoverySettings)); + // replica checkpoint is on lower/older lucene codec than primary + final CheckpointInfoRequest request = new CheckpointInfoRequest( + 1L, + replica.routingEntry().allocationId().getId(), + replicaDiscoveryNode, + olderCodecTestCheckpoint + ); + final FileChunkWriter segmentSegmentFileChunkWriter = (fileMetadata, position, content, lastChunk, totalTranslogOps, listener) -> { + listener.onResponse(null); + }; + try { + final CopyState copyState = replications.prepareForReplication(request, segmentSegmentFileChunkWriter); + } catch (CancellableThreads.ExecutionCancelledException ex) { + Assert.assertTrue(ex.getMessage().contains("Requested unsupported codec version")); + } + } + public void testPrepareAndSendSegments() throws IOException { indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); primary.refresh("Test"); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index d925956bd95ef..995f38087297e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.codecs.Codec; import org.apache.lucene.util.Version; import org.junit.Assert; import org.opensearch.action.ActionListener; @@ -93,7 +94,13 @@ public void tearDown() throws Exception { } public void testGetCheckpointMetadata() { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + VERSION, + Codec.getDefault().getName() + ); replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, mock(ActionListener.class)); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); assertEquals(1, requestList.length); @@ -104,7 +111,13 @@ public void testGetCheckpointMetadata() { } public void testGetSegmentFiles() { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + VERSION, + Codec.getDefault().getName() + ); StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); 
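// The checkpoint built above now carries Codec.getDefault().getName(), so the primary-side copy logic can
// reject replicas on an incompatible codec (see OngoingSegmentReplicationsTests#testFailCodecCompatibilityCheck).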
replicationSource.getSegmentFiles( REPLICATION_ID, @@ -126,7 +139,13 @@ public void testGetSegmentFiles() { */ public void testTransportTimeoutForGetSegmentFilesAction() { long fileSize = (long) (Math.pow(10, 9)); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + VERSION, + Codec.getDefault().getName() + ); StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", fileSize, "checksum", Version.LATEST); replicationSource.getSegmentFiles( REPLICATION_ID, @@ -145,7 +164,13 @@ public void testTransportTimeoutForGetSegmentFilesAction() { public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), PRIMARY_TERM, SEGMENTS_GEN, VERSION); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + VERSION, + Codec.getDefault().getName() + ); StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", Version.LATEST); replicationSource.getSegmentFiles( REPLICATION_ID, diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index 0d05b1ec8679e..41022b77b46e1 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.codecs.Codec; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; @@ -55,7 +56,13 @@ public void setUp() throws Exception { when(mockIndexService.getShard(testShardId.id())).thenReturn(mockIndexShard); // This mirrors the creation of the ReplicationCheckpoint inside CopyState - testCheckpoint = new ReplicationCheckpoint(testShardId, mockIndexShard.getOperationPrimaryTerm(), 0L, 0L); + testCheckpoint = new ReplicationCheckpoint( + testShardId, + mockIndexShard.getOperationPrimaryTerm(), + 0L, + 0L, + Codec.getDefault().getName() + ); testThreadPool = new TestThreadPool("test", Settings.EMPTY); CapturingTransport transport = new CapturingTransport(); localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index bae0afb5bcc3b..357a88c27fc46 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -15,6 +15,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.CancellableThreads; +import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.TestReplicationSource; import 
org.opensearch.index.shard.IndexShard; @@ -62,10 +63,12 @@ public void setUp() throws Exception { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()) .build(); + CodecService codecService = new CodecService(null, null); + String defaultCodecName = codecService.codec(CodecService.DEFAULT_CODEC).getName(); primaryShard = newStartedShard(true, settings); replicaShard = newShard(false, settings, new NRTReplicationEngineFactory()); recoverReplica(replicaShard, primaryShard, true, getReplicationFunc(replicaShard)); - checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L); + checkpoint = new ReplicationCheckpoint(replicaShard.shardId(), 0L, 0L, 0L, defaultCodecName); SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); replicationSource = mock(SegmentReplicationSource.class); when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource); @@ -76,13 +79,15 @@ public void setUp() throws Exception { initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), initialCheckpoint.getSegmentsGen(), - initialCheckpoint.getSegmentInfosVersion() + 1 + initialCheckpoint.getSegmentInfosVersion() + 1, + defaultCodecName ); newPrimaryCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm() + 1, initialCheckpoint.getSegmentsGen(), - initialCheckpoint.getSegmentInfosVersion() + 1 + initialCheckpoint.getSegmentInfosVersion() + 1, + defaultCodecName ); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 599e73b548ddb..b36dbdba40be8 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication; +import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; @@ -106,7 +107,8 @@ public void setUp() throws Exception { spyIndexShard.shardId(), spyIndexShard.getPendingPrimaryTerm(), testSegmentInfos.getGeneration(), - testSegmentInfos.version + testSegmentInfos.version, + Codec.getDefault().getName() ); } diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 2c05fbc9328e5..bc597fd39539f 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -8,6 +8,7 @@ package org.opensearch.indices.replication.checkpoint; +import org.apache.lucene.codecs.Codec; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; @@ -104,7 +105,7 @@ public void testPublishCheckpointActionOnPrimary() { mockTargetService ); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 11, 1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 11, 1, 
Codec.getDefault().getName()); final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); action.shardOperationOnPrimary(request, indexShard, ActionTestUtils.assertNoFailureListener(result -> { @@ -139,7 +140,7 @@ public void testPublishCheckpointActionOnReplica() { mockTargetService ); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 11, 1); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 11, 1, Codec.getDefault().getName()); final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java index a87a8de206a39..e3b48302ae6ef 100644 --- a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -8,12 +8,14 @@ package org.opensearch.indices.replication.common; +import org.apache.lucene.codecs.Codec; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.index.codec.CodecService; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.shard.ShardId; @@ -49,7 +51,10 @@ public class CopyStateTests extends IndexShardTestCase { public void testCopyStateCreation() throws IOException { final IndexShard mockIndexShard = createMockIndexShard(); - CopyState copyState = new CopyState(ReplicationCheckpoint.empty(mockIndexShard.shardId()), mockIndexShard); + CopyState copyState = new CopyState( + ReplicationCheckpoint.empty(mockIndexShard.shardId(), new CodecService(null, null).codec("default").getName()), + mockIndexShard + ); ReplicationCheckpoint checkpoint = copyState.getCheckpoint(); assertEquals(TEST_SHARD_ID, checkpoint.getShardId()); // version was never set so this should be zero @@ -67,7 +72,13 @@ public static IndexShard createMockIndexShard() throws IOException { when(mockShard.store()).thenReturn(mockStore); SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major); - ReplicationCheckpoint testCheckpoint = new ReplicationCheckpoint(mockShard.shardId(), mockShard.getOperationPrimaryTerm(), 0L, 0L); + ReplicationCheckpoint testCheckpoint = new ReplicationCheckpoint( + mockShard.shardId(), + mockShard.getOperationPrimaryTerm(), + 0L, + 0L, + Codec.getDefault().getName() + ); final Tuple, ReplicationCheckpoint> gatedCloseableReplicationCheckpointTuple = new Tuple<>( new GatedCloseable<>(testSegmentInfos, () -> {}), testCheckpoint diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index ab0cf38f77c7d..f2dd0a7e5def0 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1299,7 +1299,10 @@ public void getCheckpointMetadata( ActionListener listener ) { try { - final CopyState copyState = new CopyState(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard); + final CopyState copyState = new 
CopyState( + ReplicationCheckpoint.empty(primaryShard.shardId, primaryShard.getDefaultCodecName()), + primaryShard + ); listener.onResponse( new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); @@ -1353,7 +1356,7 @@ public final List replicateSegments(IndexShard primary for (IndexShard replica : replicaShards) { final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard, replica); final SegmentReplicationTarget target = targetService.startReplication( - ReplicationCheckpoint.empty(replica.shardId), + ReplicationCheckpoint.empty(replica.shardId, replica.getDefaultCodecName()), replica, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override From 266d87916123c1d765160e39d2ca7c99aa265913 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 17:19:04 -0400 Subject: [PATCH 18/28] Fix GetSnapshots to not return non-existent snapshots with ignore_unavailable=true (#6839) (#7029) * Fix bug for Get Snapshot API to return correct response when getting a non-existing snapshot (#6820) * modify change log * Modify changelog --------- (cherry picked from commit 8b34e5f724fe2b875731228795f51d523462599a) Signed-off-by: Gao Binlong Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- CHANGELOG.md | 3 ++- .../opensearch/snapshots/SnapshotStatusApisIT.java | 11 +++++++++++ .../snapshots/get/TransportGetSnapshotsAction.java | 6 ++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c3229d8a3e3f..14e8b83388217 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,8 +54,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added equals/hashcode for named DocValueFormat.DateTime inner class ([#6357](https://github.com/opensearch-project/OpenSearch/pull/6357)) - Fixed bug for searchable snapshot to take 'base_path' of blob into account ([#6558](https://github.com/opensearch-project/OpenSearch/pull/6558)) - Fix fuzziness validation ([#5805](https://github.com/opensearch-project/OpenSearch/pull/5805)) +- Fix GetSnapshots to not return non-existent snapshots with ignore_unavailable=true ([#6839](https://github.com/opensearch-project/OpenSearch/pull/6839)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.5...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.5...2.x diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index b6fa4fbc2fc96..46547a7718927 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -379,6 +379,17 @@ public void testGetSnapshotsRequest() throws Exception { .get(); assertEquals(1, getSnapshotsResponse.getSnapshots().size()); assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName()); + + // there is an in-progress snapshot, make sure we return empty result when getting a non-existing snapshot with setting + // ignore_unavailable to true + 
getSnapshotsResponse = client.admin() + .cluster() + .prepareGetSnapshots("test-repo") + .setIgnoreUnavailable(true) + .addSnapshots("non-existent-snapshot") + .get(); + assertEquals(0, getSnapshotsResponse.getSnapshots().size()); + unblockNode(repositoryName, initialBlockedNode); // unblock node admin().cluster().prepareDeleteSnapshot(repositoryName, "snap-on-empty-repo").get(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index d05e62045a1a2..b5445bf544cc6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -239,9 +239,11 @@ private List snapshots( repositoryName, snapshotIdsToIterate.stream().map(SnapshotId::getName).collect(Collectors.toList()) ); + // filter and incorporate the snapshots in progress for (SnapshotsInProgress.Entry entry : entries) { - snapshotSet.add(new SnapshotInfo(entry)); - snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId()); + if (snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId())) { + snapshotSet.add(new SnapshotInfo(entry)); + } } // then, look in the repository final Repository repository = repositoriesService.repository(repositoryName); From 008c33a894c615db5c75878ca0f1bbb0aaee8ff9 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 15:18:48 -0700 Subject: [PATCH 19/28] Change port number for disconnected remote host to accomadate for Windows platform execution (#7035) (#7039) (cherry picked from commit cbd478a09d98ee7772d92152958ec861cf571bab) Signed-off-by: Kunal Kotwani Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../resources/rest-api-spec/test/reindex/90_remote.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/90_remote.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/90_remote.yml index b30f7fc73222b..7d43a54987225 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/90_remote.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/90_remote.yml @@ -390,14 +390,12 @@ refresh: true - do: - # sometimes IIS is listening on port 0. In that case we fail in other ways and this test isn't useful. - # make sure to stop any local webservers if running this test locally otherwise an s_s_l handshake exception may occur catch: /connect_exception|IIS Windows Server/ reindex: body: source: remote: - host: http://127.0.0.1:0 + host: http://127.0.0.1:54321 index: source dest: index: dest From 7b667f5624ae01031f9924c5337f0e0b75f6260f Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 16:06:25 -0700 Subject: [PATCH 20/28] Add isRemoteSnapshot() check before isSegRepEnabled() on index settings. 
(#7036) (#7041) (cherry picked from commit fe8253e089e8b2b8ccd9d51a296628791fb363d8) Signed-off-by: Rishikesh1159 Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../main/java/org/opensearch/indices/IndicesService.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index b0d488a5b2cf7..9a94ec7b1f680 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -933,12 +933,12 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) .collect(Collectors.toList()); if (engineFactories.isEmpty()) { - if (idxSettings.isSegRepEnabled()) { - return new NRTReplicationEngineFactory(); - } if (idxSettings.isRemoteSnapshot()) { return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); } + if (idxSettings.isSegRepEnabled()) { + return new NRTReplicationEngineFactory(); + } return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); From eb4940dace6b89457fc4adef06244033d1089795 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 16:56:41 -0700 Subject: [PATCH 21/28] Don't register destructiveDistroTest.docker twice (#6930) (#6949) (cherry picked from commit bcbb561b51c60776257e68786fcc411eebf4fab4) Signed-off-by: Daniel Widdis Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 3bb92d826e5eb..57bb524bcf607 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -119,8 +119,8 @@ public void apply(Project project) { TaskProvider depsTask = project.getTasks().register(taskname + "#deps"); depsTask.configure(t -> t.dependsOn(distribution, examplePlugin)); depsTasks.put(taskname, depsTask); - // TODO - suppressing failure temporarily where duplicate tasks are created for docker. 
- try { + // Avoid duplicate tasks such as docker registered in lifecycleTasks + if (project.getTasksByName(taskname, false).isEmpty()) { TaskProvider destructiveTask = configureTestTask(project, taskname, distribution, t -> { t.onlyIf(t2 -> distribution.isDocker() == false || dockerSupport.get().getDockerAvailability().isAvailable); addSysprop(t, DISTRIBUTION_SYSPROP, distribution::getFilepath); @@ -134,8 +134,6 @@ public void apply(Project project) { } destructiveDistroTest.configure(t -> t.dependsOn(destructiveTask)); lifecycleTasks.get(distribution.getType()).configure(t -> t.dependsOn(destructiveTask)); - } catch (Exception ex) { - System.out.println(ex.getMessage()); } if ((distribution.getType() == OpenSearchDistribution.Type.DEB || distribution.getType() == OpenSearchDistribution.Type.RPM) From 4cff04adb45f166757a9e2c05299448b9fbc3dcf Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 6 Apr 2023 20:47:45 -0700 Subject: [PATCH 22/28] [Segment Replication] Introduce new SEGMENT_REPLICATION_EXPERIMENTAL feature flag. (#7006) (#7042) * Add new SEGMENT_REPLICATION_CLUSTER_SETTING feature flag. * Remove entry from changelog as the cluster setting introduced is noy ready for users to use. * Fix failing unit tests. * Address comments on PR. * Address comments on PR and chnage feature flag to SEGMENT_REPLICATION_EXPERIMENTAL. --------- (cherry picked from commit fe8b4d4f1db61659407be6537427e0729da79794) Signed-off-by: Rishikesh1159 Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- CHANGELOG.md | 1 - .../SegmentReplicationClusterSettingIT.java | 6 +++++- .../opensearch/cluster/metadata/IndexMetadata.java | 4 ---- .../opensearch/common/settings/ClusterSettings.java | 2 +- .../common/settings/FeatureFlagSettings.java | 1 + .../org/opensearch/common/util/FeatureFlags.java | 12 ++++++++++++ .../java/org/opensearch/index/IndexSettings.java | 2 +- .../org/opensearch/index/IndexSettingsTests.java | 10 ++-------- 8 files changed, 22 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 14e8b83388217..bb552b6ffb70a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,7 +15,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add wait_for_completion parameter to resize, open, and forcemerge APIs ([#6434](https://github.com/opensearch-project/OpenSearch/pull/6434)) - [Segment Replication] Apply backpressure when replicas fall behind ([#6563](https://github.com/opensearch-project/OpenSearch/pull/6563)) - [Remote Store] Integrate remote segment store in peer recovery flow ([#6664](https://github.com/opensearch-project/OpenSearch/pull/6664)) -- [Segment Replication] Add new cluster setting to set replication strategy by default for all indices in cluster. ([#6791](https://github.com/opensearch-project/OpenSearch/pull/6791)) - Enable sort optimization for all NumericTypes ([#6464](https://github.com/opensearch-project/OpenSearch/pull/6464) - Remove 'cluster_manager' role attachment when using 'node.master' deprecated setting ([#6331](https://github.com/opensearch-project/OpenSearch/pull/6331)) - Add new cluster settings to ignore weighted round-robin routing and fallback to default behaviour. 
([#6834](https://github.com/opensearch-project/OpenSearch/pull/6834)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index c612956a3066f..3db1ffd655209 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -47,7 +47,11 @@ public Settings indexSettings() { @Override protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REPLICATION_TYPE, "true").build(); + return Settings.builder() + .put(super.featureFlagSettings()) + .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") + .put(FeatureFlags.REPLICATION_TYPE, "true") + .build(); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 78d686c3475aa..d72844610365c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -330,10 +330,6 @@ public void validate(final Boolean value, final Map, Object> settings + INDEX_REPLICATION_TYPE_SETTING.getKey() + " should be set to " + ReplicationType.SEGMENT - + " or " - + CLUSTER_REPLICATION_TYPE_SETTING.getKey() - + " should be set to " - + Boolean.TRUE ); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 9e62b77de04ff..7c6a48bd3f935 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -647,7 +647,7 @@ public void apply(Settings value, Settings current, Settings previous) { public static final Map> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( FeatureFlags.SEARCHABLE_SNAPSHOT, List.of(Node.NODE_SEARCH_CACHE_SIZE_SETTING), - FeatureFlags.REPLICATION_TYPE, + FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, List.of(IndicesService.CLUSTER_REPLICATION_TYPE_SETTING) ); diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 8fb6cd115f24b..3d8b4aaa82aa5 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -35,6 +35,7 @@ protected FeatureFlagSettings( new HashSet<>( Arrays.asList( FeatureFlags.REPLICATION_TYPE_SETTING, + FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING, FeatureFlags.REMOTE_STORE_SETTING, FeatureFlags.SEARCHABLE_SNAPSHOT_SETTING, FeatureFlags.EXTENSIONS_SETTING diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 72b7349180bad..9cd3685606ff6 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -27,6 +27,12 @@ public class FeatureFlags { */ public static final String REPLICATION_TYPE = "opensearch.experimental.feature.replication_type.enabled"; + /** + * 
Gates the visibility of the segment replication experimental features that allows users to test unreleased beta features. + */ + public static final String SEGMENT_REPLICATION_EXPERIMENTAL = + "opensearch.experimental.feature.segment_replication_experimental.enabled"; + /** * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. * Once the feature is ready for production release, this feature flag can be removed. @@ -84,6 +90,12 @@ public static boolean isEnabled(String featureFlagName) { public static final Setting REPLICATION_TYPE_SETTING = Setting.boolSetting(REPLICATION_TYPE, false, Property.NodeScope); + public static final Setting SEGMENT_REPLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + SEGMENT_REPLICATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + public static final Setting REMOTE_STORE_SETTING = Setting.boolSetting(REMOTE_STORE, false, Property.NodeScope); public static final Setting SEARCHABLE_SNAPSHOT_SETTING = Setting.boolSetting(SEARCHABLE_SNAPSHOT, false, Property.NodeScope); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index d1e427d3011f2..def7732e952ec 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -751,7 +751,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); - if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE) + if (FeatureFlags.isEnabled(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL) && indexMetadata.isSystem() == false && settings.get(IndexMetadata.SETTING_REPLICATION_TYPE) == null) { replicationType = IndicesService.CLUSTER_REPLICATION_TYPE_SETTING.get(settings); diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 4ffc82532d4df..663e9e5007a8f 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -868,10 +868,7 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDocument() { IllegalArgumentException.class, () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings) ); - assertEquals( - "To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT or cluster.indices.replication.strategy should be set to true", - iae.getMessage() - ); + assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage()); } public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() { @@ -880,10 +877,7 @@ public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDefault() { IllegalArgumentException.class, () -> IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings) ); - assertEquals( - "To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT or cluster.indices.replication.strategy should be set to true", - iae.getMessage() - ); + assertEquals("To enable index.remote_store.enabled, index.replication.type should be set to SEGMENT", iae.getMessage()); } public void testRemoteRepositoryDefaultSetting() { From d0e0ecce8197d6fbb04adaa786e4449111a15911 Mon Sep 17 00:00:00 2001 
From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Fri, 7 Apr 2023 15:21:04 -0700 Subject: [PATCH 23/28] [Extensions] Added headers to extensions request (#6826) (#7055) * added headers to extensions rest request * added tests for extension headers request * applied spotless changes * moved allowList/denyList code * changed warning comment text * Revert "changed warning comment text" This reverts commit 3842f9e665908e22ab2dba98928bf3fa6dbede0d. * changed warning comment text * replaced List by Set in headers deny/allow list * allowList, denyList moved to class level * code of headers filtering moved to method * Added tests for filterHeader(), changed path to uri in ExtensionRestRequest * added uri and HttpVersion to ExtensionRestRequest * fixed mistake in filteredHeaders * fixed syntax mistakes * fixed syntax mistakes 2 * Collectors import added to RestSendToExtensionAction * added path to ExtensionRestRequest * fixed compile errors 1 * fixed compile errors 2 * corrected tests for new ExtensionRestRequest --------- (cherry picked from commit 0db20d99d4e06ddb261429466a8072501cc15941) Signed-off-by: Daulet Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../extensions/rest/ExtensionRestRequest.java | 60 +++++++++++++++++-- .../rest/RestSendToExtensionAction.java | 38 +++++++++++- .../rest/ExtensionRestRequestTests.java | 40 ++++++++++++- .../rest/RestSendToExtensionActionTests.java | 29 +++++++++ 4 files changed, 157 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java index c98f042c18ab3..93ef9d3034062 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java +++ b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java @@ -19,6 +19,7 @@ import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestRequest.Method; import org.opensearch.transport.TransportRequest; +import org.opensearch.http.HttpRequest; import java.io.IOException; import java.util.ArrayList; @@ -37,13 +38,16 @@ public class ExtensionRestRequest extends TransportRequest { private Method method; + private String uri; private String path; private Map params; + private Map> headers; private XContentType xContentType = null; private BytesReference content; // The owner of this request object // Will be replaced with PrincipalIdentifierToken class from feature/identity private String principalIdentifierToken; + private HttpRequest.HttpVersion httpVersion; // Tracks consumed parameters and content private final Set consumedParams = new HashSet<>(); @@ -53,26 +57,35 @@ public class ExtensionRestRequest extends TransportRequest { * This object can be instantiated given method, uri, params, content and identifier * * @param method of type {@link Method} - * @param path the REST path string (excluding the query) + * @param uri the REST uri string (excluding the query) + * @param path the REST path * @param params the REST params + * @param headers the REST headers * @param xContentType the content type, or null for plain text or no content * @param content the REST request content * @param principalIdentifier the owner of this request + * @param httpVersion the REST HTTP protocol version */ public ExtensionRestRequest( Method method, + String uri, String path, Map params, + Map> headers, XContentType xContentType, BytesReference content, - 
String principalIdentifier + String principalIdentifier, + HttpRequest.HttpVersion httpVersion ) { this.method = method; + this.uri = uri; this.path = path; this.params = params; + this.headers = headers; this.xContentType = xContentType; this.content = content; this.principalIdentifierToken = principalIdentifier; + this.httpVersion = httpVersion; } /** @@ -84,27 +97,33 @@ public ExtensionRestRequest( public ExtensionRestRequest(StreamInput in) throws IOException { super(in); method = in.readEnum(RestRequest.Method.class); + uri = in.readString(); path = in.readString(); params = in.readMap(StreamInput::readString, StreamInput::readString); + headers = in.readMap(StreamInput::readString, StreamInput::readStringList); if (in.readBoolean()) { xContentType = in.readEnum(XContentType.class); } content = in.readBytesReference(); principalIdentifierToken = in.readString(); + httpVersion = in.readEnum(HttpRequest.HttpVersion.class); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeEnum(method); + out.writeString(uri); out.writeString(path); out.writeMap(params, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeStringCollection); out.writeBoolean(xContentType != null); if (xContentType != null) { out.writeEnum(xContentType); } out.writeBytesReference(content); out.writeString(principalIdentifierToken); + out.writeEnum(httpVersion); } /** @@ -116,6 +135,15 @@ public Method method() { return method; } + /** + * Gets the REST uri + * + * @return This REST request's uri + */ + public String uri() { + return uri; + } + /** * Gets the REST path * @@ -196,6 +224,14 @@ public List consumedParams() { return new ArrayList<>(consumedParams); } + /** + * Gets the headers of request + * @return a map of request headers + */ + public Map> headers() { + return headers; + } + /** * Gets the content type, if any. 
* @@ -255,20 +291,33 @@ public String getRequestIssuerIdentity() { return principalIdentifierToken; } + /** + * @return This REST request's HTTP protocol version + */ + public HttpRequest.HttpVersion protocolVersion() { + return httpVersion; + } + @Override public String toString() { return "ExtensionRestRequest{method=" + method + + ", uri=" + + uri + ", path=" + path + ", params=" + params + + ", headers=" + + headers.toString() + ", xContentType=" + xContentType + ", contentLength=" + content.length() + ", requester=" + principalIdentifierToken + + ", httpVersion=" + + httpVersion + "}"; } @@ -278,15 +327,18 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; ExtensionRestRequest that = (ExtensionRestRequest) obj; return Objects.equals(method, that.method) + && Objects.equals(uri, that.uri) && Objects.equals(path, that.path) && Objects.equals(params, that.params) + && Objects.equals(headers, that.headers) && Objects.equals(xContentType, that.xContentType) && Objects.equals(content, that.content) - && Objects.equals(principalIdentifierToken, that.principalIdentifierToken); + && Objects.equals(principalIdentifierToken, that.principalIdentifierToken) + && Objects.equals(httpVersion, that.httpVersion); } @Override public int hashCode() { - return Objects.hash(method, path, params, xContentType, content, principalIdentifierToken); + return Objects.hash(method, uri, path, params, headers, xContentType, content, principalIdentifierToken, httpVersion); } } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 247269e2f1f17..4f5c80caa3958 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -25,6 +25,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; +import org.opensearch.http.HttpRequest; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -32,6 +33,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.TimeUnit; @@ -61,6 +64,9 @@ public String getName() { private final DiscoveryExtensionNode discoveryExtensionNode; private final TransportService transportService; + private static final Set allowList = Set.of("Content-Type"); + private static final Set denyList = Set.of("Authorization", "Proxy-Authorization"); + /** * Instantiates this object using a {@link RegisterRestActionsRequest} to populate the routes. 
* @@ -103,13 +109,26 @@ public List routes() { return this.routes; } + public Map> filterHeaders(Map> headers, Set allowList, Set denyList) { + Map> filteredHeaders = headers.entrySet() + .stream() + .filter(e -> !denyList.contains(e.getKey())) + .filter(e -> allowList.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + return filteredHeaders; + } + @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - Method method = request.method(); + HttpRequest httpRequest = request.getHttpRequest(); String path = request.path(); + Method method = request.method(); + String uri = httpRequest.uri(); Map params = request.params(); + Map> headers = request.getHeaders(); XContentType contentType = request.getXContentType(); BytesReference content = request.content(); + HttpRequest.HttpVersion httpVersion = httpRequest.protocolVersion(); if (path.startsWith(pathPrefix)) { path = path.substring(pathPrefix.length()); @@ -160,17 +179,30 @@ public String executor() { return ThreadPool.Names.GENERIC; } }; + try { // Will be replaced with ExtensionTokenProcessor and PrincipalIdentifierToken classes from feature/identity final String extensionTokenProcessor = "placeholder_token_processor"; final String requestIssuerIdentity = "placeholder_request_issuer_identity"; + Map> filteredHeaders = filterHeaders(headers, allowList, denyList); + transportService.sendRequest( discoveryExtensionNode, ExtensionsManager.REQUEST_REST_EXECUTE_ON_EXTENSION_ACTION, - // HERE BE DRAGONS - DO NOT INCLUDE HEADERS + // DO NOT INCLUDE HEADERS WITH SECURITY OR PRIVACY INFORMATION // SEE https://github.com/opensearch-project/OpenSearch/issues/4429 - new ExtensionRestRequest(method, path, params, contentType, content, requestIssuerIdentity), + new ExtensionRestRequest( + method, + uri, + path, + params, + filteredHeaders, + contentType, + content, + requestIssuerIdentity, + httpVersion + ), restExecuteOnExtensionResponseHandler ); inProgressFuture.orTimeout(ExtensionsManager.EXTENSION_REQUEST_WAIT_TIMEOUT, TimeUnit.SECONDS).join(); diff --git a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java index ba8159d0f9d11..0cf1f524a3ac4 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/ExtensionRestRequestTests.java @@ -21,10 +21,12 @@ import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestRequest.Method; +import org.opensearch.http.HttpRequest; import org.opensearch.test.OpenSearchTestCase; import java.nio.charset.StandardCharsets; import java.security.Principal; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -34,11 +36,14 @@ public class ExtensionRestRequestTests extends OpenSearchTestCase { private Method expectedMethod; private String expectedPath; + private String expectedUri; Map expectedParams; + Map> expectedHeaders; XContentType expectedContentType; BytesReference expectedContent; String extensionUniqueId1; Principal userPrincipal; + HttpRequest.HttpVersion expectedHttpVersion; // Will be replaced with ExtensionTokenProcessor and PrincipalIdentifierToken classes from feature/identity String extensionTokenProcessor; String expectedRequestIssuerIdentity; 
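For illustration, the allow/deny header filtering added in RestSendToExtensionAction boils down to the following minimal, hypothetical sketch (the HeaderFilterSketch class name and the sample header values are illustrative only; the filtering logic simply mirrors the filterHeaders method above): a header is forwarded only when it is absent from the deny list and present in the allow list.

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class HeaderFilterSketch {
    // Keep a header only if it is not denied and is explicitly allowed.
    static Map<String, List<String>> filter(Map<String, List<String>> headers, Set<String> allowList, Set<String> denyList) {
        return headers.entrySet()
            .stream()
            .filter(e -> !denyList.contains(e.getKey()))
            .filter(e -> allowList.contains(e.getKey()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }

    public static void main(String[] args) {
        Map<String, List<String>> headers = Map.of(
            "Content-Type", List.of("application/json"),
            "Authorization", List.of("Bearer token"),
            "X-Custom", List.of("value")
        );
        // Prints {Content-Type=[application/json]}: Authorization is denied, X-Custom is not allow-listed.
        System.out.println(filter(headers, Set.of("Content-Type"), Set.of("Authorization", "Proxy-Authorization")));
    }
}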
@@ -48,11 +53,17 @@ public void setUp() throws Exception { super.setUp(); expectedMethod = Method.GET; expectedPath = "/test/uri"; + expectedUri = "foobar?foo=bar&baz=42"; expectedParams = Map.ofEntries(entry("foo", "bar"), entry("baz", "42")); + expectedHeaders = Map.ofEntries( + entry("Content-Type", Arrays.asList("application/json")), + entry("foo", Arrays.asList("hello", "world")) + ); expectedContentType = XContentType.JSON; expectedContent = new BytesArray("{\"key\": \"value\"}".getBytes(StandardCharsets.UTF_8)); extensionUniqueId1 = "ext_1"; userPrincipal = () -> "user1"; + expectedHttpVersion = HttpRequest.HttpVersion.HTTP_1_1; extensionTokenProcessor = "placeholder_extension_token_processor"; expectedRequestIssuerIdentity = "placeholder_request_issuer_identity"; } @@ -60,17 +71,23 @@ public void setUp() throws Exception { public void testExtensionRestRequest() throws Exception { ExtensionRestRequest request = new ExtensionRestRequest( expectedMethod, + expectedUri, expectedPath, expectedParams, + expectedHeaders, expectedContentType, expectedContent, - expectedRequestIssuerIdentity + expectedRequestIssuerIdentity, + expectedHttpVersion ); assertEquals(expectedMethod, request.method()); + assertEquals(expectedUri, request.uri()); assertEquals(expectedPath, request.path()); assertEquals(expectedParams, request.params()); + assertEquals(expectedHttpVersion, request.protocolVersion()); + assertEquals(Collections.emptyList(), request.consumedParams()); assertTrue(request.hasParam("foo")); assertFalse(request.hasParam("bar")); @@ -100,10 +117,13 @@ public void testExtensionRestRequest() throws Exception { try (NamedWriteableAwareStreamInput nameWritableAwareIn = new NamedWriteableAwareStreamInput(in, registry)) { request = new ExtensionRestRequest(nameWritableAwareIn); assertEquals(expectedMethod, request.method()); + assertEquals(expectedUri, request.uri()); assertEquals(expectedPath, request.path()); assertEquals(expectedParams, request.params()); + assertEquals(expectedHeaders, request.headers()); assertEquals(expectedContent, request.content()); assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + assertEquals(expectedHttpVersion, request.protocolVersion()); } } } @@ -112,19 +132,24 @@ public void testExtensionRestRequest() throws Exception { public void testExtensionRestRequestWithNoContent() throws Exception { ExtensionRestRequest request = new ExtensionRestRequest( expectedMethod, + expectedUri, expectedPath, expectedParams, + expectedHeaders, null, new BytesArray(new byte[0]), - expectedRequestIssuerIdentity + expectedRequestIssuerIdentity, + expectedHttpVersion ); assertEquals(expectedMethod, request.method()); assertEquals(expectedPath, request.path()); assertEquals(expectedParams, request.params()); + assertEquals(expectedHeaders, request.headers()); assertNull(request.getXContentType()); assertEquals(0, request.content().length()); assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + assertEquals(expectedHttpVersion, request.protocolVersion()); final ExtensionRestRequest requestWithNoContent = request; assertThrows(OpenSearchParseException.class, () -> requestWithNoContent.contentParser(NamedXContentRegistry.EMPTY)); @@ -136,11 +161,13 @@ public void testExtensionRestRequestWithNoContent() throws Exception { try (NamedWriteableAwareStreamInput nameWritableAwareIn = new NamedWriteableAwareStreamInput(in, registry)) { request = new ExtensionRestRequest(nameWritableAwareIn); assertEquals(expectedMethod, 
request.method()); + assertEquals(expectedUri, request.uri()); assertEquals(expectedPath, request.path()); assertEquals(expectedParams, request.params()); assertNull(request.getXContentType()); assertEquals(0, request.content().length()); assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + assertEquals(expectedHttpVersion, request.protocolVersion()); final ExtensionRestRequest requestWithNoContentType = request; assertThrows(OpenSearchParseException.class, () -> requestWithNoContentType.contentParser(NamedXContentRegistry.EMPTY)); @@ -154,19 +181,24 @@ public void testExtensionRestRequestWithPlainTextContent() throws Exception { ExtensionRestRequest request = new ExtensionRestRequest( expectedMethod, + expectedUri, expectedPath, expectedParams, + expectedHeaders, null, expectedText, - expectedRequestIssuerIdentity + expectedRequestIssuerIdentity, + expectedHttpVersion ); assertEquals(expectedMethod, request.method()); + assertEquals(expectedUri, request.uri()); assertEquals(expectedPath, request.path()); assertEquals(expectedParams, request.params()); assertNull(request.getXContentType()); assertEquals(expectedText, request.content()); assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + assertEquals(expectedHttpVersion, request.protocolVersion()); try (BytesStreamOutput out = new BytesStreamOutput()) { request.writeTo(out); @@ -175,11 +207,13 @@ public void testExtensionRestRequestWithPlainTextContent() throws Exception { try (NamedWriteableAwareStreamInput nameWritableAwareIn = new NamedWriteableAwareStreamInput(in, registry)) { request = new ExtensionRestRequest(nameWritableAwareIn); assertEquals(expectedMethod, request.method()); + assertEquals(expectedUri, request.uri()); assertEquals(expectedPath, request.path()); assertEquals(expectedParams, request.params()); assertNull(request.getXContentType()); assertEquals(expectedText, request.content()); assertEquals(expectedRequestIssuerIdentity, request.getRequestIssuerIdentity()); + assertEquals(expectedHttpVersion, request.protocolVersion()); } } } diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index 8d3d20c5bc3f7..6428057c5294b 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -13,6 +13,9 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Set; +import java.util.Map; +import java.util.Arrays; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -123,6 +126,32 @@ public void testRestSendToExtensionAction() throws Exception { assertTrue(expectedMethods.containsAll(methods)); } + public void testRestSendToExtensionActionFilterHeaders() throws Exception { + RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( + "uniqueid1", + List.of("GET /foo", "PUT /bar", "POST /baz") + ); + RestSendToExtensionAction restSendToExtensionAction = new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService + ); + + Map> headers = new HashMap<>(); + headers.put("Content-Type", Arrays.asList("application/json")); + headers.put("Authorization", Arrays.asList("Bearer token")); + headers.put("Proxy-Authorization", Arrays.asList("Basic credentials")); + + Set allowList = 
Set.of("Content-Type"); // allowed headers + Set denyList = Set.of("Authorization", "Proxy-Authorization"); // denied headers + + Map> filteredHeaders = restSendToExtensionAction.filterHeaders(headers, allowList, denyList); + + assertTrue(filteredHeaders.containsKey("Content-Type")); + assertFalse(filteredHeaders.containsKey("Authorization")); + assertFalse(filteredHeaders.containsKey("Proxy-Authorization")); + } + public void testRestSendToExtensionActionBadMethod() throws Exception { RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( "uniqueid1", From 932f47bd328a667543bd202da881805bf74a31cb Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 10:54:53 -0700 Subject: [PATCH 24/28] [Extensions] Adds a parameter to Setting Validator to toggle regex matching (#6823) (#7054) (cherry picked from commit c765e3a4bfb4e2d2532e1afde34758e48c644b3d) Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] --- .../opensearch/common/settings/Setting.java | 17 ++++++++++++++++- .../common/settings/SettingTests.java | 18 ++++++++++++++++-- 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java index 992f71322aa16..a0cdf35ee0ad2 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -1256,15 +1256,27 @@ private static boolean isFiltered(Property[] properties) { public static class RegexValidator implements Writeable, Validator { private Pattern pattern; + private boolean isMatching; + /** * @param regex A regular expression containing the only valid input for this setting. */ public RegexValidator(String regex) { + this(regex, true); + } + + /** + * @param regex constructs a validator based on a regular expression. + * @param isMatching If true, the setting must match the given regex. If false, the setting must not match the given regex. 
+ */ + public RegexValidator(String regex, boolean isMatching) { this.pattern = Pattern.compile(regex); + this.isMatching = isMatching; } public RegexValidator(StreamInput in) throws IOException { this.pattern = Pattern.compile(in.readString()); + this.isMatching = in.readBoolean(); } Pattern getPattern() { @@ -1273,14 +1285,17 @@ Pattern getPattern() { @Override public void validate(String value) { - if (!pattern.matcher(value).matches()) { + if (isMatching && !pattern.matcher(value).find()) { throw new IllegalArgumentException("Setting [" + value + "] does not match regex [" + pattern.pattern() + "]"); + } else if (!isMatching && pattern.matcher(value).find()) { + throw new IllegalArgumentException("Setting [" + value + "] must match regex [" + pattern.pattern() + "]"); } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(pattern.pattern()); + out.writeBoolean(isMatching); } }
diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index 005c0d7c38b51..1c10e19f34a19 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -329,22 +329,36 @@ public void testRegexValidator() throws Exception { String expectedRegex = "\\d+"; Pattern expectedPattern = Pattern.compile(expectedRegex); RegexValidator regexValidator = new RegexValidator(expectedRegex); + RegexValidator regexValidatorMatcherFalse = new RegexValidator(expectedRegex, false); // Test that the pattern is correctly initialized assertNotNull(expectedPattern); assertNotNull(regexValidator.getPattern()); assertEquals(expectedPattern.pattern(), regexValidator.getPattern().pattern()); - // Test that validate() throws an exception for invalid input + // Test that the pattern and the isMatching=false flag are initialized correctly + assertNotNull(regexValidatorMatcherFalse); + assertNotNull(regexValidatorMatcherFalse.getPattern()); + assertEquals(expectedPattern.pattern(), regexValidatorMatcherFalse.getPattern().pattern()); + + // Test that validate() throws an exception when the value does not match final RegexValidator finalValidator = new RegexValidator(expectedRegex); assertThrows(IllegalArgumentException.class, () -> finalValidator.validate("foo")); - try { regexValidator.validate("123"); } catch (IllegalArgumentException e) { fail("Expected validate() to not throw an exception, but it threw " + e); } + // Test that an isMatching=false validator throws an exception when the value does match + final RegexValidator finalValidatorFalse = new RegexValidator(expectedRegex, false); + assertThrows(IllegalArgumentException.class, () -> finalValidatorFalse.validate("123")); + try { + regexValidatorMatcherFalse.validate(expectedRegex); + } catch (IllegalArgumentException e) { + fail("Expected validate() to not throw an exception, but it threw " + e); + } + try (BytesStreamOutput out = new BytesStreamOutput()) { regexValidator.writeTo(out); out.flush();
From a499c5605cbf3d657c29657c19b9012953cd59fc Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 13:56:28 -0700 Subject: [PATCH 25/28] [Backport 2.x] Do not delete entire index cache on shard removal (#7073) * Do not delete entire index cache on shard removal (#7072) The cleaner was too aggressive with cleaning up the cache and would delete the entire index directory within
the cache when a shard was removed. This is not correct when multiple shards for a given index are on the same node. Signed-off-by: Andrew Ross (cherry picked from commit e3339e8de65b580fa767e2d344d6a3a50116e84b) Signed-off-by: github-actions[bot] * Use correct import for IOUtils Signed-off-by: Andrew Ross --------- Signed-off-by: Andrew Ross Signed-off-by: github-actions[bot] Co-authored-by: github-actions[bot] Co-authored-by: Andrew Ross --- .../remote/filecache/FileCacheCleaner.java | 54 +++++++-- .../filecache/FileCacheCleanerTests.java | 111 ++++++++++++++++++ 2 files changed, 158 insertions(+), 7 deletions(-) create mode 100644 server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java index a1411f71c0761..4da4e37fd9076 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java +++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheCleaner.java @@ -11,13 +11,16 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.io.FileSystemUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.core.internal.io.IOUtils; import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.Index; import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.cluster.IndicesClusterStateService; import java.io.IOException; import java.nio.file.DirectoryStream; @@ -52,19 +55,56 @@ public FileCacheCleaner(NodeEnvironment nodeEnvironment, FileCache fileCache) { @Override public void beforeIndexShardDeleted(ShardId shardId, Settings settings) { try { - String storeType = settings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()); - if (IndexModule.Type.REMOTE_SNAPSHOT.match(storeType)) { - ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); - Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); + if (isRemoteSnapshot(settings)) { + final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId); + final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); try (DirectoryStream ds = Files.newDirectoryStream(localStorePath)) { for (Path subPath : ds) { fileCache.remove(subPath.toRealPath()); } } - FileSystemUtils.deleteSubDirectories(shardPath.getRootDataPath()); } } catch (IOException ioe) { - log.error(() -> new ParameterizedMessage("Error removing items from cache during shard deletion {})", shardId), ioe); + log.error(() -> new ParameterizedMessage("Error removing items from cache during shard deletion {}", shardId), ioe); } } + + @Override + public void afterIndexShardDeleted(ShardId shardId, Settings settings) { + if (isRemoteSnapshot(settings)) { + final Path path = ShardPath.loadFileCachePath(nodeEnvironment, shardId).getDataPath(); + try { + if (Files.exists(path)) { + IOUtils.rm(path); + } + } catch (IOException e) { + log.error(() -> new ParameterizedMessage("Failed to delete cache path for shard {}", shardId), e); + } + } + } + + @Override + public void afterIndexRemoved( + Index 
index, + IndexSettings indexSettings, + IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason + ) { + if (isRemoteSnapshot(indexSettings.getSettings()) + && reason == IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED) { + final Path indexCachePath = nodeEnvironment.fileCacheNodePath().fileCachePath.resolve( + Integer.toString(nodeEnvironment.getNodeLockId()) + ).resolve(index.getUUID()); + if (Files.exists(indexCachePath)) { + try { + IOUtils.rm(indexCachePath); + } catch (IOException e) { + log.error(() -> new ParameterizedMessage("Failed to delete cache path for index {}", index), e); + } + } + } + } + + private static boolean isRemoteSnapshot(Settings settings) { + return IndexModule.Type.REMOTE_SNAPSHOT.match(settings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())); + } } diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java new file mode 100644 index 0000000000000..0255726ad76fe --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store.remote.filecache; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; + +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.NoopCircuitBreaker; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.NodeEnvironment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectoryFactory.LOCAL_STORE_LOCATION; + +public class FileCacheCleanerTests extends OpenSearchTestCase { + private static final ShardId SHARD_0 = new ShardId("index", "uuid-0", 0); + private static final ShardId SHARD_1 = new ShardId("index", "uuid-1", 0); + private static final Settings SETTINGS = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.store.type", "remote_snapshot") + .build(); + private static final IndexSettings INDEX_SETTINGS = new IndexSettings( + IndexMetadata.builder("index").settings(SETTINGS).build(), + SETTINGS + ); + + private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache( + 1024 * 1024 * 1024, + 1, + new NoopCircuitBreaker(CircuitBreaker.REQUEST) + ); + private final Map files = new HashMap<>(); + private NodeEnvironment env; + private FileCacheCleaner cleaner; + + @Before + public void setUpFileCache() throws IOException { + env = newNodeEnvironment(SETTINGS); + cleaner = new FileCacheCleaner(env, fileCache); + files.put(SHARD_0, addFile(fileCache, env, SHARD_0)); + 
files.put(SHARD_1, addFile(fileCache, env, SHARD_1)); + MatcherAssert.assertThat(fileCache.size(), equalTo(2L)); + } + + private static Path addFile(FileCache fileCache, NodeEnvironment env, ShardId shardId) throws IOException { + final ShardPath shardPath = ShardPath.loadFileCachePath(env, shardId); + final Path localStorePath = shardPath.getDataPath().resolve(LOCAL_STORE_LOCATION); + Files.createDirectories(localStorePath); + final Path file = Files.createFile(localStorePath.resolve("file")); + fileCache.put(file, new FileCachedIndexInput.ClosedIndexInput(1024)); + return file; + } + + @After + public void tearDownFileCache() { + env.close(); + } + + public void testShardRemoved() { + final Path cachePath = ShardPath.loadFileCachePath(env, SHARD_0).getDataPath(); + assertTrue(Files.exists(cachePath)); + + cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); + MatcherAssert.assertThat(fileCache.size(), equalTo(1L)); + assertNull(fileCache.get(files.get(SHARD_0))); + assertFalse(Files.exists(files.get(SHARD_0))); + assertTrue(Files.exists(files.get(SHARD_1))); + cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); + assertFalse(Files.exists(cachePath)); + } + + public void testIndexRemoved() { + final Path indexCachePath = env.fileCacheNodePath().fileCachePath.resolve(Integer.toString(env.getNodeLockId())) + .resolve(SHARD_0.getIndex().getUUID()); + assertTrue(Files.exists(indexCachePath)); + + cleaner.beforeIndexShardDeleted(SHARD_0, SETTINGS); + cleaner.afterIndexShardDeleted(SHARD_0, SETTINGS); + cleaner.beforeIndexShardDeleted(SHARD_1, SETTINGS); + cleaner.afterIndexShardDeleted(SHARD_1, SETTINGS); + cleaner.afterIndexRemoved( + SHARD_0.getIndex(), + INDEX_SETTINGS, + IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED + ); + MatcherAssert.assertThat(fileCache.size(), equalTo(0L)); + assertFalse(Files.exists(indexCachePath)); + } +}
From 19e66c6979777a7c11d179e1c1ffbf3245a1333f Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 15:16:58 -0700 Subject: [PATCH 26/28] Add FlatObject FieldMapper (#6507) (#7052) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

To fulfill issue #1018, we implement the approach of storing the entire nested object as a String. A `flat_object` field creates exactly two internal Lucene [StringField](https://lucene.apache.org/core/8_0_0/core/org/apache/lucene/document/StringField.html) instances ("._value" and "._valueAndPath"), regardless of how many nested fields the flat field contains.

- value: a keyword field that contains the leaf values of all subfields, allowing efficient searching of leaf values without specifying the path (e.g. catalog = 'Mike').
- valueAndPath: a keyword field that contains the path to each leaf value together with the value itself, enabling efficient searching when the query includes the path to the leaf (e.g. catalog.author.given = 'Mike').

Limitations and Future Development:
- to enable searching from Painless scripts, we will need to direct the fielddata builder to fetch doc values for the two StringFields in memory
- open up mapping parameters such as normalizer, docValues, ignoreAbove, nullValue, similarity, and depthlimit.
- enable wildcard query (cherry picked from commit 75bb3efa6a20ec4c8602e34f5449325f249eec82) Signed-off-by: Mingshi Liu Signed-off-by: Lukáš Vlček Co-authored-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../rest-api-spec/test/painless/30_search.yml | 97 +++ .../test/index/90_flat_object.yml | 746 +++++++++++++++++ .../search/fields/SearchFieldsIT.java | 30 +- .../xcontent/JsonToStringXContentParser.java | 257 ++++++ .../index/mapper/DynamicKeyFieldMapper.java | 3 - .../index/mapper/FlatObjectFieldMapper.java | 760 ++++++++++++++++++ .../org/opensearch/indices/IndicesModule.java | 2 + .../fielddata/AbstractFieldDataTestCase.java | 3 + .../mapper/FlatObjectFieldDataTests.java | 63 ++ .../mapper/FlatObjectFieldMapperTests.java | 143 ++++ .../terms/SignificantTextAggregatorTests.java | 4 +- 12 files changed, 2099 insertions(+), 10 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml create mode 100644 server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index bb552b6ffb70a..2ac42429471b8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Require MediaType in Strings.toString API ([#6009](https://github.com/opensearch-project/OpenSearch/pull/6009)) - [Refactor] XContent base classes from xcontent to core library ([#5902](https://github.com/opensearch-project/OpenSearch/pull/5902)) +- Added a new field type: flat_object ([#6507](https://github.com/opensearch-project/OpenSearch/pull/6507)) ### Deprecated - Map, List, and Set in org.opensearch.common.collect ([#6609](https://github.com/opensearch-project/OpenSearch/pull/6609)) diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml index a006fde630716..b360d8dc01ccf 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml @@ -482,3 +482,100 @@ }] - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.root_cause.0.reason: "script score function must not produce negative scores, but got: [-9.0]"} + +--- + +"Flat-object fields from within the scripting": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + - do: + indices.create: + index: test + body: + mappings: + properties: + flat: + type : "flat_object" + + # This document has 6 distinct parts in its flat_object field paths: + # - flat.field_1 + # - flat.field_2 + # - flat.field_3 + # - flat.inner + # - flat.field_A + # - flat.field_B + - do: + index: + index: test + id: 1 + body: { + "flat": { + "field_1": "John Doe", + "field_2": 33, + "field_3": false, + "inner": { + "field_A": ["foo", "bar"], + "field_B": false + } + } + } + + - do: + index: + index: test + id: 2 + body: { + "flat": { + "field_1": "Joe Public", + "field_2": 45 + } + } + + - do: + indices.refresh: + index: test + + # It is 
possible to filter based on the number of distinct parts of flat_object field paths + - do: + search: + body: { + _source: true, + query: { + bool: { + filter: { + script: { + script: { + source: "doc['flat'].size() == 6", + lang: "painless" + } + } + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.flat.field_1: "John Doe" } + + - do: + search: + body: { + _source: true, + query: { + bool: { + filter: { + script: { + script: { + source: "doc['flat'].size() < 6", + lang: "painless" + } + } + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.flat.field_1: "Joe Public" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml new file mode 100644 index 0000000000000..88cb2f1716c9b --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml @@ -0,0 +1,746 @@ +--- +# The initial test setup includes: +# - Create flat_object mapping +# - Index two example documents +# - Refresh the index so it is ready for search tests +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + ISBN13: + type : "keyword" + catalog: + type : "flat_object" + required_matches: + type : "long" + + - do: + index: + index: test + id: 1 + body: { + "ISBN13": "V9781933988177", + "catalog": { + "title": "Lucene in Action", + "author": + { + "surname": "McCandless", + "given": "Mike" + }, + "catalogId":"c-0002", + "quantity": 1234, + "rating": 9.2, + "location": [-81.7982,41.3847 ], + "review": [["great",99.8],["ok",80.0]], + "publishDate": "2015-01-01" + }, + "required_matches": 1 + } + + - do: + index: + index: test + id: 2 + body: { + "ISBN13": "V12154942129175", + "catalog": { + "title": "Mock in Action", + "author": + { + "surname": "Doe", + "given": "John" + }, + "catalogId": "c-0050", + "quantity": 4321, + "rating": 5.2, + "location": [-12.7982,33.3847 ], + "review": [["bad",30.41],["ok",80.0]], + "publishDate": "2016-01-01" + }, + "required_matches": 1 + } + + # Do index refresh + - do: + indices.refresh: + index: test + +--- +# Delete Index when connection is teardown +teardown: + - do: + indices.delete: + index: test + +--- +# Verify that mappings under the catalog field did not expand +# and no dynamic fields were created. +"Mappings": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + - do: + indices.get_mapping: + index: test + - is_true: test.mappings + - match: { test.mappings.properties.ISBN13.type: keyword } + - match: { test.mappings.properties.catalog.type: flat_object } + - match: { test.mappings.properties.required_matches.type: long } + # https://github.com/opensearch-project/OpenSearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test#length + - length: { test.mappings.properties: 3 } + - length: { test.mappings.properties.catalog: 1 } + +--- +"Supported queries": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + # Verify Document Count + - do: + search: + body: { + query: { + match_all: {} + } + } + + - length: { hits.hits: 2 } + + # Match Query with dot path. + - do: + search: + body: { + _source: true, + query: { + match: { "catalog.title": "Lucene in Action"} + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.title: "Lucene in Action" } + + # Match Query without dot path. 
+ - do: + search: + body: { + _source: true, + query: { + match: { catalog: "Lucene in Action"} + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.title: "Lucene in Action" } + + + # Multi Match Query without dot path. + - do: + search: + body: { + _source: true, + query: { + multi_match: { + "query": "Mike", + "fields": [ "ISBN13", "catalog" ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Multi Match Query with dot path. + - do: + search: + body: { + _source: true, + query: { + multi_match: { + "query": "Mike", + "fields": [ "ISBN13", "catalog.author.given" ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Term Query1 with dot path for date + - do: + search: + body: { + _source: true, + query: { + term: { catalog.publishDate: "2015-01-01"} + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.publishDate: "2015-01-01" } + + # Term Query1 without dot path for date + - do: + search: + body: { + _source: true, + query: { + term: { catalog: "2015-01-01" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.publishDate: "2015-01-01" } + + # Term Query2 with dot path for string + - do: + search: + body: { + _source: true, + query: { + term: { "catalog.author.given": "Mike" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Term Query2 without dot path. + - do: + search: + body: { + _source: true, + query: { + term: { catalog: "Mike" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Term Query3 with dot path for array + - do: + search: + body: { + _source: true, + query: { + term: { catalog.location: "-12.7982" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.location: [-12.7982,33.3847 ]} + + # Term Query3 without dot path for array + - do: + search: + body: { + _source: true, + query: { + term: { catalog: "-12.7982" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.location: [-12.7982,33.3847 ]} + + + # Term Query4 with dot path for nested-array + - do: + search: + body: { + _source: true, + query: { + term: { catalog.review: "99.8" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.review: [ [ "great",99.8 ],[ "ok",80.0 ] ] } + + # Term Query4 without dot path for nested-array + - do: + search: + body: { + _source: true, + query: { + term: { catalog: "99.8" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.review: [["great",99.8],["ok",80.0]] } + + # Terms Query without dot path. + - do: + search: + body: { + _source: true, + query: { + terms: { catalog: ["John","Mike"] } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Terms Query with dot path. + - do: + search: + body: { + _source: true, + query: { + terms: { catalog.author.given: ["John","Mike"] } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Termset Query without dot path. 
+ - do: + search: + body: { + _source: true, + query: { + "terms_set": { + "catalog": { + "terms": [ "John","Mike" ], + "minimum_should_match_field": "required_matches"} + } + } + } + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Termset Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "terms_set": { + "catalog.author.given": { + "terms": [ "John","Mike" ], + "minimum_should_match_field": "required_matches"} + } + } + } + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Prefix Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "prefix": { + "catalog.author.given": { + "value": "Mi" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Prefix Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "prefix": { + "catalog": { + "value": "Mi" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Range Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog.catalogId": { + "gte": "c-0000", + "lte": "c-0006" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.catalogId: "c-0002" } + + # Range Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog": { + "gte": "c-0000", + "lte": "c-0006" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.catalogId: "c-0002" } + + # Range Query with integer input with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog.quantity": { + "gte": 1000, + "lte": 2000 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.quantity: 1234 } + + # Range Query with integer input without dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog": { + "gte": 1000, + "lte": 2000 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.quantity: 1234 } + + # Range Query with date input with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog.publishDate": { + "gte": "2015-01-01", + "lte": "2015-12-31" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.publishDate: "2015-01-01" } + + # Range Query with date input without dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog": { + "gte": "2015-01-01", + "lte": "2015-12-31" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.publishDate: "2015-01-01" } + + # Range Query with double input with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog.location": { + "gte": 40.1234, + "lte": 42.1234 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.location: [-81.7982,41.3847] } + + # Range Query with double input without dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "catalog": { + "gte": 40.1234, + "lte": 42.1234 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.location: [ -81.7982,41.3847 ] } + + # Exists Query with dot path. 
+ - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": catalog.catalogId + } + } + } + + - length: { hits.hits: 2 } + + # Exists Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": catalog + } + } + } + + - length: { hits.hits: 2 } + + # Query_string Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "query_string": { + "fields": [ "catalog", "ISBN13" ], + "query": "John OR Mike" + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Query_string Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "query_string": { + "fields": [ "catalog.author.given", "ISBN13" ], + "query": "John OR Mike" + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.catalog.author.given: "Mike" } + + # Simple_query_string Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "simple_query_string" : { + "query": "Doe", + "fields": ["catalog", "ISBN13"] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.surname: "Doe" } + + + # Simple_query_string Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "simple_query_string": { + "query": "Doe", + "fields": [ "catalog.author.surname", "ISBN13" ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.catalog.author.surname: "Doe" } + +--- +"Unsupported": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + # Mapping parameters (such as index/search analyzers) are currently not supported + # The plan is to support them in the next version + - do: + catch: bad_request + indices.create: + index: test_analyzer + body: + mappings: + properties: + data: + type : "flat_object" + analyzer: "standard" + + - match: { error.root_cause.0.type: "mapper_parsing_exception" } + - match: { error.root_cause.0.reason: "Mapping definition for [data] has unsupported parameters: [analyzer : standard]"} + - match: { status: 400 } + + # Wildcard Query with dot path. + - do: + catch: bad_request + search: + body: { + _source: true, + query: { + "wildcard": { + "catalog.title": "Mock*" + } + } + } + - match: { error.root_cause.0.type: "query_shard_exception" } + - match: { error.root_cause.0.reason: "Can only use wildcard queries on keyword and text fields - not on [catalog.title] which is of type [flat_object]"} + - match: { status: 400 } + + # Wildcard Query without dot path. + - do: + catch: bad_request + search: + body: { + _source: true, + query: { + "wildcard": { + "catalog": "Mock*" + } + } + } + - match: { error.root_cause.0.type: "query_shard_exception" } + - match: { error.root_cause.0.reason: "Can only use wildcard queries on keyword and text fields - not on [catalog] which is of type [flat_object]" } + - match: { status: 400 } + + # Aggregation and Match Query with dot path. + - do: + catch: bad_request + search: + body: { + _source: true, + size: 0, + query: { + "match": { + "ISBN13": "V9781933988177" + } + }, + aggs: { + "avg_rating": { + "avg": { + "field": "catalog.rating" + } + } + } + } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Field [catalog.rating] of type [flat_object] is not supported for aggregation [avg]" } + - match: { status: 400 } + + # Aggregation using average and Match Query with dot path. 
+ - do: + catch: bad_request + search: + body: { + _source: true, + size: 0, + query: { + "match": { + "ISBN13": "V9781933988177" + } + }, + aggs: { + "avg_rating": { + "avg": { + "field": "catalog.rating" + } + } + } + } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Field [catalog.rating] of type [flat_object] is not supported for aggregation [avg]" } + - match: { status: 400 } + + # Aggregation using geolocation and Match Query with dot path. + - do: + catch: bad_request + search: + body: { + _source: true, + size: 0, + query: { + "match": { + "ISBN13": "V9781933988177" + } + }, + aggs: { + "books_in_location": { + "geo_distance": { + "field": "catalog.location", + "origin": "41.3847,-81.7982", + "unit": "km", + "ranges": [ + { + "to": 100 + } + ] + }, + "aggs": { + "total_books": { + "sum": { + "field": "catalog.quantity" + } + } + } + } + } + } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Field [catalog.location] of type [flat_object] is not supported for aggregation [geo_distance]" } + - match: { status: 400 } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index ca6d7bc0562d2..8f0b98fd1c19e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -862,6 +862,9 @@ public void testDocValueFields() throws Exception { .startObject("ip_field") .field("type", "ip") .endObject() + .startObject("flat_object_field") + .field("type", "flat_object") + .endObject() .endObject() .endObject() .endObject() @@ -886,6 +889,10 @@ public void testDocValueFields() throws Exception { .field("boolean_field", true) .field("binary_field", new byte[] { 42, 100 }) .field("ip_field", "::1") + .field("flat_object_field") + .startObject() + .field("foo", "bar") + .endObject() .endObject() ) .get(); @@ -905,7 +912,8 @@ public void testDocValueFields() throws Exception { .addDocValueField("date_field") .addDocValueField("boolean_field") .addDocValueField("binary_field") - .addDocValueField("ip_field"); + .addDocValueField("ip_field") + .addDocValueField("flat_object_field"); SearchResponse searchResponse = builder.get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -926,11 +934,14 @@ public void testDocValueFields() throws Exception { "text_field", "keyword_field", "binary_field", - "ip_field" + "ip_field", + "flat_object_field" ) ) ); - + String json = Strings.toString( + XContentFactory.jsonBuilder().startObject().startObject("flat_object_field").field("foo", "bar").endObject().endObject() + ); assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); assertThat(searchResponse.getHits().getAt(0).getFields().get("integer_field").getValue(), equalTo((Object) 3L)); @@ -946,6 +957,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); 
assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); builder = client().prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); searchResponse = builder.get(); @@ -968,7 +980,8 @@ public void testDocValueFields() throws Exception { "text_field", "keyword_field", "binary_field", - "ip_field" + "ip_field", + "flat_object_field" ) ) ); @@ -988,6 +1001,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ -1002,7 +1016,9 @@ public void testDocValueFields() throws Exception { .addDocValueField("date_field", "use_field_mapping") .addDocValueField("boolean_field", "use_field_mapping") .addDocValueField("binary_field", "use_field_mapping") - .addDocValueField("ip_field", "use_field_mapping"); + .addDocValueField("ip_field", "use_field_mapping") + .addDocValueField("flat_object_field", "use_field_mapping"); + ; searchResponse = builder.get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -1023,7 +1039,8 @@ public void testDocValueFields() throws Exception { "text_field", "keyword_field", "binary_field", - "ip_field" + "ip_field", + "flat_object_field" ) ) ); @@ -1043,6 +1060,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); builder = client().prepareSearch() .setQuery(matchAllQuery()) diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java new file mode 100644 index 0000000000000..71a2381c24f67 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -0,0 +1,257 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.xcontent; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.AbstractXContentParser; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentLocation; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.mapper.ParseContext; +import java.io.IOException; +import java.nio.CharBuffer; +import java.util.ArrayList; + +/** + * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser + * returns XContentParser with one parent field and subfields + * fieldName, fieldName._value, fieldName._valueAndPath + * @opensearch.internal + */ +public class JsonToStringXContentParser extends AbstractXContentParser { + private final String fieldTypeName; + private XContentParser parser; + + private ArrayList valueList = new ArrayList<>(); + private ArrayList valueAndPathList = new ArrayList<>(); + private ArrayList keyList = new ArrayList<>(); + + private XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); + private ParseContext parseContext; + + private NamedXContentRegistry xContentRegistry; + + private DeprecationHandler deprecationHandler; + + private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; + private static final String VALUE_SUFFIX = "._value"; + private static final String DOT_SYMBOL = "."; + private static final String EQUAL_SYMBOL = "="; + + public JsonToStringXContentParser( + NamedXContentRegistry xContentRegistry, + DeprecationHandler deprecationHandler, + ParseContext parseContext, + String fieldTypeName + ) throws IOException { + super(xContentRegistry, deprecationHandler); + this.parseContext = parseContext; + this.deprecationHandler = deprecationHandler; + this.xContentRegistry = xContentRegistry; + this.parser = parseContext.parser(); + this.fieldTypeName = fieldTypeName; + } + + public XContentParser parseObject() throws IOException { + builder.startObject(); + StringBuilder path = new StringBuilder(fieldTypeName); + parseToken(path, null); + builder.field(this.fieldTypeName, keyList); + builder.field(this.fieldTypeName + VALUE_SUFFIX, valueList); + builder.field(this.fieldTypeName + VALUE_AND_PATH_SUFFIX, valueAndPathList); + builder.endObject(); + String jString = XContentHelper.convertToJson(BytesReference.bytes(builder), false, XContentType.JSON); + return JsonXContent.jsonXContent.createParser(this.xContentRegistry, this.deprecationHandler, String.valueOf(jString)); + } + + private void parseToken(StringBuilder path, String currentFieldName) throws IOException { + + while (this.parser.nextToken() != Token.END_OBJECT) { + if (this.parser.currentName() != null) { + currentFieldName = this.parser.currentName(); + } + StringBuilder parsedFields = new StringBuilder(); + + if (this.parser.currentToken() == Token.FIELD_NAME) { + path.append(DOT_SYMBOL + currentFieldName); + this.keyList.add(currentFieldName); + } else if (this.parser.currentToken() == Token.START_ARRAY) { + parseToken(path, currentFieldName); + break; + } else if (this.parser.currentToken() == Token.END_ARRAY) { + // skip + } else if (this.parser.currentToken() == Token.START_OBJECT) { + parseToken(path, currentFieldName); + int dotIndex = path.lastIndexOf(DOT_SYMBOL); + if (dotIndex != -1) { + path.delete(dotIndex, 
path.length()); + } + } else { + if (!path.toString().contains(currentFieldName)) { + path.append(DOT_SYMBOL + currentFieldName); + } + parseValue(parsedFields); + this.valueList.add(parsedFields.toString()); + this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); + int dotIndex = path.lastIndexOf(DOT_SYMBOL); + if (dotIndex != -1) { + path.delete(dotIndex, path.length()); + } + } + + } + } + + private void parseValue(StringBuilder parsedFields) throws IOException { + switch (this.parser.currentToken()) { + case VALUE_BOOLEAN: + case VALUE_NUMBER: + case VALUE_STRING: + case VALUE_NULL: + parsedFields.append(this.parser.textOrNull()); + break; + // Handle other token types as needed + case FIELD_NAME: + case VALUE_EMBEDDED_OBJECT: + case END_ARRAY: + case START_ARRAY: + break; + default: + throw new IOException("Unsupported token type [" + parser.currentToken() + "]"); + } + } + + @Override + public XContentType contentType() { + return XContentType.JSON; + } + + @Override + public Token nextToken() throws IOException { + return this.parser.nextToken(); + } + + @Override + public void skipChildren() throws IOException { + this.parser.skipChildren(); + } + + @Override + public Token currentToken() { + return this.parser.currentToken(); + } + + @Override + public String currentName() throws IOException { + return this.parser.currentName(); + } + + @Override + public String text() throws IOException { + return this.parser.text(); + } + + @Override + public CharBuffer charBuffer() throws IOException { + return this.parser.charBuffer(); + } + + @Override + public Object objectText() throws IOException { + return this.parser.objectText(); + } + + @Override + public Object objectBytes() throws IOException { + return this.parser.objectBytes(); + } + + @Override + public boolean hasTextCharacters() { + return this.parser.hasTextCharacters(); + } + + @Override + public char[] textCharacters() throws IOException { + return this.parser.textCharacters(); + } + + @Override + public int textLength() throws IOException { + return this.parser.textLength(); + } + + @Override + public int textOffset() throws IOException { + return this.parser.textOffset(); + } + + @Override + public Number numberValue() throws IOException { + return this.parser.numberValue(); + } + + @Override + public NumberType numberType() throws IOException { + return this.parser.numberType(); + } + + @Override + public byte[] binaryValue() throws IOException { + return this.parser.binaryValue(); + } + + @Override + public XContentLocation getTokenLocation() { + return this.parser.getTokenLocation(); + } + + @Override + protected boolean doBooleanValue() throws IOException { + return this.parser.booleanValue(); + } + + @Override + protected short doShortValue() throws IOException { + return this.parser.shortValue(); + } + + @Override + protected int doIntValue() throws IOException { + return this.parser.intValue(); + } + + @Override + protected long doLongValue() throws IOException { + return this.parser.longValue(); + } + + @Override + protected float doFloatValue() throws IOException { + return this.parser.floatValue(); + } + + @Override + protected double doDoubleValue() throws IOException { + return this.parser.doubleValue(); + } + + @Override + public boolean isClosed() { + return this.parser.isClosed(); + } + + @Override + public void close() throws IOException { + this.parser.close(); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java 
b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java index 71f4c312a8c58..94bc4806ba0e0 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java @@ -49,9 +49,6 @@ * sure to passes an empty multi-fields list to help prevent conflicting sub-keys from being * registered. * - * Note: we anticipate that 'flattened' fields will be the only implementation of this - * interface. Flattened object fields live in the 'mapper-flattened' module. - * * @opensearch.internal */ public abstract class DynamicKeyFieldMapper extends FieldMapper { diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java new file mode 100644 index 0000000000000..e0b37df5c1734 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -0,0 +1,760 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.util.BytesRef; +import org.opensearch.OpenSearchException; +import org.opensearch.Version; +import org.opensearch.common.Nullable; +import org.opensearch.common.collect.Iterators; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.common.xcontent.JsonToStringXContentParser; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryShardException; +import org.opensearch.search.aggregations.support.CoreValuesSourceType; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Supplier; + +import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; + +/** + * A field mapper for flat_objects. + * This mapper accepts JSON object and treat as string fields in one index. 
+ * @opensearch.internal
+ */
+public final class FlatObjectFieldMapper extends DynamicKeyFieldMapper {
+
+ public static final String CONTENT_TYPE = "flat_object";
+ private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath";
+ private static final String VALUE_SUFFIX = "._value";
+ private static final String DOT_SYMBOL = ".";
+ private static final String EQUAL_SYMBOL = "=";
+
+ /**
+ * In the flat_object field mapper, the field type is similar to the keyword field type:
+ * it cannot be tokenized, can omit norms, and can set index options.
+ * @opensearch.internal
+ */
+ public static class Defaults {
+ public static final FieldType FIELD_TYPE = new FieldType();
+
+ static {
+ FIELD_TYPE.setTokenized(false);
+ FIELD_TYPE.setOmitNorms(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS);
+ FIELD_TYPE.freeze();
+ }
+
+ }
+
+ @Override
+ public MappedFieldType keyedFieldType(String key) {
+ return new FlatObjectFieldType(this.name() + DOT_SYMBOL + key);
+ }
+
+ /**
+ * FlatObjectField is the Lucene field for the flat_object parent field.
+ */
+ public static class FlatObjectField extends Field {
+
+ public FlatObjectField(String field, BytesRef term, FieldType ft) {
+ super(field, term, ft);
+ }
+
+ }
+
+ /**
+ * The builder for the flat_object field mapper using default parameters
+ * @opensearch.internal
+ */
+ public static class Builder extends FieldMapper.Builder {
+
+ public Builder(String name) {
+ super(name, Defaults.FIELD_TYPE);
+ builder = this;
+ }
+
+ private FlatObjectFieldType buildFlatObjectFieldType(BuilderContext context, FieldType fieldType) {
+ return new FlatObjectFieldType(buildFullName(context), fieldType);
+ }
+
+ /**
+ * ValueFieldMapper is the subfield mapper for the values in the JSON;
+ * it uses a {@link KeywordFieldMapper.KeywordField}
+ */
+ private ValueFieldMapper buildValueFieldMapper(BuilderContext context, FieldType fieldType, FlatObjectFieldType fft) {
+ String fullName = buildFullName(context);
+ FieldType vft = new FieldType(fieldType);
+ KeywordFieldMapper.KeywordFieldType valueFieldType = new KeywordFieldMapper.KeywordFieldType(fullName + VALUE_SUFFIX, vft);
+
+ fft.setValueFieldType(valueFieldType);
+ return new ValueFieldMapper(vft, valueFieldType);
+ }
+
+ /**
+ * ValueAndPathFieldMapper is the subfield mapper for the path=value format in the JSON.
+ * also use a {@link KeywordFieldMapper.KeywordField} + */ + private ValueAndPathFieldMapper buildValueAndPathFieldMapper(BuilderContext context, FieldType fieldType, FlatObjectFieldType fft) { + String fullName = buildFullName(context); + FieldType vft = new FieldType(fieldType); + KeywordFieldMapper.KeywordFieldType ValueAndPathFieldType = new KeywordFieldMapper.KeywordFieldType( + fullName + VALUE_AND_PATH_SUFFIX, + vft + ); + fft.setValueAndPathFieldType(ValueAndPathFieldType); + return new ValueAndPathFieldMapper(vft, ValueAndPathFieldType); + } + + @Override + public FlatObjectFieldMapper build(BuilderContext context) { + FieldType fieldtype = new FieldType(Defaults.FIELD_TYPE); + FlatObjectFieldType fft = buildFlatObjectFieldType(context, fieldtype); + return new FlatObjectFieldMapper( + name, + Defaults.FIELD_TYPE, + fft, + buildValueFieldMapper(context, fieldtype, fft), + buildValueAndPathFieldMapper(context, fieldtype, fft), + CopyTo.empty(), + this + ); + } + } + + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); + + /** + * Creates a new TypeParser for flatObjectFieldMapper that does not use ParameterizedFieldMapper + */ + public static class TypeParser implements Mapper.TypeParser { + private final BiFunction builderFunction; + + public TypeParser(BiFunction builderFunction) { + this.builderFunction = builderFunction; + } + + @Override + public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + Builder builder = builderFunction.apply(name, parserContext); + return builder; + } + } + + /** + * flat_object fields type contains its own fieldType, one valueFieldType and one valueAndPathFieldType + * @opensearch.internal + */ + public static final class FlatObjectFieldType extends StringFieldType { + + private final int ignoreAbove; + private final String nullValue; + + private KeywordFieldMapper.KeywordFieldType valueFieldType; + + private KeywordFieldMapper.KeywordFieldType valueAndPathFieldType; + + public FlatObjectFieldType(String name, boolean isSearchable, boolean hasDocValues, Map meta) { + super(name, isSearchable, false, true, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + this.ignoreAbove = Integer.MAX_VALUE; + this.nullValue = null; + } + + public FlatObjectFieldType(String name) { + this(name, true, true, Collections.emptyMap()); + } + + public FlatObjectFieldType(String name, FieldType fieldType) { + super( + name, + fieldType.indexOptions() != IndexOptions.NONE, + false, + true, + new TextSearchInfo(fieldType, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER), + Collections.emptyMap() + ); + this.ignoreAbove = Integer.MAX_VALUE; + this.nullValue = null; + } + + public FlatObjectFieldType(String name, NamedAnalyzer analyzer) { + super(name, true, false, true, new TextSearchInfo(Defaults.FIELD_TYPE, null, analyzer, analyzer), Collections.emptyMap()); + this.ignoreAbove = Integer.MAX_VALUE; + this.nullValue = null; + } + + void setValueFieldType(KeywordFieldMapper.KeywordFieldType valueFieldType) { + this.valueFieldType = valueFieldType; + } + + void setValueAndPathFieldType(KeywordFieldMapper.KeywordFieldType ValueAndPathFieldType) { + this.valueAndPathFieldType = ValueAndPathFieldType; + } + + public KeywordFieldMapper.KeywordFieldType getValueFieldType() { + return this.valueFieldType; + } + + public KeywordFieldMapper.KeywordFieldType getValueAndPathFieldType() { + return this.valueAndPathFieldType; + } + + @Override + public 
String typeName() { + return CONTENT_TYPE; + } + + NamedAnalyzer normalizer() { + return indexAnalyzer(); + } + + /** + * + * Fielddata is an in-memory data structure that is used for aggregations, sorting, and scripting. + * @param fullyQualifiedIndexName the name of the index this field-data is build for + * @param searchLookup a {@link SearchLookup} supplier to allow for accessing other fields values in the context of runtime fields + * @return IndexFieldData.Builder + */ + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + failIfNoDocValues(); + return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.BYTES); + } + + @Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + + return new SourceValueFetcher(name(), context, nullValue) { + @Override + protected String parseSourceValue(Object value) { + String flatObjectKeywordValue = value.toString(); + + if (flatObjectKeywordValue.length() > ignoreAbove) { + return null; + } + + NamedAnalyzer normalizer = normalizer(); + if (normalizer == null) { + return flatObjectKeywordValue; + } + + try { + return normalizeValue(normalizer, name(), flatObjectKeywordValue); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }; + } + + @Override + public Object valueForDisplay(Object value) { + if (value == null) { + return null; + } + // flat_objects are internally stored as utf8 bytes + BytesRef binaryValue = (BytesRef) value; + return binaryValue.utf8ToString(); + } + + @Override + protected BytesRef indexedValueForSearch(Object value) { + if (getTextSearchInfo().getSearchAnalyzer() == Lucene.KEYWORD_ANALYZER) { + // flat_object analyzer with the default attribute source which encodes terms using UTF8 + // in that case we skip normalization, which may be slow if there many terms need to + // parse (eg. 
large terms query) since Analyzer.normalize involves things like creating + // attributes through reflection + // This if statement will be used whenever a normalizer is NOT configured + return super.indexedValueForSearch(value); + } + + if (value == null) { + return null; + } + value = inputToString(value); + return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); + } + + /** + * redirect queries with rewrite value to rewriteSearchValue and directSubFieldName + */ + @Override + public Query termQuery(Object value, @Nullable QueryShardContext context) { + + String searchValueString = inputToString(value); + String directSubFieldName = directSubfield(); + String rewriteSearchValue = rewriteValue(searchValueString); + + failIfNotIndexed(); + Query query; + query = new TermQuery(new Term(directSubFieldName, indexedValueForSearch(rewriteSearchValue))); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + + @Override + public Query termsQuery(List values, QueryShardContext context) { + failIfNotIndexed(); + String directedSearchFieldName = directSubfield(); + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + String rewriteValues = rewriteValue(inputToString(values.get(i))); + + bytesRefs[i] = indexedValueForSearch(new BytesRef(rewriteValues)); + + } + + return new TermInSetQuery(directedSearchFieldName, bytesRefs); + } + + /** + * To direct search fields, if a dot path was used in search query, + * then direct to flatObjectFieldName._valueAndPath subfield, + * else, direct to flatObjectFieldName._value subfield. + * @return directedSubFieldName + */ + public String directSubfield() { + if (name().contains(DOT_SYMBOL)) { + String[] dotPathList = name().split("\\."); + return dotPathList[0] + VALUE_AND_PATH_SUFFIX; + } else { + return this.valueFieldType.name(); + } + } + + /** + * If the search key is assigned with value, + * the dot path was used in search query, then + * rewrite the searchValueString as the format "dotpath=value", + * @return rewriteSearchValue + */ + public String rewriteValue(String searchValueString) { + if (!name().contains(DOT_SYMBOL)) { + return searchValueString; + } else { + String rewriteSearchValue = new StringBuilder().append(name()).append(EQUAL_SYMBOL).append(searchValueString).toString(); + return rewriteSearchValue; + } + + } + + private String inputToString(Object inputValue) { + if (inputValue instanceof Integer) { + String inputToString = Integer.toString((Integer) inputValue); + return inputToString; + } else if (inputValue instanceof Float) { + String inputToString = Float.toString((Float) inputValue); + return inputToString; + } else if (inputValue instanceof Boolean) { + String inputToString = Boolean.toString((Boolean) inputValue); + return inputToString; + } else if (inputValue instanceof Short) { + String inputToString = Short.toString((Short) inputValue); + return inputToString; + } else if (inputValue instanceof Long) { + String inputToString = Long.toString((Long) inputValue); + return inputToString; + } else if (inputValue instanceof Double) { + String inputToString = Double.toString((Double) inputValue); + return inputToString; + } else if (inputValue instanceof BytesRef) { + String inputToString = (((BytesRef) inputValue).utf8ToString()); + return inputToString; + } else if (inputValue instanceof String) { + String inputToString = (String) inputValue; + return inputToString; + } else if (inputValue instanceof Version) { + String 
inputToString = inputValue.toString(); + return inputToString; + } else { + // default to cast toString + return inputValue.toString(); + } + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) { + String directSubfield = directSubfield(); + String rewriteValue = rewriteValue(value); + + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[prefix] queries cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false. For optimised prefix queries on text " + + "fields please enable [index_prefixes]." + ); + } + failIfNotIndexed(); + if (method == null) { + method = MultiTermQuery.CONSTANT_SCORE_REWRITE; + } + if (caseInsensitive) { + return AutomatonQueries.caseInsensitivePrefixQuery((new Term(directSubfield, indexedValueForSearch(rewriteValue))), method); + } + return new PrefixQuery(new Term(directSubfield, indexedValueForSearch(rewriteValue)), method); + } + + @Override + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + String directSubfield = directSubfield(); + String rewriteUpperTerm = rewriteValue(inputToString(upperTerm)); + String rewriteLowerTerm = rewriteValue(inputToString(lowerTerm)); + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[range] queries on [text] or [keyword] fields cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false." + ); + } + failIfNotIndexed(); + return new TermRangeQuery( + directSubfield, + lowerTerm == null ? null : indexedValueForSearch(rewriteLowerTerm), + upperTerm == null ? null : indexedValueForSearch(rewriteUpperTerm), + includeLower, + includeUpper + ); + } + + /** + * if there is dot path. query the field name in flatObject parent field. 
+ * else query in the _field_names system field.
+ */
+ @Override
+ public Query existsQuery(QueryShardContext context) {
+ String searchKey;
+ String searchField;
+ if (name().contains(DOT_SYMBOL)) {
+ searchKey = name().split("\\.")[0];
+ searchField = name();
+ } else {
+ searchKey = FieldNamesFieldMapper.NAME;
+ searchField = name();
+ }
+ return new TermQuery(new Term(searchKey, indexedValueForSearch(searchField)));
+ }
+
+ @Override
+ public Query wildcardQuery(
+ String value,
+ @Nullable MultiTermQuery.RewriteMethod method,
+ boolean caseInsensitve,
+ QueryShardContext context
+ ) {
+ // flat_object field types are always normalized, so ignore case sensitivity and force normalize the wildcard
+ // query text
+ throw new QueryShardException(
+ context,
+ "Can only use wildcard queries on keyword and text fields - not on [" + name() + "] which is of type [" + typeName() + "]"
+ );
+
+ }
+
+ }
+
+ private final ValueFieldMapper valueFieldMapper;
+ private final ValueAndPathFieldMapper valueAndPathFieldMapper;
+
+ FlatObjectFieldMapper(
+ String simpleName,
+ FieldType fieldType,
+ FlatObjectFieldType mappedFieldType,
+ ValueFieldMapper valueFieldMapper,
+ ValueAndPathFieldMapper valueAndPathFieldMapper,
+ CopyTo copyTo,
+ Builder builder
+ ) {
+ super(simpleName, fieldType, mappedFieldType, copyTo);
+ assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0;
+ this.fieldType = fieldType;
+ this.valueFieldMapper = valueFieldMapper;
+ this.valueAndPathFieldMapper = valueAndPathFieldMapper;
+ this.mappedFieldType = mappedFieldType;
+ }
+
+ @Override
+ protected FlatObjectFieldMapper clone() {
+ return (FlatObjectFieldMapper) super.clone();
+ }
+
+ @Override
+ protected void mergeOptions(FieldMapper other, List conflicts) {
+
+ }
+
+ @Override
+ public FlatObjectFieldType fieldType() {
+ return (FlatObjectFieldType) super.fieldType();
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context) throws IOException {
+ String fieldName = null;
+
+ if (context.externalValueSet()) {
+ String value = context.externalValue().toString();
+ parseValueAddFields(context, value, fieldType().name());
+ } else {
+ JsonToStringXContentParser JsonToStringParser = new JsonToStringXContentParser(
+ NamedXContentRegistry.EMPTY,
+ DeprecationHandler.IGNORE_DEPRECATIONS,
+ context,
+ fieldType().name()
+ );
+ /**
+ * JsonToStringXContentParser transforms the JSON object into string fields inside an XContentParser:
+ * it reads the JSON object and parses it into lists of strings.
+ */
+ XContentParser parser = JsonToStringParser.parseObject();
+
+ XContentParser.Token currentToken;
+ while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ switch (currentToken) {
+ case FIELD_NAME:
+ fieldName = parser.currentName();
+ break;
+ case VALUE_STRING:
+ String value = parser.textOrNull();
+ parseValueAddFields(context, value, fieldName);
+ break;
+ }
+
+ }
+
+ }
+
+ }
+
+ @Override
+ public Iterator iterator() {
+ List subIterators = new ArrayList<>();
+ if (valueFieldMapper != null) {
+ subIterators.add(valueFieldMapper);
+ }
+ if (valueAndPathFieldMapper != null) {
+ subIterators.add(valueAndPathFieldMapper);
+ }
+ if (subIterators.size() == 0) {
+ return super.iterator();
+ }
+ @SuppressWarnings("unchecked")
+ Iterator concat = Iterators.concat(super.iterator(), subIterators.iterator());
+ return concat;
+ }
+
+ /**
+ * parseValueAddFields stores data into Lucene.
+ * The JsonToStringXContentParser returns an XContentParser with three string fields:
+ * fieldName, fieldName._value, fieldName._valueAndPath.
+ * parseValueAddFields recognizes the subfield by the string field name:
+ * fieldName is stored through the parent FlatObjectFieldMapper, which contains all the keys,
+ * fieldName._value is stored through the valueFieldMapper, which contains the values of the JSON object,
+ * fieldName._valueAndPath is stored through the valueAndPathFieldMapper, which contains the "path=value" format.
+ */
+ private void parseValueAddFields(ParseContext context, String value, String fieldName) throws IOException {
+
+ NamedAnalyzer normalizer = fieldType().normalizer();
+ if (normalizer != null) {
+ value = normalizeValue(normalizer, name(), value);
+ }
+
+ String[] valueTypeList = fieldName.split("\\._");
+ String valueType = "._" + valueTypeList[valueTypeList.length - 1];
+
+ if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) {
+ // convert to utf8 only once before feeding postings/dv/stored fields
+
+ final BytesRef binaryValue = new BytesRef(fieldType().name() + DOT_SYMBOL + value);
+ Field field = new FlatObjectField(fieldType().name(), binaryValue, fieldType);
+
+ if (fieldType().hasDocValues() == false && fieldType.omitNorms()) {
+ createFieldNamesField(context);
+ }
+ if (fieldName.equals(fieldType().name())) {
+ context.doc().add(field);
+ }
+ if (valueType.equals(VALUE_SUFFIX)) {
+ if (valueFieldMapper != null) {
+ valueFieldMapper.addField(context, value);
+ }
+ }
+ if (valueType.equals(VALUE_AND_PATH_SUFFIX)) {
+ if (valueAndPathFieldMapper != null) {
+ valueAndPathFieldMapper.addField(context, value);
+ }
+ }
+
+ if (fieldType().hasDocValues()) {
+ if (context.doc().getField(fieldType().name()) == null || !context.doc().getFields(fieldType().name()).equals(field)) {
+ if (fieldName.equals(fieldType().name())) {
+ context.doc().add(new SortedSetDocValuesField(fieldType().name(), binaryValue));
+ }
+ if (valueType.equals(VALUE_SUFFIX)) {
+ if (valueFieldMapper != null) {
+ context.doc().add(new SortedSetDocValuesField(fieldType().name() + VALUE_SUFFIX, binaryValue));
+ }
+ }
+ if (valueType.equals(VALUE_AND_PATH_SUFFIX)) {
+ if (valueAndPathFieldMapper != null) {
+ context.doc().add(new SortedSetDocValuesField(fieldType().name() + VALUE_AND_PATH_SUFFIX, binaryValue));
+ }
+ }
+
+ }
+ }
+
+ }
+
+ }
+
+ private static String normalizeValue(NamedAnalyzer normalizer, String field, String value) throws IOException {
+ String normalizerErrorMessage = "The normalization token stream is "
+ + "expected to produce exactly 1 token, but got 0 for analyzer "
+ + normalizer
+ + " and input \""
+ + value
+ + "\"";
+ try (TokenStream ts = normalizer.tokenStream(field, value)) {
+ final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+ ts.reset();
+ if (ts.incrementToken() == false) {
+ throw new IllegalStateException(normalizerErrorMessage);
+ }
+ final String newValue = termAtt.toString();
+ if (ts.incrementToken()) {
+ throw new IllegalStateException(normalizerErrorMessage);
+ }
+ ts.end();
+ return newValue;
+ }
+ }
+
+ @Override
+ protected String contentType() {
+ return CONTENT_TYPE;
+ }
+
+ private static final class ValueAndPathFieldMapper extends FieldMapper {
+
+ protected ValueAndPathFieldMapper(FieldType fieldType, KeywordFieldMapper.KeywordFieldType mappedFieldType) {
+ super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty());
+ }
+
+ void addField(ParseContext context, String
value) { + final BytesRef binaryValue = new BytesRef(value); + if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { + Field field = new KeywordFieldMapper.KeywordField(fieldType().name(), binaryValue, fieldType); + + context.doc().add(field); + + if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { + createFieldNamesField(context); + } + } + } + + @Override + protected void parseCreateField(ParseContext context) { + throw new UnsupportedOperationException(); + } + + @Override + protected void mergeOptions(FieldMapper other, List conflicts) { + + } + + @Override + protected String contentType() { + return "valueAndPath"; + } + + @Override + public String toString() { + return fieldType().toString(); + } + + } + + private static final class ValueFieldMapper extends FieldMapper { + + protected ValueFieldMapper(FieldType fieldType, KeywordFieldMapper.KeywordFieldType mappedFieldType) { + super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); + } + + void addField(ParseContext context, String value) { + final BytesRef binaryValue = new BytesRef(value); + if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { + Field field = new KeywordFieldMapper.KeywordField(fieldType().name(), binaryValue, fieldType); + context.doc().add(field); + + if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { + createFieldNamesField(context); + } + } + } + + @Override + protected void parseCreateField(ParseContext context) { + throw new UnsupportedOperationException(); + } + + @Override + protected void mergeOptions(FieldMapper other, List conflicts) { + + } + + @Override + protected String contentType() { + return "value"; + } + + @Override + public String toString() { + return fieldType().toString(); + } + } + +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index 5310e1b1e8397..696536cb85c9d 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -51,6 +51,7 @@ import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.FieldAliasMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; +import org.opensearch.index.mapper.FlatObjectFieldMapper; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IgnoredFieldMapper; @@ -162,6 +163,7 @@ public static Map getMappers(List mappe mappers.put(CompletionFieldMapper.CONTENT_TYPE, CompletionFieldMapper.PARSER); mappers.put(FieldAliasMapper.CONTENT_TYPE, new FieldAliasMapper.TypeParser()); mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); + mappers.put(FlatObjectFieldMapper.CONTENT_TYPE, FlatObjectFieldMapper.PARSER); for (MapperPlugin mapperPlugin : mapperPlugins) { for (Map.Entry entry : mapperPlugin.getMappers().entrySet()) { diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java index 69be53dc1016b..10ccc99f44603 100644 --- a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataTestCase.java @@ -51,6 +51,7 @@ import 
org.opensearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.ContentPath; +import org.opensearch.index.mapper.FlatObjectFieldMapper; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -142,6 +143,8 @@ public > IFD getForField(String type, String field .fieldType(); } else if (type.equals("geo_point")) { fieldType = new GeoPointFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType(); + } else if (type.equals("flat_object")) { + fieldType = new FlatObjectFieldMapper.Builder(fieldName).docValues(docValues).build(context).fieldType(); } else if (type.equals("binary")) { fieldType = new BinaryFieldMapper.Builder(fieldName, docValues).build(context).fieldType(); } else { diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java new file mode 100644 index 0000000000000..54393b10a3c5d --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldDataTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.index.mapper; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.Strings; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.fielddata.AbstractFieldDataTestCase; +import org.opensearch.index.fielddata.IndexFieldData; + +import java.util.List; + +public class FlatObjectFieldDataTests extends AbstractFieldDataTestCase { + private String FIELD_TYPE = "flat_object"; + + @Override + protected boolean hasDocValues() { + return true; + } + + public void testDocValue() throws Exception { + String mapping = Strings.toString( + XContentFactory.jsonBuilder() + .startObject() + .startObject("test") + .startObject("properties") + .startObject("field") + .field("type", FIELD_TYPE) + .endObject() + .endObject() + .endObject() + .endObject() + ); + final DocumentMapper mapper = mapperService.documentMapperParser().parse("test", new CompressedXContent(mapping)); + + XContentBuilder json = XContentFactory.jsonBuilder().startObject().startObject("field").field("foo", "bar").endObject().endObject(); + ParsedDocument d = mapper.parse(new SourceToParse("test", "1", BytesReference.bytes(json), XContentType.JSON)); + writer.addDocument(d.rootDoc()); + writer.commit(); + + IndexFieldData fieldData = getForField("field"); + List readers = refreshReader(); + assertEquals(1, readers.size()); + + IndexFieldData valueFieldData = getForField("field._value"); + List valueReaders = refreshReader(); + assertEquals(1, valueReaders.size()); + } + + @Override + protected String getFieldDataType() { + return FIELD_TYPE; + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java new file mode 100644 index 0000000000000..309b150f11748 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -0,0 +1,143 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.opensearch.common.Strings; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.query.QueryShardContext; + +import java.io.IOException; + +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.StringContains.containsString; + +public class FlatObjectFieldMapperTests extends MapperTestCase { + private static final String FIELD_TYPE = "flat_object"; + private static final String VALUE_AND_PATH_SUFFIX = "._valueAndPath"; + private static final String VALUE_SUFFIX = "._value"; + + protected boolean supportsMeta() { + return false; + } + + protected boolean supportsOrIgnoresBoost() { + return false; + } + + public void testMapperServiceHasParser() throws IOException { + MapperService mapperService = createMapperService(fieldMapping(b -> { minimalMapping(b); })); + Mapper.TypeParser parser = mapperService.mapperRegistry.getMapperParsers().get(FIELD_TYPE); + assertNotNull(parser); + assertTrue(parser instanceof FlatObjectFieldMapper.TypeParser); + } + + protected void assertExistsQuery(MapperService mapperService) throws IOException { + ParseContext.Document fields = mapperService.documentMapper().parse(source(this::writeField)).rootDoc(); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + MappedFieldType fieldType = mapperService.fieldType("field"); + Query query = fieldType.existsQuery(queryShardContext); + assertExistsQuery(fieldType, query, fields); + + } + + protected void assertExistsQuery(MappedFieldType fieldType, Query query, ParseContext.Document fields) { + // we always perform a term query against _field_names, even when the field + // is not added to _field_names because it is not indexed nor stored + assertThat(query, instanceOf(TermQuery.class)); + TermQuery termQuery = (TermQuery) query; + assertEquals(FieldNamesFieldMapper.NAME, termQuery.getTerm().field()); + assertEquals("field", termQuery.getTerm().text()); + if (fieldType.isSearchable() || fieldType.isStored()) { + assertNotNull(fields.getField(FieldNamesFieldMapper.NAME)); + } else { + assertNoFieldNamesField(fields); + } + } + + public void minimalMapping(XContentBuilder b) throws IOException { + b.field("type", FIELD_TYPE); + } + + /** + * Writes a sample value for the field to the provided {@link XContentBuilder}. 
+ * @param builder builder + */ + protected void writeFieldValue(XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field("foo", "bar"); + builder.endObject(); + } + + public void testMinimalToMaximal() throws IOException { + XContentBuilder orig = JsonXContent.contentBuilder().startObject(); + createMapperService(fieldMapping(this::minimalMapping)).documentMapper().mapping().toXContent(orig, ToXContent.EMPTY_PARAMS); + orig.endObject(); + XContentBuilder parsedFromOrig = JsonXContent.contentBuilder().startObject(); + createMapperService(orig).documentMapper().mapping().toXContent(parsedFromOrig, ToXContent.EMPTY_PARAMS); + parsedFromOrig.endObject(); + assertEquals(Strings.toString(orig), Strings.toString(parsedFromOrig)); + assertParseMaximalWarnings(); + } + + public void testDefaults() throws Exception { + XContentBuilder mapping = fieldMapping(this::minimalMapping); + DocumentMapper mapper = createDocumentMapper(mapping); + assertEquals(Strings.toString(mapping), mapper.mappingSource().toString()); + + String json = Strings.toString( + XContentFactory.jsonBuilder().startObject().startObject("field").field("foo", "bar").endObject().endObject() + ); + + ParsedDocument doc = mapper.parse(source(json)); + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertEquals(new BytesRef("field.foo"), fields[0].binaryValue()); + + IndexableFieldType fieldType = fields[0].fieldType(); + assertFalse(fieldType.tokenized()); + assertFalse(fieldType.stored()); + assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); + assertEquals(DocValuesType.NONE, fieldType.docValuesType()); + + // Test internal substring fields as well + IndexableField[] fieldValues = doc.rootDoc().getFields("field" + VALUE_SUFFIX); + assertEquals(2, fieldValues.length); + assertTrue(fieldValues[0] instanceof KeywordFieldMapper.KeywordField); + assertEquals(new BytesRef("bar"), fieldValues[0].binaryValue()); + + IndexableField[] fieldValueAndPaths = doc.rootDoc().getFields("field" + VALUE_AND_PATH_SUFFIX); + assertEquals(2, fieldValues.length); + assertTrue(fieldValueAndPaths[0] instanceof KeywordFieldMapper.KeywordField); + assertEquals(new BytesRef("field.foo=bar"), fieldValueAndPaths[0].binaryValue()); + } + + public void testNullValue() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.nullField("field")))); + assertThat(e.getMessage(), containsString("object mapping for [_doc] tried to parse field [field] as object")); + + } + + @Override + protected void registerParameters(ParameterChecker checker) throws IOException { + // In the future we will want to make sure parameter updates are covered. 
+ }
+
+}
diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java
index ce5c361ffcf69..e9b2d40fd4ede 100644
--- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java
+++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java
@@ -50,6 +50,7 @@
 import org.opensearch.index.analysis.AnalyzerScope;
 import org.opensearch.index.analysis.NamedAnalyzer;
 import org.opensearch.index.mapper.BinaryFieldMapper;
+import org.opensearch.index.mapper.FlatObjectFieldMapper;
 import org.opensearch.index.mapper.GeoPointFieldMapper;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.TextFieldMapper;
@@ -102,7 +103,8 @@ protected List getSupportedValuesSourceTypes() {
 protected List unsupportedMappedFieldTypes() {
 return Arrays.asList(
 BinaryFieldMapper.CONTENT_TYPE, // binary fields are not supported because they do not have analyzers
- GeoPointFieldMapper.CONTENT_TYPE // geopoint fields cannot use term queries
+ GeoPointFieldMapper.CONTENT_TYPE, // geopoint fields cannot use term queries
+ FlatObjectFieldMapper.CONTENT_TYPE // flat_object fields are not supported aggregations
 );
 }

From 312bc820d7420a10d7cae0c0d5ce85c295a0a275 Mon Sep 17 00:00:00 2001
From: Mingshi Liu <113382730+mingshl@users.noreply.github.com>
Date: Mon, 10 Apr 2023 17:04:32 -0700
Subject: [PATCH 27/28] Extend the version range to run flat-object field REST Yaml test on 2.7.0 (#7081)

Signed-off-by: Mingshi Liu
---
 .../resources/rest-api-spec/test/painless/30_search.yml | 4 ++--
 .../resources/rest-api-spec/test/index/90_flat_object.yml | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml
index b360d8dc01ccf..4b3d5bd9e2980 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/30_search.yml
@@ -487,8 +487,8 @@
 "Flat-object fields from within the scripting":
 - skip:
- version: " - 2.99.99"
- reason: "flat_object is introduced in 3.0.0 in main branch"
+ version: " - 2.6.99"
+ reason: "flat_object is introduced in 2.7.0"
 - do:
 indices.create:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml
index 88cb2f1716c9b..0a5f7444efd17 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml
@@ -80,8 +80,8 @@ teardown:
 # and no dynamic fields were created.
 "Mappings":
 - skip:
- version: " - 2.99.99"
- reason: "flat_object is introduced in 3.0.0 in main branch"
+ version: " - 2.6.99"
+ reason: "flat_object is introduced in 2.7.0"
 - do:
 indices.get_mapping:

From 535cbacba703d9b5a009a4249908b0b894ad92fd Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Mon, 10 Apr 2023 20:27:58 -0400
Subject: [PATCH 28/28] Bump org.gradle.test-retry from 1.5.1 to 1.5.2 (#7067) (#7076)

* Bump org.gradle.test-retry from 1.5.1 to 1.5.2

Bumps org.gradle.test-retry from 1.5.1 to 1.5.2.
---
updated-dependencies:
- dependency-name: org.gradle.test-retry
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

* Update changelog

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 CHANGELOG.md | 1 +
 build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2ac42429471b8..6fc521b50a489 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add `com.github.luben:zstd-jni:1.5.4-1` ([#3577](https://github.com/opensearch-project/OpenSearch/pull/3577))
 - Bump: Netty from 4.1.90.Final to 4.1.91.Final , ASM 9.4 to ASM 9.5, ByteBuddy 1.14.2 to 1.14.3 ([#6981](https://github.com/opensearch-project/OpenSearch/pull/6981))
 - Bump `com.azure:azure-storage-blob` from 12.15.0 to 12.21.1
+- Bump `org.gradle.test-retry` from 1.5.1 to 1.5.2
 ### Changed
 - Require MediaType in Strings.toString API ([#6009](https://github.com/opensearch-project/OpenSearch/pull/6009))
diff --git a/build.gradle b/build.gradle
index be733233f60b5..7f928ddee7aed 100644
--- a/build.gradle
+++ b/build.gradle
@@ -54,7 +54,7 @@ plugins {
 id 'opensearch.docker-support'
 id 'opensearch.global-build-info'
 id "com.diffplug.spotless" version "6.17.0" apply false
- id "org.gradle.test-retry" version "1.5.1" apply false
+ id "org.gradle.test-retry" version "1.5.2" apply false
 id "test-report-aggregation"
 id 'jacoco-report-aggregation'
 }