From f71b77df7a01077c8740da96faa9f74e9f955ae8 Mon Sep 17 00:00:00 2001
From: Evgenia Badyanova
Date: Mon, 17 Dec 2018 11:59:45 -0500
Subject: [PATCH] Fixing line length for EnvironmentTests and RecoveryTests (#36657)

Relates #34884
---
 .../resources/checkstyle_suppressions.xml     |  6 --
 .../elasticsearch/env/EnvironmentTests.java   |  3 +-
 .../env/NodeEnvironmentTests.java             |  7 +-
 .../recovery/FullRollingRestartIT.java        | 25 +++++---
 .../recovery/RecoveryWhileUnderLoadIT.java    | 64 ++++++++++++++-----
 .../elasticsearch/recovery/RelocationIT.java  | 55 +++++++++++-----
 .../recovery/TruncatedRecoveryIT.java         |  6 +-
 7 files changed, 113 insertions(+), 53 deletions(-)

diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 6e628eab0cbd3..55fdcecb084a9 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -64,17 +64,11 @@
-
-
-
-
-
-
diff --git a/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java
index 5ada31b612941..c87a896d318be 100644
--- a/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java
+++ b/server/src/test/java/org/elasticsearch/env/EnvironmentTests.java
@@ -55,7 +55,8 @@ public void testRepositoryResolution() throws IOException {
         Environment environment = newEnvironment();
         assertThat(environment.resolveRepoFile("/test/repos/repo1"), nullValue());
         assertThat(environment.resolveRepoFile("test/repos/repo1"), nullValue());
-        environment = newEnvironment(Settings.builder().putList(Environment.PATH_REPO_SETTING.getKey(), "/test/repos", "/another/repos", "/test/repos/../other").build());
+        environment = newEnvironment(Settings.builder()
+            .putList(Environment.PATH_REPO_SETTING.getKey(), "/test/repos", "/another/repos", "/test/repos/../other").build());
         assertThat(environment.resolveRepoFile("/test/repos/repo1"), notNullValue());
         assertThat(environment.resolveRepoFile("test/repos/repo1"), notNullValue());
         assertThat(environment.resolveRepoFile("/another/repos/repo1"), notNullValue());
diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
index 7a24ebaf0484c..63635f5cbe7a4 100644
--- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
+++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
@@ -352,7 +352,8 @@ public void run() {
                     for (int i = 0; i < iters; i++) {
                         int shard = randomIntBetween(0, counts.length - 1);
                         try {
-                            try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "fooUUID", shard), scaledRandomIntBetween(0, 10))) {
+                            try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "fooUUID", shard),
+                                scaledRandomIntBetween(0, 10))) {
                                 counts[shard].value++;
                                 countsAtomic[shard].incrementAndGet();
                                 assertEquals(flipFlop[shard].incrementAndGet(), 1);
@@ -386,7 +387,9 @@ public void testCustomDataPaths() throws Exception {
         final Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "myindexUUID").build();
         IndexSettings s1 = IndexSettingsModule.newIndexSettings("myindex", indexSettings);
-        IndexSettings s2 = IndexSettingsModule.newIndexSettings("myindex", Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build());
+        IndexSettings s2 = IndexSettingsModule.newIndexSettings("myindex", Settings.builder()
+            .put(indexSettings)
+            .put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build());
         Index index = new Index("myindex", "myindexUUID");
         ShardId sid = new ShardId(index, 0);
diff --git a/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java b/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java
index 6624d4eb8ded4..0fb5f7ac114d6 100644
--- a/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java
+++ b/server/src/test/java/org/elasticsearch/recovery/FullRollingRestartIT.java
@@ -76,14 +76,16 @@ public void testFullRollingRestart() throws Exception {
         internalCluster().startNode(settings);
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
         logger.info("--> add two more nodes");
         internalCluster().startNode(settings);
         internalCluster().startNode(settings);
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("5"));
         logger.info("--> refreshing and checking data");
         refresh();
@@ -94,11 +96,13 @@ public void testFullRollingRestart() throws Exception {
         // now start shutting nodes down
         internalCluster().stopRandomDataNode();
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("4"));
         internalCluster().stopRandomDataNode();
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("3"));
         logger.info("--> stopped two nodes, verifying data");
         refresh();
@@ -109,12 +113,14 @@ public void testFullRollingRestart() throws Exception {
         // closing the 3rd node
         internalCluster().stopRandomDataNode();
         // make sure the cluster state is green, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("2"));
         internalCluster().stopRandomDataNode();
         // make sure the cluster state is yellow, and all has been recovered
-        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("1"));
+        assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout)
+            .setWaitForYellowStatus().setWaitForNoRelocatingShards(true).setWaitForNodes("1"));
         logger.info("--> one node left, verifying data");
         refresh();
@@ -133,7 +139,9 @@ public void testNoRebalanceOnRollingRestart() throws Exception {
      * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated.
      * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen.
      */
-        prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "6").put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0").put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(1))).get();
+        prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "6")
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(1))).get();
         for (int i = 0; i < 100; i++) {
             client().prepareIndex("test", "type1", Long.toString(i))
@@ -152,7 +160,8 @@ public void testNoRebalanceOnRollingRestart() throws Exception {
         recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
         for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
-            assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state,
+            assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: "
+                + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state,
                 recoveryState.getRecoverySource().getType() != RecoverySource.Type.PEER || recoveryState.getPrimary() == false);
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
index 0d2235c30a425..c0345be6fae01 100644
--- a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
+++ b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java
@@ -53,14 +53,18 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
-@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.index.seqno:TRACE,org.elasticsearch.indices.recovery:TRACE")
+@TestLogging("_root:DEBUG,org.elasticsearch.index.shard:TRACE,org.elasticsearch.cluster.service:TRACE," +
+    "org.elasticsearch.index.seqno:TRACE,org.elasticsearch.indices.recovery:TRACE")
 public class RecoveryWhileUnderLoadIT extends ESIntegTestCase {
     private final Logger logger = LogManager.getLogger(RecoveryWhileUnderLoadIT.class);
     public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception {
         logger.info("--> creating test index ...");
         int numberOfShards = numberOfShards();
-        assertAcked(prepareCreate("test", 1, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
+        assertAcked(prepareCreate("test", 1, Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
+            .put(SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
         final int totalNumDocs = scaledRandomIntBetween(200, 10000);
         int waitFor = totalNumDocs / 10;
@@ -92,7 +96,8 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception {
             logger.info("--> waiting for GREEN health status ...");
             // make sure the cluster state is green, and all has been recovered
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus());
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus());
             logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
             waitForDocs(totalNumDocs, indexer);
@@ -113,7 +118,10 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() throws Exception {
         logger.info("--> creating test index ...");
         int numberOfShards = numberOfShards();
-        assertAcked(prepareCreate("test", 1, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
+        assertAcked(prepareCreate("test", 1, Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, numberOfShards)
+            .put(SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
         final int totalNumDocs = scaledRandomIntBetween(200, 10000);
         int waitFor = totalNumDocs / 10;
@@ -142,7 +150,8 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr
             allowNodes("test", 4);
             logger.info("--> waiting for GREEN health status ...");
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus());
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus());
             logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
@@ -164,7 +173,9 @@ public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() thr
     public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception {
         logger.info("--> creating test index ...");
         int numberOfShards = numberOfShards();
-        assertAcked(prepareCreate("test", 2, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
+        assertAcked(prepareCreate("test", 2, Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
         final int totalNumDocs = scaledRandomIntBetween(200, 10000);
         int waitFor = totalNumDocs / 10;
@@ -194,7 +205,10 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception
             allowNodes("test", 4);
             logger.info("--> waiting for GREEN health status ...");
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus().setWaitForNoRelocatingShards(true));
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m")
+                .setWaitForGreenStatus()
+                .setWaitForNoRelocatingShards(true));
             logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
             waitForDocs(totalNumDocs, indexer);
@@ -205,23 +219,31 @@ public void testRecoverWhileUnderLoadWithReducedAllowedNodes() throws Exception
             logger.info("--> allow 3 nodes for index [test] ...");
             allowNodes("test", 3);
             logger.info("--> waiting for relocations ...");
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m")
+                .setWaitForNoRelocatingShards(true));
             logger.info("--> allow 2 nodes for index [test] ...");
             allowNodes("test", 2);
             logger.info("--> waiting for relocations ...");
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m")
+                .setWaitForNoRelocatingShards(true));
             logger.info("--> allow 1 nodes for index [test] ...");
             allowNodes("test", 1);
             logger.info("--> waiting for relocations ...");
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m")
+                .setWaitForNoRelocatingShards(true));
             logger.info("--> marking and waiting for indexing threads to stop ...");
             indexer.stop();
             logger.info("--> indexing threads stopped");
-            assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForNoRelocatingShards(true));
+            assertNoTimeout(client().admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID).setTimeout("5m")
+                .setWaitForNoRelocatingShards(true));
             logger.info("--> refreshing the index");
             refreshAndAssert();
@@ -235,7 +257,10 @@ public void testRecoverWhileRelocating() throws Exception {
         final int numReplicas = 0;
         logger.info("--> creating test index ...");
         int allowNodes = 2;
-        assertAcked(prepareCreate("test", 3, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numShards).put(SETTING_NUMBER_OF_REPLICAS, numReplicas).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
+        assertAcked(prepareCreate("test", 3, Settings.builder()
+            .put(SETTING_NUMBER_OF_SHARDS, numShards)
+            .put(SETTING_NUMBER_OF_REPLICAS, numReplicas)
+            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)));
         final int numDocs = scaledRandomIntBetween(200, 9999);
@@ -258,7 +283,8 @@ public void testRecoverWhileRelocating() throws Exception {
             logger.info("--> indexing threads stopped");
             logger.info("--> bump up number of replicas to 1 and allow all nodes to hold the index");
             allowNodes("test", 3);
-            assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("number_of_replicas", 1)).get());
+            assertAcked(client().admin().indices().prepareUpdateSettings("test")
+                .setSettings(Settings.builder().put("number_of_replicas", 1)).get());
             ensureGreen(TimeValue.timeValueMinutes(5));
             logger.info("--> refreshing the index");
@@ -273,7 +299,8 @@ private void iterateAssertCount(final int numberOfShards, final int iterations,
         SearchResponse[] iterationResults = new SearchResponse[iterations];
         boolean error = false;
         for (int i = 0; i < iterations; i++) {
-            SearchResponse searchResponse = client().prepareSearch().setSize((int) numberOfDocs).setQuery(matchAllQuery()).addSort("id", SortOrder.ASC).get();
+            SearchResponse searchResponse = client().prepareSearch().setSize((int) numberOfDocs).setQuery(matchAllQuery())
+                .addSort("id", SortOrder.ASC).get();
             logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse);
             iterationResults[i] = searchResponse;
             if (searchResponse.getHits().getTotalHits().value != numberOfDocs) {
@@ -286,7 +313,8 @@ private void iterateAssertCount(final int numberOfShards, final int iterations,
             IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().get();
             for (ShardStats shardStats : indicesStatsResponse.getShards()) {
                 DocsStats docsStats = shardStats.getStats().docs;
-                logger.info("shard [{}] - count {}, primary {}", shardStats.getShardRouting().id(), docsStats.getCount(), shardStats.getShardRouting().primary());
+                logger.info("shard [{}] - count {}, primary {}", shardStats.getShardRouting().id(), docsStats.getCount(),
+                    shardStats.getShardRouting().primary());
             }
             ClusterService clusterService = clusterService();
@@ -332,12 +360,14 @@ private void iterateAssertCount(final int numberOfShards, final int iterations,
     }
     private void logSearchResponse(int numberOfShards, long numberOfDocs, int iteration, SearchResponse searchResponse) {
-        logger.info("iteration [{}] - successful shards: {} (expected {})", iteration, searchResponse.getSuccessfulShards(), numberOfShards);
+        logger.info("iteration [{}] - successful shards: {} (expected {})", iteration,
+            searchResponse.getSuccessfulShards(), numberOfShards);
         logger.info("iteration [{}] - failed shards: {} (expected 0)", iteration, searchResponse.getFailedShards());
         if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
             logger.info("iteration [{}] - shard failures: {}", iteration, Arrays.toString(searchResponse.getShardFailures()));
         }
-        logger.info("iteration [{}] - returned documents: {} (expected {})", iteration, searchResponse.getHits().getTotalHits().value, numberOfDocs);
+        logger.info("iteration [{}] - returned documents: {} (expected {})", iteration,
+            searchResponse.getHits().getTotalHits().value, numberOfDocs);
     }
     private void refreshAndAssert() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java
index b27e4fd229a07..62208a404885b 100644
--- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java
+++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java
@@ -133,7 +133,8 @@ public void testSimpleRelocationNoIndexing() {
         logger.info("--> start another node");
         final String node_2 = internalCluster().startNode();
-        ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+        ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+            .setWaitForNodes("2").execute().actionGet();
         assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
         logger.info("--> relocate the shard from node1 to node2");
@@ -141,7 +142,8 @@ public void testSimpleRelocationNoIndexing() {
             .add(new MoveAllocationCommand("test", 0, node_1, node_2))
             .execute().actionGet();
-        clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+        clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+            .setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
         assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
         logger.info("--> verifying count again...");
@@ -155,7 +157,8 @@ public void testRelocationWhileIndexingRandom() throws Exception {
         int numberOfReplicas = randomBoolean() ? 0 : 1;
         int numberOfNodes = numberOfReplicas == 0 ? 2 : 3;
-        logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes);
+        logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})",
+            numberOfRelocations, numberOfReplicas, numberOfNodes);
         String[] nodes = new String[numberOfNodes];
         logger.info("--> starting [node1] ...");
@@ -172,8 +175,10 @@ public void testRelocationWhileIndexingRandom() throws Exception {
                 logger.info("--> starting [node{}] ...", i);
                 nodes[i - 1] = internalCluster().startNode();
                 if (i != numberOfNodes) {
-                    ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
-                        .setWaitForNodes(Integer.toString(i)).setWaitForGreenStatus().execute().actionGet();
+                    ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth()
+                        .setWaitForEvents(Priority.LANGUID)
+                        .setWaitForNodes(Integer.toString(i))
+                        .setWaitForGreenStatus().execute().actionGet();
                     assertThat(healthResponse.isTimedOut(), equalTo(false));
                 }
             }
@@ -202,7 +207,10 @@ public void testRelocationWhileIndexingRandom() throws Exception {
                     logger.debug("--> flushing");
                     client().admin().indices().prepareFlush().get();
                 }
-                ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+                ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth()
+                    .setWaitForEvents(Priority.LANGUID)
+                    .setWaitForNoRelocatingShards(true)
+                    .setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
                 assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
                 indexer.pauseIndexing();
                 logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
@@ -218,7 +226,8 @@ public void testRelocationWhileIndexingRandom() throws Exception {
             boolean ranOnce = false;
             for (int i = 0; i < 10; i++) {
                 logger.info("--> START search test round {}", i + 1);
-                SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).storedFields().execute().actionGet().getHits();
+                SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery())
+                    .setSize((int) indexer.totalIndexedDocs()).storedFields().execute().actionGet().getHits();
                 ranOnce = true;
                 if (hits.getTotalHits().value != indexer.totalIndexedDocs()) {
                     int[] hitIds = new int[(int) indexer.totalIndexedDocs()];
@@ -252,7 +261,8 @@ public void testRelocationWhileRefreshing() throws Exception {
         int numberOfReplicas = randomBoolean() ? 0 : 1;
         int numberOfNodes = numberOfReplicas == 0 ? 2 : 3;
-        logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes);
+        logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})",
+            numberOfRelocations, numberOfReplicas, numberOfNodes);
         String[] nodes = new String[numberOfNodes];
         logger.info("--> starting [node_0] ...");
@@ -281,13 +291,15 @@ public void testRelocationWhileRefreshing() throws Exception {
         final Semaphore postRecoveryShards = new Semaphore(0);
         final IndexEventListener listener = new IndexEventListener() {
             @Override
-            public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
+            public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState,
+                                               IndexShardState currentState, @Nullable String reason) {
                 if (currentState == IndexShardState.POST_RECOVERY) {
                     postRecoveryShards.release();
                 }
             }
         };
-        for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getInstances(MockIndexEventListener.TestEventListener.class)) {
+        for (MockIndexEventListener.TestEventListener eventListener : internalCluster()
+            .getInstances(MockIndexEventListener.TestEventListener.class)) {
             eventListener.setNewDelegate(listener);
         }
@@ -327,7 +339,10 @@ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardSt
             indexRandom(true, true, builders2);
             // verify cluster was finished.
-            assertFalse(client().admin().cluster().prepareHealth().setWaitForNoRelocatingShards(true).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get().isTimedOut());
+            assertFalse(client().admin().cluster().prepareHealth()
+                .setWaitForNoRelocatingShards(true)
+                .setWaitForEvents(Priority.LANGUID)
+                .setTimeout("30s").get().isTimedOut());
             logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
             logger.debug("--> verifying all searches return the same number of docs");
@@ -374,17 +389,20 @@ public void testCancellationCleansTempFiles() throws Exception {
         MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, p_node);
         for (DiscoveryNode node : clusterService.state().nodes()) {
             if (!node.equals(clusterService.localNode())) {
-                mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, node.getName()), new RecoveryCorruption(corruptionCount));
+                mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, node.getName()),
+                    new RecoveryCorruption(corruptionCount));
             }
         }
-        client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
+        client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
         corruptionCount.await();
         logger.info("--> stopping replica assignment");
         assertAcked(client().admin().cluster().prepareUpdateSettings()
-            .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
+            .setTransientSettings(Settings.builder()
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
         logger.info("--> wait for all replica shards to be removed, on all nodes");
         assertBusy(() -> {
@@ -408,7 +426,8 @@ public void testCancellationCleansTempFiles() throws Exception {
                 Files.walkFileTree(shardLoc, new SimpleFileVisitor<Path>() {
                     @Override
                     public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
-                        assertThat("found a temporary recovery file: " + file, file.getFileName().toString(), not(startsWith("recovery.")));
+                        assertThat("found a temporary recovery file: " + file, file.getFileName().toString(),
+                            not(startsWith("recovery.")));
                         return FileVisitResult.CONTINUE;
                     }
                 });
@@ -496,13 +515,15 @@ class RecoveryCorruption implements StubbableTransport.SendRequestBehavior {
         }
         @Override
-        public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
+        public void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request,
+                                TransportRequestOptions options) throws IOException {
             if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                 RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request;
                 if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) {
                     // corrupting the segments_N files in order to make sure future recovery re-send files
                     logger.debug("corrupting [{}] to {}. file name: [{}]", action, connection.getNode(), chunkRequest.name());
-                    assert chunkRequest.content().toBytesRef().bytes == chunkRequest.content().toBytesRef().bytes : "no internal reference!!";
+                    assert chunkRequest.content().toBytesRef().bytes ==
+                        chunkRequest.content().toBytesRef().bytes : "no internal reference!!";
                     byte[] array = chunkRequest.content().toBytesRef().bytes;
                     array[0] = (byte) ~array[0]; // flip one byte in the content
                     corruptionCount.countDown();
diff --git a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
index ac8688c9847d3..973c687ebe84c 100644
--- a/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java
@@ -89,7 +89,8 @@ public void testCancelRecoveryAndResume() throws Exception {
         // we have no replicas so far and make sure that we allocate the primary on the lucky node
         assertAcked(prepareCreate("test")
             .addMapping("type1", "field1", "type=text", "the_id", "type=text")
-            .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards())
+            .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards())
                 .put("index.routing.allocation.include._name", primariesNode.getNode().getName()))); // only allocate on the lucky node
         // index some docs and check if they are coming back
@@ -112,7 +113,8 @@ public void testCancelRecoveryAndResume() throws Exception {
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicBoolean truncate = new AtomicBoolean(true);
         for (NodeStats dataNode : dataNodeStats) {
-            MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
+            MockTransportService mockTransportService = ((MockTransportService) internalCluster()
+                .getInstance(TransportService.class, dataNode.getNode().getName()));
             mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
                 (connection, requestId, action, request, options) -> {
                     if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {