diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5924353ef8cb2..d7fe5b9d0e07a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2310,7 +2310,8 @@ BlockReconstructionWork scheduleReconstruction(BlockInfo block,
byte[] newIndices = new byte[liveBlockIndices.size()];
adjustSrcNodesAndIndices((BlockInfoStriped)block,
srcNodes, liveBlockIndices, newSrcNodes, newIndices);
- byte[] liveAndDecommissioningBusyIndices = new byte[liveAndDecommissioningBusyBlockIndices.size()];
+ byte[] liveAndDecommissioningBusyIndices =
+ new byte[liveAndDecommissioningBusyBlockIndices.size()];
for (int i = 0; i < liveAndDecommissioningBusyBlockIndices.size(); i++) {
liveAndDecommissioningBusyIndices[i] = liveAndDecommissioningBusyBlockIndices.get(i);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 1295c0dca8752..b6108e5828c4d 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1233,6 +1233,14 @@
+<property>
+  <name>dfs.namenode.decommission.ec.reconstruction.enable</name>
+  <value>false</value>
+  <description>
+    Whether to use reconstruction to copy an EC block when the related node is busy.
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.redundancy.interval.seconds</name>
   <value>3</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index 440eee17a341b..3bdccf201d1e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -63,7 +63,6 @@
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Lists;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -1215,7 +1214,7 @@ public void testDecommissionBusyNodeWithECReconstruction1() throws Exception {
assertTrue(newStorageInfos.size() >= 2);
DatanodeStorageInfo decommissionedNode = null;
int alive = 0;
- for (int i = 0; i < newStorageInfos.size();i ++) {
+ for (int i = 0; i < newStorageInfos.size(); i++) {
DatanodeStorageInfo datanodeStorageInfo = newStorageInfos.get(i);
if (datanodeStorageInfo.getDatanodeDescriptor().isDecommissioned()) {
decommissionedNode = datanodeStorageInfo;
@@ -1286,7 +1285,7 @@ public void testDecommissionBusyNodeWithECReconstruction2() throws Exception {
assertTrue(newStorageInfos.size() >= 4);
int alive = 0;
int decommissioned = 0;
- for (int i = 0; i < newStorageInfos.size();i ++) {
+ for (int i = 0; i < newStorageInfos.size(); i++) {
DatanodeStorageInfo newDatanodeStorageInfo = newStorageInfos.get(i);
if (newDatanodeStorageInfo.getDatanodeDescriptor().isDecommissioned()) {
assertTrue(newDatanodeStorageInfo.equals(storageInfos.get(0)) ||
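
For reference, the flag introduced above can be toggled through the standard Hadoop Configuration API. The snippet below is a minimal sketch and not part of this patch; the class name is hypothetical, but the property key matches the hdfs-default.xml entry added here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hypothetical example (not part of the patch): opt in to reconstruction-based
// copying of EC blocks when the related nodes are busy during decommission.
public class EcReconstructionFlagExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // The property defaults to false per the hdfs-default.xml entry above.
    conf.setBoolean("dfs.namenode.decommission.ec.reconstruction.enable", true);
    System.out.println("enabled = " + conf.getBoolean(
        "dfs.namenode.decommission.ec.reconstruction.enable", false));
  }
}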