From 14911e8695bd12c5b77e502562374d8d7ad4fbe1 Mon Sep 17 00:00:00 2001 From: zhengchenyu Date: Mon, 8 Jul 2024 16:37:34 +0800 Subject: [PATCH] HDFS-17565. EC: dfs.datanode.ec.reconstruction.threads should support dynamic reconfiguration. --- .../hadoop/hdfs/server/datanode/DataNode.java | 23 ++++++++++++- .../erasurecode/ErasureCodingWorker.java | 19 +++++++++++ .../datanode/TestDataNodeReconfiguration.java | 33 +++++++++++++++++++ .../hadoop/hdfs/tools/TestDFSAdmin.java | 2 +- 4 files changed, 75 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 87e8eee681d1d..7c5c9eee33a9a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -89,6 +89,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_PLAN_VALID_INTERVAL; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_PLAN_VALID_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT; @@ -374,7 +376,8 @@ public class DataNode extends ReconfigurableBase DFS_DATANODE_DATA_TRANSFER_BANDWIDTHPERSEC_KEY, DFS_DATANODE_DATA_WRITE_BANDWIDTHPERSEC_KEY, DFS_DATANODE_DATA_READ_BANDWIDTHPERSEC_KEY, - DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY)); + 
DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, + DFS_DN_EC_RECONSTRUCTION_THREADS_KEY)); public static final String METRICS_LOG_NAME = "DataNodeMetricsLog"; @@ -740,6 +743,8 @@ public String reconfigurePropertyImpl(String property, String newVal) return reconfDiskBalancerParameters(property, newVal); case DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY: return reconfSlowIoWarningThresholdParameters(property, newVal); + case DFS_DN_EC_RECONSTRUCTION_THREADS_KEY: + return reconfStripedReconstructionParameters(property, newVal); default: break; } @@ -1079,6 +1084,22 @@ private String reconfSlowIoWarningThresholdParameters(String property, String ne } } + private String reconfStripedReconstructionParameters(String property, String newVal) + throws ReconfigurationException { + String result = null; + try { + if (property.equals(DFS_DN_EC_RECONSTRUCTION_THREADS_KEY)) { + int size = (newVal == null ? DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT : + Integer.parseInt(newVal)); + result = Long.toString(size); + ecWorker.setStripedReconstructionPoolSize(size); + } + return result; + } catch (IllegalArgumentException e) { + throw new ReconfigurationException(property, newVal, getConf().get(property), e); + } + } + /** * Get a list of the keys of the re-configurable properties in configuration. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index 74c4cf1bd5f34..3b36404b59a26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -37,6 +38,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY; + /** * ErasureCodingWorker handles the erasure coding reconstruction work commands. 
* These commands would be issued from Namenode as part of Datanode's heart beat @@ -172,4 +176,19 @@ public void shutDown() { public float getXmitWeight() { return xmitWeight; } + + public void setStripedReconstructionPoolSize(int size) { + Preconditions.checkArgument(size > 0, + DFS_DN_EC_RECONSTRUCTION_THREADS_KEY + " should be greater than 0"); + this.stripedReconstructionPool.setCorePoolSize(size); + this.stripedReconstructionPool.setMaximumPoolSize(size); + } + + @VisibleForTesting + public int getStripedReconstructionPoolSize() { + int poolSize = this.stripedReconstructionPool.getCorePoolSize(); + Preconditions.checkArgument(poolSize == this.stripedReconstructionPool.getMaximumPoolSize(), + "The maximum pool size should be equal to core pool size"); + return poolSize; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 69f0565f2b6b6..02120ae035735 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -55,6 +55,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_PLAN_VALID_INTERVAL; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_PLAN_VALID_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; @@ -948,4 +950,35 @@ public void 
testSlowIoWarningThresholdReconfiguration() throws Exception { } } + @Test + public void testDataNodeECReconstructionThreads() throws Exception { + for (int i = 0; i < NUM_DATA_NODE; i++) { + DataNode dn = cluster.getDataNodes().get(i); + + // Verify DFS_DN_EC_RECONSTRUCTION_THREADS_KEY. + // Try invalid values. + LambdaTestUtils.intercept(ReconfigurationException.class, + "Could not change property dfs.datanode.ec.reconstruction.threads from " + + "'8' to 'text'", + () -> dn.reconfigureProperty(DFS_DN_EC_RECONSTRUCTION_THREADS_KEY, "text")); + LambdaTestUtils.intercept(ReconfigurationException.class, + "Could not change property dfs.datanode.ec.reconstruction.threads from " + + "'8' to '-1'", + () -> dn.reconfigureProperty(DFS_DN_EC_RECONSTRUCTION_THREADS_KEY, "-1")); + LambdaTestUtils.intercept(ReconfigurationException.class, + "Could not change property dfs.datanode.ec.reconstruction.threads from " + + "'8' to '0'", + () -> dn.reconfigureProperty(DFS_DN_EC_RECONSTRUCTION_THREADS_KEY, "0")); + + // Set value is 10. + dn.reconfigureProperty(DFS_DN_EC_RECONSTRUCTION_THREADS_KEY, + String.valueOf(10)); + assertEquals(10, dn.getErasureCodingWorker().getStripedReconstructionPoolSize()); + + // Set default value. 
+ dn.reconfigureProperty(DFS_DN_EC_RECONSTRUCTION_THREADS_KEY, null); + assertEquals(DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT, + dn.getErasureCodingWorker().getStripedReconstructionPoolSize()); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 392866e86ea2e..0fd382db3dfc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -349,7 +349,7 @@ public void testDataNodeGetReconfigurableProperties() throws IOException, Interr final List outs = Lists.newArrayList(); final List errs = Lists.newArrayList(); getReconfigurableProperties("datanode", address, outs, errs); - assertEquals(26, outs.size()); + assertEquals(27, outs.size()); assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1)); }