From b8763c36efecfe6d967919c354d3cb0ad441be35 Mon Sep 17 00:00:00 2001 From: singer-bin Date: Mon, 14 Mar 2022 17:46:35 +0800 Subject: [PATCH 01/13] HDFS-16457 --- .../hadoop/hdfs/server/datanode/DataNode.java | 22 ++++++++++++++++++- .../datanode/fsdataset/FsDatasetSpi.java | 6 +++++ .../fsdataset/impl/BlockPoolSlice.java | 15 +++++++++++-- .../fsdataset/impl/FsDatasetImpl.java | 5 +++++ .../datanode/fsdataset/impl/FsVolumeImpl.java | 4 ++++ .../server/datanode/SimulatedFSDataset.java | 6 +++++ .../extdataset/ExternalDatasetImpl.java | 6 +++++ 7 files changed, 61 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 15e8a9e359799..d779ca47e62da 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY; @@ -149,6 +150,8 @@ import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel; import org.apache.hadoop.hdfs.server.datanode.checker.DatasetVolumeChecker; import org.apache.hadoop.hdfs.server.datanode.checker.StorageLocationChecker; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.BlockPoolSlice; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.util.*; import 
org.apache.hadoop.hdfs.client.BlockReportOptions; @@ -341,7 +344,7 @@ public class DataNode extends ReconfigurableBase DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY, DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY, - DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY)); + DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY,FS_GETSPACEUSED_CLASSNAME)); public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog"); @@ -673,6 +676,9 @@ public String reconfigurePropertyImpl(String property, String newVal) case DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY: case DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY: return reconfSlowDiskParameters(property, newVal); + case FS_GETSPACEUSED_CLASSNAME: + reconfSpaceUsedKlass(); + return newVal; default: break; } @@ -680,6 +686,20 @@ public String reconfigurePropertyImpl(String property, String newVal) property, newVal, getConf().get(property)); } + private void reconfSpaceUsedKlass(){ + List volumeList = data.getVolumeList(); + for (FsVolumeImpl fsVolume : volumeList) { + Map blockPoolSlices = fsVolume.getBlockPoolSlices(); + for (Entry entry : blockPoolSlices.entrySet()) { + try { + entry.getValue().refreshSpaceUsedKlass(getNewConf()); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } + private String reconfDataXceiverParameters(String property, String newVal) throws ReconfigurationException { String result; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java index e39ef817b6f29..ada559a35d54c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java @@ -56,6 +56,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; @@ -679,4 +680,9 @@ ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block, * @throws IOException */ MountVolumeMap getMountVolumeMap() throws IOException; + + /** + * Get the volume list. + */ + List getVolumeList(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index eff079a353da6..4c09fd90ed341 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -84,7 +84,7 @@ * * This class is synchronized by {@link FsVolumeImpl}. 
*/ -class BlockPoolSlice { +public class BlockPoolSlice { static final Logger LOG = LoggerFactory.getLogger(BlockPoolSlice.class); private final String bpid; @@ -115,6 +115,7 @@ class BlockPoolSlice { private final Timer timer; private final int maxDataLength; private final FileIoProvider fileIoProvider; + private final File bpDir; private static ForkJoinPool addReplicaThreadPool = null; private static final int VOLUMES_REPLICA_ADD_THREADPOOL_SIZE = Runtime @@ -128,7 +129,7 @@ public int compare(File f1, File f2) { }; // TODO:FEDERATION scalability issue - a thread per DU is needed - private final GetSpaceUsed dfsUsage; + private volatile GetSpaceUsed dfsUsage; /** * Create a blook pool slice @@ -141,6 +142,7 @@ public int compare(File f1, File f2) { */ BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir, Configuration conf, Timer timer) throws IOException { + this.bpDir = bpDir; this.bpid = bpid; this.volume = volume; this.fileIoProvider = volume.getFileIoProvider(); @@ -232,6 +234,15 @@ public void run() { SHUTDOWN_HOOK_PRIORITY); } + public void refreshSpaceUsedKlass(Configuration conf) throws IOException { + this.dfsUsage = new FSCachingGetSpaceUsed.Builder().setBpid(bpid) + .setVolume(volume) + .setPath(bpDir) + .setConf(conf) + .setInitialUsed(loadDfsUsed()) + .build(); + } + private synchronized static void initializeAddReplicaPool(Configuration conf, FsDatasetImpl dataset) { if (addReplicaThreadPool == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 002d99abc5ba7..f0a2b97044c29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ 
-3575,6 +3575,11 @@ public MountVolumeMap getMountVolumeMap() { return volumes.getMountVolumeMap(); } + @Override + public List getVolumeList() { + return volumes.getVolumes(); + } + @Override public boolean isDeletingBlock(String bpid, long blockId) { synchronized(deletingBlock) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 8f15d8a70932e..256e29f0fde0d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -552,6 +552,10 @@ long getReserved(){ return reserved != null ? reserved.getReserved() : 0; } + public Map getBlockPoolSlices() { + return bpSlices; + } + @VisibleForTesting BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException { BlockPoolSlice bp = bpSlices.get(bpid); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index 3313c7c7a0360..29eb051cb0210 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.DataNodeLockManager; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MountVolumeMap; import org.apache.hadoop.thirdparty.com.google.common.math.LongMath; import 
org.apache.commons.lang3.ArrayUtils; @@ -1605,5 +1606,10 @@ public Set deepCopyReplica(String bpid) public MountVolumeMap getMountVolumeMap() { return null; } + + @Override + public List getVolumeList() { + return null; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java index 1c6597eb4541a..77e2e2077d1f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock; import org.apache.hadoop.hdfs.server.common.DataNodeLockManager; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MountVolumeMap; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; @@ -467,4 +468,9 @@ public Set deepCopyReplica(String bpid) public MountVolumeMap getMountVolumeMap() { return null; } + + @Override + public List getVolumeList() { + return null; + } } From 6e8448f46bce6a742361228a155b1f68f56483a1 Mon Sep 17 00:00:00 2001 From: singer-bin Date: Tue, 15 Mar 2022 09:41:02 +0800 Subject: [PATCH 02/13] HDFS-16457 --- .../hadoop/hdfs/tools/TestDFSAdmin.java | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 78598760df42c..6fb1ef1ecdd60 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -30,28 +30,17 @@ import java.util.concurrent.TimeoutException; import java.util.function.Supplier; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY; - import org.apache.commons.io.FileUtils; import org.apache.commons.text.TextStringBuilder; -import org.apache.hadoop.fs.FsShell; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -82,18 
+71,28 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; - import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY; import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION; import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertEquals; @@ -102,7 +101,6 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.hamcrest.CoreMatchers.containsString; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static 
org.mockito.Mockito.when; @@ -338,7 +336,7 @@ public void testDataNodeGetReconfigurableProperties() throws IOException { final List outs = Lists.newArrayList(); final List errs = Lists.newArrayList(); getReconfigurableProperties("datanode", address, outs, errs); - assertEquals(16, outs.size()); + assertEquals(17, outs.size()); assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1)); } From 30c432379c99a5d503cff770d6029379c8d193d6 Mon Sep 17 00:00:00 2001 From: singer-bin Date: Wed, 16 Mar 2022 11:25:28 +0800 Subject: [PATCH 03/13] reslove conficts --- .../hadoop/hdfs/tools/TestDFSAdmin.java | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 6fb1ef1ecdd60..1497c090af04e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -30,17 +30,28 @@ import java.util.concurrent.TimeoutException; import java.util.function.Supplier; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; +import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY; + import org.apache.commons.io.FileUtils; import org.apache.commons.text.TextStringBuilder; +import org.apache.hadoop.fs.FsShell; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -71,28 +82,18 @@ import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.ToolRunner; + import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY; 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY; import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION; import static org.hamcrest.CoreMatchers.allOf; import static org.hamcrest.CoreMatchers.anyOf; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertEquals; @@ -101,6 +102,7 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.hamcrest.CoreMatchers.containsString; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; From 25d5a67ffb6e9c96b3d64c41968b25a5f3c9d26d Mon Sep 17 00:00:00 2001 From: singer-bin Date: Thu, 17 Mar 2022 18:17:32 +0800 Subject: [PATCH 04/13] the ut of the pr --- .../fsdataset/impl/BlockPoolSlice.java | 1 + .../datanode/TestDataNodeReconfiguration.java | 32 +++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 4c09fd90ed341..0fb31d6745696 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -235,6 +235,7 @@ public void run() { } public void refreshSpaceUsedKlass(Configuration conf) throws IOException { + 
((CachingGetSpaceUsed)dfsUsage).close(); this.dfsUsage = new FSCachingGetSpaceUsed.Builder().setBpid(bpid) .setVolume(volume) .setPath(bpDir) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 1a9d6024acd0f..d5863ab6cdeb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs.server.datanode; +import org.apache.hadoop.fs.CachingGetSpaceUsed; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY; @@ -76,6 +78,7 @@ public class TestDataNodeReconfiguration { private final int NUM_NAME_NODE = 1; private final int NUM_DATA_NODE = 10; private MiniDFSCluster cluster; + private static long counter = 0; @Before public void Setup() throws IOException { @@ -99,6 +102,7 @@ private void startDFSCluster(int numNameNodes, int numDataNodes) throws IOException { Configuration conf = new Configuration(); conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); + conf.set(FS_GETSPACEUSED_CLASSNAME, DummyDU.class.getName()); MiniDFSNNTopology nnTopology = MiniDFSNNTopology .simpleFederatedTopology(numNameNodes); @@ -673,4 +677,32 @@ public void testSlowDiskParameters() throws ReconfigurationException, IOExceptio dn.getDiskMetrics().getSlowDiskDetector().getLowThresholdMs()); } } + + private static class DummyDU extends CachingGetSpaceUsed { 
+ public DummyDU(Builder builder) throws IOException { + super(builder.setInterval(1000)); + } + + @Override + protected void refresh() { + counter++; + } + } + + @Test + public void testDfsUsageParameters() throws IOException, ReconfigurationException, InterruptedException { + final DataNode[] dns = createDNsForTest(1); + final DataNode dataNode = dns[0]; + + long lastCounter = counter; + + Thread.sleep(5000); + assertTrue(counter > lastCounter); + + dataNode.reconfigurePropertyImpl(FS_GETSPACEUSED_CLASSNAME, null); + + lastCounter = counter; + Thread.sleep(5000); + assertEquals(lastCounter, counter); + } } From d0f7902b8546b76020460e0fa84ce477d1a70dfd Mon Sep 17 00:00:00 2001 From: singer-bin Date: Fri, 18 Mar 2022 10:29:47 +0800 Subject: [PATCH 05/13] about ut --- .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 3 ++- .../hdfs/server/datanode/TestDataNodeReconfiguration.java | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index d779ca47e62da..b57a3cca1a6fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -344,7 +344,8 @@ public class DataNode extends ReconfigurableBase DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY, DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY, - DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY,FS_GETSPACEUSED_CLASSNAME)); + DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY, + FS_GETSPACEUSED_CLASSNAME)); public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index d5863ab6cdeb3..1f43332956ab8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -679,7 +679,7 @@ public void testSlowDiskParameters() throws ReconfigurationException, IOExceptio } private static class DummyDU extends CachingGetSpaceUsed { - public DummyDU(Builder builder) throws IOException { + DummyDU(Builder builder) throws IOException { super(builder.setInterval(1000)); } @@ -690,7 +690,8 @@ protected void refresh() { } @Test - public void testDfsUsageParameters() throws IOException, ReconfigurationException, InterruptedException { + public void testDfsUsageParameters() throws IOException, ReconfigurationException, + InterruptedException { final DataNode[] dns = createDNsForTest(1); final DataNode dataNode = dns[0]; From 49afa2485c6a2d018a790e74ba83f622a200a72b Mon Sep 17 00:00:00 2001 From: singer-bin Date: Fri, 18 Mar 2022 18:06:48 +0800 Subject: [PATCH 06/13] fix ut --- .../hdfs/server/datanode/TestDataNodeReconfiguration.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 1f43332956ab8..fd88310e170c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -102,7 +102,7 @@ private void startDFSCluster(int numNameNodes, int numDataNodes) throws IOException { Configuration 
conf = new Configuration(); conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); - conf.set(FS_GETSPACEUSED_CLASSNAME, DummyDU.class.getName()); + conf.set(FS_GETSPACEUSED_CLASSNAME, DummyCachingGetSpaceUsed.class.getName()); MiniDFSNNTopology nnTopology = MiniDFSNNTopology .simpleFederatedTopology(numNameNodes); @@ -678,8 +678,8 @@ public void testSlowDiskParameters() throws ReconfigurationException, IOExceptio } } - private static class DummyDU extends CachingGetSpaceUsed { - DummyDU(Builder builder) throws IOException { + public static class DummyCachingGetSpaceUsed extends CachingGetSpaceUsed { + public DummyCachingGetSpaceUsed(Builder builder) throws IOException { super(builder.setInterval(1000)); } From 68b4a871737b033bce5fcb207e6868d53b910632 Mon Sep 17 00:00:00 2001 From: singer-bin Date: Tue, 22 Mar 2022 10:23:39 +0800 Subject: [PATCH 07/13] fix ut --- .../server/datanode/TestDataNodeReconfiguration.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index fd88310e170c3..c8108f6c591c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -102,7 +102,7 @@ private void startDFSCluster(int numNameNodes, int numDataNodes) throws IOException { Configuration conf = new Configuration(); conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); - conf.set(FS_GETSPACEUSED_CLASSNAME, DummyCachingGetSpaceUsed.class.getName()); + conf.setClass(FS_GETSPACEUSED_CLASSNAME, DummyCachingGetSpaceUsed.class, CachingGetSpaceUsed.class); MiniDFSNNTopology nnTopology = MiniDFSNNTopology 
.simpleFederatedTopology(numNameNodes); @@ -680,7 +680,7 @@ public void testSlowDiskParameters() throws ReconfigurationException, IOExceptio public static class DummyCachingGetSpaceUsed extends CachingGetSpaceUsed { public DummyCachingGetSpaceUsed(Builder builder) throws IOException { - super(builder.setInterval(1000)); + super(builder.setInterval(1000).setJitter(0L)); } @Override @@ -692,15 +692,15 @@ protected void refresh() { @Test public void testDfsUsageParameters() throws IOException, ReconfigurationException, InterruptedException { - final DataNode[] dns = createDNsForTest(1); - final DataNode dataNode = dns[0]; long lastCounter = counter; - Thread.sleep(5000); assertTrue(counter > lastCounter); - dataNode.reconfigurePropertyImpl(FS_GETSPACEUSED_CLASSNAME, null); + for (int i = 0; i < NUM_DATA_NODE; i++) { + DataNode dn = cluster.getDataNodes().get(i); + dn.reconfigurePropertyImpl(FS_GETSPACEUSED_CLASSNAME, null); + } lastCounter = counter; Thread.sleep(5000); From c55da7b3bc85ce95113804f597017a76fc605dcb Mon Sep 17 00:00:00 2001 From: singer-bin Date: Tue, 5 Apr 2022 22:25:50 +0800 Subject: [PATCH 08/13] Extension PR 3863 --- .../hadoop/hdfs/server/datanode/DataNode.java | 38 +++++++++---------- .../fsdataset/impl/BlockPoolSlice.java | 8 +++- .../datanode/TestDataNodeReconfiguration.java | 15 ++++---- 3 files changed, 33 insertions(+), 28 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 88dcf47979d10..0af66310105eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -19,6 +19,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; +import org.apache.hadoop.fs.DU; +import 
org.apache.hadoop.fs.GetSpaceUsed; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_DEFAULT; @@ -685,10 +687,8 @@ public String reconfigurePropertyImpl(String property, String newVal) return reconfSlowDiskParameters(property, newVal); case FS_DU_INTERVAL_KEY: case FS_GETSPACEUSED_JITTER_KEY: - return reconfDfsUsageParameters(property, newVal); case FS_GETSPACEUSED_CLASSNAME: - reconfSpaceUsedKlass(); - return newVal; + return reconfDfsUsageParameters(property, newVal); default: break; } @@ -696,20 +696,6 @@ public String reconfigurePropertyImpl(String property, String newVal) property, newVal, getConf().get(property)); } - private void reconfSpaceUsedKlass(){ - List volumeList = data.getVolumeList(); - for (FsVolumeImpl fsVolume : volumeList) { - Map blockPoolSlices = fsVolume.getBlockPoolSlices(); - for (Entry entry : blockPoolSlices.entrySet()) { - try { - entry.getValue().refreshSpaceUsedKlass(getNewConf()); - } catch (IOException e) { - e.printStackTrace(); - } - } - } - } - private String reconfDataXceiverParameters(String property, String newVal) throws ReconfigurationException { String result; @@ -898,7 +884,7 @@ private String reconfDfsUsageParameters(String property, String newVal) for (FsVolumeImpl fsVolume : volumeList) { Map blockPoolSlices = fsVolume.getBlockPoolSlices(); for (BlockPoolSlice value : blockPoolSlices.values()) { - value.updateDfsUsageConfig(interval, null); + value.updateDfsUsageConfig(interval, null, null); } } } else if (property.equals(FS_GETSPACEUSED_JITTER_KEY)) { @@ -910,13 +896,25 @@ private String reconfDfsUsageParameters(String property, String newVal) for (FsVolumeImpl fsVolume : volumeList) { Map blockPoolSlices = fsVolume.getBlockPoolSlices(); for (BlockPoolSlice value : blockPoolSlices.values()) { - 
value.updateDfsUsageConfig(null, jitter); + value.updateDfsUsageConfig(null, jitter, null); + } + } + } else if (property.equals(FS_GETSPACEUSED_CLASSNAME)) { + Preconditions.checkNotNull(data, "FsDatasetSpi has not been initialized."); + Class klass = (newVal == null ? DU.class : + Class.forName(newVal).asSubclass(GetSpaceUsed.class)); + result = klass.getName(); + List volumeList = data.getVolumeList(); + for (FsVolumeImpl fsVolume : volumeList) { + Map blockPoolSlices = fsVolume.getBlockPoolSlices(); + for (BlockPoolSlice value : blockPoolSlices.values()) { + value.updateDfsUsageConfig(null, null, klass); } } } LOG.info("RECONFIGURE* changed {} to {}", property, newVal); return result; - } catch (IllegalArgumentException | IOException e) { + } catch (IllegalArgumentException | IOException | ClassNotFoundException e) { throw new ReconfigurationException(property, newVal, getConf().get(property), e); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index c6d2261d6b14e..747edf11ca4d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -45,6 +45,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import org.apache.hadoop.hdfs.server.datanode.FSCachingGetSpaceUsed; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -240,7 +241,8 @@ public void run() { SHUTDOWN_HOOK_PRIORITY); } - public void updateDfsUsageConfig(Long interval, Long jitter) throws IOException { + public void updateDfsUsageConfig(Long 
interval, Long jitter, Class klass) + throws IOException { // Close the old dfsUsage if it is CachingGetSpaceUsed. if (dfsUsage instanceof CachingGetSpaceUsed) { ((CachingGetSpaceUsed) dfsUsage).close(); @@ -255,6 +257,10 @@ public void updateDfsUsageConfig(Long interval, Long jitter) throws IOException FS_GETSPACEUSED_JITTER_KEY + " should be larger than or equal to 0"); config.setLong(FS_GETSPACEUSED_JITTER_KEY, jitter); } + + if (klass != null) { + config.setClass(FS_GETSPACEUSED_CLASSNAME, klass, CachingGetSpaceUsed.class); + } // Start new dfsUsage. this.dfsUsage = new FSCachingGetSpaceUsed.Builder().setBpid(bpid) .setVolume(volume) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index f0827c0d15892..85725fe3b443c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -111,7 +111,8 @@ private void startDFSCluster(int numNameNodes, int numDataNodes) throws IOException { Configuration conf = new Configuration(); conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); - conf.setClass(FS_GETSPACEUSED_CLASSNAME, DummyCachingGetSpaceUsed.class, CachingGetSpaceUsed.class); + conf.setClass(FS_GETSPACEUSED_CLASSNAME, DummyCachingGetSpaceUsed.class, + CachingGetSpaceUsed.class); MiniDFSNNTopology nnTopology = MiniDFSNNTopology .simpleFederatedTopology(numNameNodes); @@ -772,20 +773,20 @@ protected void refresh() { } @Test - public void testDfsUsageKlass() throws IOException, ReconfigurationException, - InterruptedException { + public void testDfsUsageKlass() throws ReconfigurationException, InterruptedException { long lastCounter = counter; Thread.sleep(5000); - 
assertTrue(counter > lastCounter); + assertEquals(lastCounter, counter); for (int i = 0; i < NUM_DATA_NODE; i++) { DataNode dn = cluster.getDataNodes().get(i); - dn.reconfigurePropertyImpl(FS_GETSPACEUSED_CLASSNAME, null); + dn.reconfigurePropertyImpl(FS_GETSPACEUSED_CLASSNAME, + DummyCachingGetSpaceUsed.class.getName()); } lastCounter = counter; Thread.sleep(5000); - assertEquals(lastCounter, counter); - } + assertTrue(counter > lastCounter); + } } From 63182b60b8d12c33abcbbce7bfc9b6d714411b39 Mon Sep 17 00:00:00 2001 From: singer-bin Date: Wed, 6 Apr 2022 09:04:17 +0800 Subject: [PATCH 09/13] fix ut --- .../hdfs/server/datanode/TestDataNodeReconfiguration.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 85725fe3b443c..7a243b2319f31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -111,8 +111,6 @@ private void startDFSCluster(int numNameNodes, int numDataNodes) throws IOException { Configuration conf = new Configuration(); conf.setBoolean(DFS_DATANODE_PEER_STATS_ENABLED_KEY, true); - conf.setClass(FS_GETSPACEUSED_CLASSNAME, DummyCachingGetSpaceUsed.class, - CachingGetSpaceUsed.class); MiniDFSNNTopology nnTopology = MiniDFSNNTopology .simpleFederatedTopology(numNameNodes); From e5052de0914c386ed3645486f288ef172dc49066 Mon Sep 17 00:00:00 2001 From: singer-bin Date: Wed, 6 Apr 2022 10:23:48 +0800 Subject: [PATCH 10/13] Add windows os judgment --- .../hadoop/hdfs/server/datanode/DataNode.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 0af66310105eb..7536e994f4c7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import org.apache.hadoop.fs.DU; import org.apache.hadoop.fs.GetSpaceUsed; +import org.apache.hadoop.fs.WindowsGetSpaceUsed; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_DEFAULT; @@ -901,8 +902,16 @@ private String reconfDfsUsageParameters(String property, String newVal) } } else if (property.equals(FS_GETSPACEUSED_CLASSNAME)) { Preconditions.checkNotNull(data, "FsDatasetSpi has not been initialized."); - Class klass = (newVal == null ? 
DU.class : - Class.forName(newVal).asSubclass(GetSpaceUsed.class); + Class klass; + if (newVal == null) { + if (Shell.WINDOWS) { + klass = WindowsGetSpaceUsed.class; + } else { + klass = DU.class; + } + } else { + klass = Class.forName(newVal).asSubclass(GetSpaceUsed.class); + } result = klass.getName(); List volumeList = data.getVolumeList(); for (FsVolumeImpl fsVolume : volumeList) { Map blockPoolSlices = fsVolume.getBlockPoolSlices(); for (BlockPoolSlice value : blockPoolSlices.values()) { From 4f3341c0793a7a9e56dfef56f89b98fbb9eb79cc Mon Sep 17 00:00:00 2001 From: singer-bin Date: Wed, 6 Apr 2022 19:41:21 +0800 Subject: [PATCH 11/13] Remove unused methods --- .../server/datanode/fsdataset/impl/BlockPoolSlice.java | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 747edf11ca4d4..7294c76d712bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -275,16 +275,6 @@ public GetSpaceUsed getDfsUsage() { return dfsUsage; } - public void refreshSpaceUsedKlass(Configuration conf) throws IOException { - ((CachingGetSpaceUsed) dfsUsage).close(); - this.dfsUsage = new FSCachingGetSpaceUsed.Builder().setBpid(bpid) - .setVolume(volume) - .setPath(bpDir) - .setConf(conf) - .setInitialUsed(loadDfsUsed()) - .build(); - } - private synchronized static void initializeAddReplicaPool(Configuration conf, FsDatasetImpl dataset) { if (addReplicaThreadPool == null) { From 7f45dbebb728d281d586bc046ca06ae9232254ca Mon Sep 17 00:00:00 2001 From: singer-bin Date: Thu, 7 Apr 2022 14:22:37 +0800 Subject: [PATCH 12/13] Sort import sentences --- .../org/apache/hadoop/hdfs/server/datanode/DataNode.java | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 7536e994f4c7f..57aa2c31cab67 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -18,12 +18,9 @@ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; -import org.apache.hadoop.fs.DU; -import org.apache.hadoop.fs.GetSpaceUsed; -import org.apache.hadoop.fs.WindowsGetSpaceUsed; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_JITTER_DEFAULT; @@ -91,6 +88,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.DF; +import org.apache.hadoop.fs.DU; +import org.apache.hadoop.fs.GetSpaceUsed; +import org.apache.hadoop.fs.WindowsGetSpaceUsed; import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.ReconfigurationProtocolService; import java.io.BufferedOutputStream; From b5606806ba3ac2ebd312e121d2c14791f32e190c Mon Sep 17 00:00:00 2001 From: singer-bin Date: Thu, 7 Apr 2022 14:35:17 +0800 Subject: [PATCH 13/13] Sort import sentences --- .../hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java | 2 +- .../hdfs/server/datanode/TestDataNodeReconfiguration.java 
| 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 7294c76d712bb..23f3602a456c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -45,7 +45,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import org.apache.hadoop.hdfs.server.datanode.FSCachingGetSpaceUsed; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -81,6 +80,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_JITTER_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; /** * A block pool slice represents a portion of a block pool stored on a volume. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java index 7a243b2319f31..14e3f63691bd5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java @@ -18,13 +18,12 @@ package org.apache.hadoop.hdfs.server.datanode; -import org.apache.hadoop.fs.CachingGetSpaceUsed; -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DU_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_JITTER_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_JITTER_KEY; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_GETSPACEUSED_CLASSNAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY; @@ -60,6 +59,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.ReconfigurationException; +import org.apache.hadoop.fs.CachingGetSpaceUsed; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.GetSpaceUsed;