From a41f6a94a6f9932f6fde6ebf845a4cacd777614e Mon Sep 17 00:00:00 2001
From: "eddy.cao"
Date: Sat, 2 Dec 2023 21:29:11 +0800
Subject: [PATCH 1/3] NNThroughputBenchmark: support specifying the base
 directory

---
 .../namenode/NNThroughputBenchmark.java       | 48 +++++++++++++------
 .../namenode/TestNNThroughputBenchmark.java   | 12 +++++
 2 files changed, 46 insertions(+), 14 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 847f7dc0c12c7..073ef5f7a3bc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -162,11 +162,11 @@ static void setNameNodeLoggingLevel(Level logLevel) {
    * specific name-node operation.
    */
   abstract class OperationStatsBase {
-    protected static final String BASE_DIR_NAME = "/nnThroughputBenchmark";
+    protected String baseDirName = "/nnThroughputBenchmark";
     protected static final String OP_ALL_NAME = "all";
     protected static final String OP_ALL_USAGE = "-op all <other ops options>";
 
-    protected final String baseDir;
+    protected String baseDir;
     protected short replication;
     protected int blockSize;
     protected int numThreads = 0;        // number of threads
@@ -228,7 +228,7 @@ abstract class OperationStatsBase {
     abstract void printResults();
 
     OperationStatsBase() {
-      baseDir = BASE_DIR_NAME + "/" + getOpName();
+      baseDir = baseDirName + "/" + getOpName();
       replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
       blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
       numOpsRequired = 10;
@@ -317,6 +317,7 @@ long getAverageTime() {
     }
 
     String getBaseDir() {
+      baseDir = baseDirName + "/" + getOpName();
       return baseDir;
     }
 
@@ -494,7 +495,7 @@ long executeOp(int daemonId, int inputIdx, String ignore)
       clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
           false);
       long start = Time.now();
-      clientProto.delete(BASE_DIR_NAME, true);
+      clientProto.delete(baseDirName, true);
       long end = Time.now();
       return end-start;
     }
@@ -502,7 +503,7 @@ long executeOp(int daemonId, int inputIdx, String ignore)
     @Override
     void printResults() {
       LOG.info("--- " + getOpName() + " inputs ---");
-      LOG.info("Remove directory " + BASE_DIR_NAME);
+      LOG.info("Remove directory " + baseDirName);
       printStats();
     }
   }
@@ -517,9 +518,9 @@ void printResults() {
   class CreateFileStats extends OperationStatsBase {
     // Operation types
    static final String OP_CREATE_NAME = "create";
-    static final String OP_CREATE_USAGE = 
+    static final String OP_CREATE_USAGE =
         "-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
-        + " [-close]";
+        + " [-baseDirName D] [-close]";
 
     protected FileNameGenerator nameGenerator;
     protected String[][] fileNames;
@@ -553,6 +554,9 @@ void parseArguments(List<String> args) {
       } else if(args.get(i).equals("-filesPerDir")) {
         if(i+1 == args.size())  printUsage();
         nrFilesPerDir = Integer.parseInt(args.get(++i));
+      } else if(args.get(i).equals("-baseDirName")) {
+        if(i+1 == args.size())  printUsage();
+        baseDirName = args.get(++i);
       } else if(args.get(i).equals("-close")) {
         closeUponCreate = true;
       } else if(!ignoreUnrelatedOptions)
@@ -568,6 +572,7 @@ void generateInputs(int[] opsPerThread) throws IOException {
           false);
       // int generatedFileIdx = 0;
       LOG.info("Generate " + numOpsRequired + " intputs for " + getOpName());
+      LOG.info("basedir: " + getBaseDir());
       fileNames = new String[numThreads][];
       try {
         for(int idx=0; idx < numThreads; idx++) {
@@ -618,6 +623,7 @@ long executeOp(int daemonId, int inputIdx, String clientName)
     @Override
     void printResults() {
       LOG.info("--- " + getOpName() + " inputs ---");
+      LOG.info("baseDir = " + getBaseDir());
       LOG.info("nrFiles = " + numOpsRequired);
       LOG.info("nrThreads = " + numThreads);
       LOG.info("nrFilesPerDir = " + nameGenerator.getFilesPerDirectory());
@@ -635,7 +641,7 @@ class MkdirsStats extends OperationStatsBase {
     // Operation types
     static final String OP_MKDIRS_NAME = "mkdirs";
     static final String OP_MKDIRS_USAGE = "-op mkdirs [-threads T] [-dirs N] " +
-        "[-dirsPerDir P]";
+        "[-dirsPerDir P] [-baseDirName D]";
 
     protected FileNameGenerator nameGenerator;
     protected String[][] dirPaths;
@@ -664,6 +670,9 @@ void parseArguments(List<String> args) {
       } else if(args.get(i).equals("-dirsPerDir")) {
         if(i+1 == args.size())  printUsage();
         nrDirsPerDir = Integer.parseInt(args.get(++i));
+      } else if(args.get(i).equals("-baseDirName")) {
+        if(i+1 == args.size())  printUsage();
+        baseDirName = args.get(++i);
       } else if(!ignoreUnrelatedOptions)
         printUsage();
     }
@@ -718,6 +727,7 @@ long executeOp(int daemonId, int inputIdx, String clientName)
     @Override
     void printResults() {
       LOG.info("--- " + getOpName() + " inputs ---");
+      LOG.info("baseDir = " + getBaseDir());
      LOG.info("nrDirs = " + numOpsRequired);
       LOG.info("nrThreads = " + numThreads);
       LOG.info("nrDirsPerDir = " + nameGenerator.getFilesPerDirectory());
@@ -736,7 +746,7 @@ class OpenFileStats extends CreateFileStats {
     static final String OP_OPEN_NAME = "open";
     static final String OP_USAGE_ARGS =
         " [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
-        + " [-useExisting]";
+        + " [-baseDirName D] [-useExisting]";
     static final String OP_OPEN_USAGE =
         "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -771,6 +781,7 @@ void generateInputs(int[] opsPerThread) throws IOException {
           "-blockSize", String.valueOf(blockSize),
           "-filesPerDir",
           String.valueOf(nameGenerator.getFilesPerDirectory()),
+          "-baseDirName", this.baseDirName,
           "-close"};
       CreateFileStats opCreate =
           new CreateFileStats(Arrays.asList(createArgs));
@@ -1135,9 +1146,9 @@ private int transferBlocks( Block blocks[],
    */
   class BlockReportStats extends OperationStatsBase {
     static final String OP_BLOCK_REPORT_NAME = "blockReport";
-    static final String OP_BLOCK_REPORT_USAGE = 
+    static final String OP_BLOCK_REPORT_USAGE =
         "-op blockReport [-datanodes T] [-reports N] " +
-        "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S]";
+        "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S] [-baseDirName D]";
 
     private int blocksPerReport;
     private int blocksPerFile;
@@ -1187,6 +1198,9 @@ void parseArguments(List<String> args) {
       } else if (args.get(i).equals("-blockSize")) {
         if(i+1 == args.size())  printUsage();
         blockSize = Integer.parseInt(args.get(++i));
+      } else if(args.get(i).equals("-baseDirName")) {
+        if(i+1 == args.size())  printUsage();
+        baseDirName = args.get(++i);
       } else if(!ignoreUnrelatedOptions)
         printUsage();
     }
@@ -1330,6 +1344,7 @@ void printResults() {
     }
     blockDistribution += ")";
     LOG.info("--- " + getOpName() + " inputs ---");
+    LOG.info("baseDir = " + getBaseDir());
     LOG.info("reports = " + numOpsRequired);
     LOG.info("datanodes = " + numThreads + " " + blockDistribution);
     LOG.info("blocksPerReport = " + blocksPerReport);
@@ -1348,7 +1363,7 @@ class ReplicationStats extends OperationStatsBase {
     static final String OP_REPLICATION_USAGE =
         "-op replication [-datanodes T] [-nodesToDecommission D] " +
         "[-nodeReplicationLimit C] [-totalBlocks B] [-blockSize S] "
-        + "[-replication R]";
+        + "[-replication R] [-baseDirName N]";
 
     private final BlockReportStats blockReportObject;
     private int numDatanodes;
@@ -1377,7 +1392,8 @@ class ReplicationStats extends OperationStatsBase {
         "-datanodes", String.valueOf(numDatanodes),
         "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
         "-blocksPerFile", String.valueOf(numDatanodes),
-        "-blockSize", String.valueOf(blockSize)};
+        "-blockSize", String.valueOf(blockSize),
+        "-baseDirName", baseDirName};
       blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
       numDecommissionedBlocks = 0;
       numPendingBlocks = 0;
@@ -1410,6 +1426,9 @@ void parseArguments(List<String> args) {
       } else if (args.get(i).equals("-blockSize")) {
         if(i+1 == args.size())  printUsage();
         blockSize = Integer.parseInt(args.get(++i));
+      } else if(args.get(i).equals("-baseDirName")) {
+        if(i+1 == args.size())  printUsage();
+        baseDirName = args.get(++i);
       } else if(!ignoreUnrelatedOptions)
         printUsage();
     }
@@ -1485,6 +1504,7 @@ void printResults() {
     }
     blockDistribution += ")";
     LOG.info("--- " + getOpName() + " inputs ---");
+    LOG.info("baseDir = " + getBaseDir());
     LOG.info("numOpsRequired = " + numOpsRequired);
     LOG.info("datanodes = " + numDatanodes + " " + blockDistribution);
     LOG.info("decommissioned datanodes = " + nodesToDecommission);
@@ -1631,7 +1651,7 @@ public int run(String[] aArgs) throws Exception {
     }
     // run each benchmark
     for(OperationStatsBase op : ops) {
-      LOG.info("Starting benchmark: " + op.getOpName());
+      LOG.info("Starting benchmark: " + op.getOpName() + ", baseDir: " + op.getBaseDir());
       op.benchmark();
       op.cleanUp();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index f7a8d92864fa0..27df0f8b137fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -60,6 +60,18 @@ public void testNNThroughput() throws Exception {
     NNThroughputBenchmark.runBenchmark(conf, new String[] {"-op", "all"});
   }
 
+  @Test
+  public void testNNThroughputWithBaseDir() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        nameDir.getAbsolutePath());
+    DFSTestUtil.formatNameNode(conf);
+    NNThroughputBenchmark.runBenchmark(conf,
+        new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"});
+  }
+
   /**
    * This test runs all benchmarks defined in {@link NNThroughputBenchmark},
    * with explicit local -fs option.
From fa1ce214549d6da7fab1efb2656dce3cbb76b3c8 Mon Sep 17 00:00:00 2001
From: "eddy.cao"
Date: Mon, 4 Dec 2023 16:08:52 +0800
Subject: [PATCH 2/3] update UT and code style

---
 .../namenode/NNThroughputBenchmark.java       |  6 +-
 .../namenode/TestNNThroughputBenchmark.java   | 68 +++++++++++++++----
 2 files changed, 59 insertions(+), 15 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 073ef5f7a3bc0..a1cf29facba8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -746,7 +746,7 @@ class OpenFileStats extends CreateFileStats {
     static final String OP_OPEN_NAME = "open";
     static final String OP_USAGE_ARGS =
         " [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
-        + " [-baseDirName D] [-useExisting]";
+        + " [-useExisting] [-baseDirName D]";
     static final String OP_OPEN_USAGE =
         "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -1361,9 +1361,9 @@ void printResults() {
   class ReplicationStats extends OperationStatsBase {
     static final String OP_REPLICATION_NAME = "replication";
     static final String OP_REPLICATION_USAGE =
-        "-op replication [-datanodes T] [-nodesToDecommission D] " +
+        "-op replication [-datanodes T] [-nodesToDecommission N] " +
         "[-nodeReplicationLimit C] [-totalBlocks B] [-blockSize S] "
-        + "[-replication R] [-baseDirName N]";
+        + "[-replication R] [-baseDirName D]";
 
     private final BlockReportStats blockReportObject;
     private int numDatanodes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index 27df0f8b137fa..35b7622254082 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -60,18 +60,6 @@ public void testNNThroughput() throws Exception {
     NNThroughputBenchmark.runBenchmark(conf, new String[] {"-op", "all"});
   }
 
-  @Test
-  public void testNNThroughputWithBaseDir() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
-    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-        nameDir.getAbsolutePath());
-    DFSTestUtil.formatNameNode(conf);
-    NNThroughputBenchmark.runBenchmark(conf,
-        new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"});
-  }
-
   /**
    * This test runs all benchmarks defined in {@link NNThroughputBenchmark},
    * with explicit local -fs option.
@@ -199,4 +187,60 @@ public void testNNThroughputForBlockReportOp() throws Exception {
           "blockReport", "-datanodes", "3", "-reports", "2"});
     }
   }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * with explicit -baseDirName option.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputWithBaseDir() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      FileSystem.setDefaultUri(benchConf, cluster.getURI());
+
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "create", "-keepResults", "-files", "3", "-baseDirName",
+              "/nnThroughputBenchmark1", "-close"});
+      FSNamesystem fsNamesystem = cluster.getNamesystem();
+      DirectoryListing listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      Boolean b_dir_exist1 = false;
+      Boolean b_dir_exist2 = false;
+      for (HdfsFileStatus f : listing.getPartialListing()) {
+        if (f.getFullName("/").equals("/nnThroughputBenchmark1")) {
+          b_dir_exist1 = true;
+        }
+        if (f.getFullName("/").equals("/nnThroughputBenchmark")) {
+          b_dir_exist2 = true;
+        }
+      }
+      Assert.assertEquals(b_dir_exist1, true);
+      Assert.assertEquals(b_dir_exist2, false);
+
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"});
+      listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
+      b_dir_exist1 = false;
+      b_dir_exist2 = false;
+      for (HdfsFileStatus f : listing.getPartialListing()) {
+        if (f.getFullName("/").equals("/nnThroughputBenchmark1")) {
+          b_dir_exist1 = true;
+        }
+        if (f.getFullName("/").equals("/nnThroughputBenchmark")) {
+          b_dir_exist2 = true;
+        }
+      }
+      Assert.assertEquals(b_dir_exist1, true);
+      Assert.assertEquals(b_dir_exist2, false);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

From 14566b74d274a286f28f1efa751c3f2941e74d61 Mon Sep 17 00:00:00 2001
From: "eddy.cao"
Date: Tue, 5 Dec 2023 11:12:00 +0800
Subject: [PATCH 3/3] fix the checkstyle

---
 .../src/site/markdown/Benchmarking.md         | 19 ++---
 .../namenode/NNThroughputBenchmark.java       | 71 ++++++++++++-------
 .../namenode/TestNNThroughputBenchmark.java   | 34 ++-------
 3 files changed, 61 insertions(+), 63 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
index 26d5db37d6855..2449ab5cdeda5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
@@ -54,15 +54,15 @@ Following are all the operations supported along with their respective operation
 | OPERATION\_OPTION | Operation-specific parameters |
 |:---- |:---- |
 |`all` | _options for other operations_ |
-|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] |
-|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] |
-|`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
-|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
-|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] |
-|`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
-|`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] |
-|`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] |
-|`replication` | [`-datanodes 10`] [`-nodesToDecommission 1`] [`-nodeReplicationLimit 100`] [`-totalBlocks 100`] [`-replication 3`] |
+|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] [`-baseDirName /nnThroughputBenchmark`] |
+|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] [`-baseDirName /nnThroughputBenchmark`] |
+|`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] |
+|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] |
+|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] [`-baseDirName /nnThroughputBenchmark`] |
+|`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] |
+|`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] |
+|`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] [`-baseDirName /nnThroughputBenchmark`] |
+|`replication` | [`-datanodes 10`] [`-nodesToDecommission 1`] [`-nodeReplicationLimit 100`] [`-totalBlocks 100`] [`-replication 3`] [`-baseDirName /nnThroughputBenchmark`] |
 |`clean` | N/A |
 
 ##### Operation Options
@@ -86,6 +86,7 @@ When running benchmarks with the above operation(s), please provide operation-sp
 |`-nodeReplicationLimit` | The maximum number of outgoing replication streams for a data-node. |
 |`-totalBlocks` | Number of total blocks to operate. |
 |`-replication` | Replication factor. Will be adjusted to number of data-nodes if it is larger than that. |
+|`-baseDirName` | The base dir name for benchmarks, to support multiple clients submitting benchmark tests at the same time. |
 
 ### Reports
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index a1cf29facba8e..a4e88d759fb4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -162,11 +162,11 @@ static void setNameNodeLoggingLevel(Level logLevel) {
    * specific name-node operation.
    */
   abstract class OperationStatsBase {
-    protected String baseDirName = "/nnThroughputBenchmark";
+    private String baseDirName = "/nnThroughputBenchmark";
     protected static final String OP_ALL_NAME = "all";
     protected static final String OP_ALL_USAGE = "-op all <other ops options>";
 
-    protected String baseDir;
+    private String baseDir;
     protected short replication;
     protected int blockSize;
     protected int numThreads = 0;        // number of threads
@@ -295,7 +295,12 @@ void cleanUp() throws IOException {
         clientProto.saveNamespace(0, 0);
       }
     }
-
+    public String getBaseDirName() {
+      return baseDirName;
+    }
+    public void setBaseDirName(String baseDirName) {
+      this.baseDirName = baseDirName;
+    }
     int getNumOpsExecuted() {
       return numOpsExecuted;
     }
@@ -316,11 +321,15 @@ long getAverageTime() {
       return elapsedTime == 0 ? 0 : 1000*(double)numOpsExecuted / elapsedTime;
     }
 
-    String getBaseDir() {
-      baseDir = baseDirName + "/" + getOpName();
+    public String getBaseDir() {
+      setBaseDir(baseDirName + "/" + getOpName());
       return baseDir;
     }
 
+    public void setBaseDir(String baseDir) {
+      this.baseDir = baseDir;
+    }
+
     String getClientName(int idx) {
       return getOpName() + "-client-" + idx;
     }
@@ -495,7 +504,7 @@ long executeOp(int daemonId, int inputIdx, String ignore)
       clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,
           false);
       long start = Time.now();
-      clientProto.delete(baseDirName, true);
+      clientProto.delete(getBaseDirName(), true);
       long end = Time.now();
       return end-start;
     }
@@ -503,7 +512,7 @@ long executeOp(int daemonId, int inputIdx, String ignore)
     @Override
     void printResults() {
       LOG.info("--- " + getOpName() + " inputs ---");
-      LOG.info("Remove directory " + baseDirName);
+      LOG.info("Remove directory " + getBaseDirName());
       printStats();
     }
   }
@@ -555,8 +564,10 @@ void parseArguments(List<String> args) {
         if(i+1 == args.size())  printUsage();
         nrFilesPerDir = Integer.parseInt(args.get(++i));
       } else if(args.get(i).equals("-baseDirName")) {
-        if(i+1 == args.size())  printUsage();
-        baseDirName = args.get(++i);
+        if (i + 1 == args.size()) {
+          printUsage();
+        }
+        setBaseDirName(args.get(++i));
       } else if(args.get(i).equals("-close")) {
         closeUponCreate = true;
       } else if(!ignoreUnrelatedOptions)
@@ -671,8 +682,10 @@ void parseArguments(List<String> args) {
         if(i+1 == args.size())  printUsage();
         nrDirsPerDir = Integer.parseInt(args.get(++i));
       } else if(args.get(i).equals("-baseDirName")) {
-        if(i+1 == args.size())  printUsage();
-        baseDirName = args.get(++i);
+        if (i + 1 == args.size()) {
+          printUsage();
+        }
+        setBaseDirName(args.get(++i));
       } else if(!ignoreUnrelatedOptions)
         printUsage();
     }
@@ -775,14 +788,14 @@ void parseArguments(List<String> args) {
     void generateInputs(int[] opsPerThread) throws IOException {
       // create files using opsPerThread
       String[] createArgs = new String[] {
-        "-op", "create",
-        "-threads", String.valueOf(this.numThreads),
-        "-files", String.valueOf(numOpsRequired),
-        "-blockSize", String.valueOf(blockSize),
-        "-filesPerDir",
-        String.valueOf(nameGenerator.getFilesPerDirectory()),
-        "-baseDirName", this.baseDirName,
-        "-close"};
+          "-op", "create",
+          "-threads", String.valueOf(this.numThreads),
+          "-files", String.valueOf(numOpsRequired),
+          "-blockSize", String.valueOf(blockSize),
+          "-filesPerDir",
+          String.valueOf(nameGenerator.getFilesPerDirectory()),
+          "-baseDirName", getBaseDirName(),
+          "-close"};
       CreateFileStats opCreate =
           new CreateFileStats(Arrays.asList(createArgs));
       if(!useExisting) {  // create files if they were not created before
@@ -1147,8 +1160,8 @@ private int transferBlocks( Block blocks[],
   class BlockReportStats extends OperationStatsBase {
     static final String OP_BLOCK_REPORT_NAME = "blockReport";
     static final String OP_BLOCK_REPORT_USAGE =
-      "-op blockReport [-datanodes T] [-reports N] " +
-      "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S] [-baseDirName D]";
+        "-op blockReport [-datanodes T] [-reports N] " +
+        "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S] [-baseDirName D]";
 
     private int blocksPerReport;
     private int blocksPerFile;
@@ -1199,8 +1212,10 @@ void parseArguments(List<String> args) {
         if(i+1 == args.size())  printUsage();
         blockSize = Integer.parseInt(args.get(++i));
       } else if(args.get(i).equals("-baseDirName")) {
-        if(i+1 == args.size())  printUsage();
-        baseDirName = args.get(++i);
+        if (i + 1 == args.size()) {
+          printUsage();
+        }
+        setBaseDirName(args.get(++i));
       } else if(!ignoreUnrelatedOptions)
         printUsage();
     }
@@ -1361,7 +1376,7 @@ void printResults() {
   class ReplicationStats extends OperationStatsBase {
     static final String OP_REPLICATION_NAME = "replication";
     static final String OP_REPLICATION_USAGE =
-        "-op replication [-datanodes T] [-nodesToDecommission N] " +
+        "-op replication [-datanodes T] [-nodesToDecommission D] " +
         "[-nodeReplicationLimit C] [-totalBlocks B] [-blockSize S] "
         + "[-replication R] [-baseDirName D]";
 
@@ -1393,7 +1408,7 @@ class ReplicationStats extends OperationStatsBase {
         "-datanodes", String.valueOf(numDatanodes),
         "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
         "-blocksPerFile", String.valueOf(numDatanodes),
         "-blockSize", String.valueOf(blockSize),
-        "-baseDirName", baseDirName};
+        "-baseDirName", getBaseDirName()};
       blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
       numDecommissionedBlocks = 0;
@@ -1427,8 +1442,10 @@ void parseArguments(List<String> args) {
         if(i+1 == args.size())  printUsage();
         blockSize = Integer.parseInt(args.get(++i));
       } else if(args.get(i).equals("-baseDirName")) {
-        if(i+1 == args.size())  printUsage();
-        baseDirName = args.get(++i);
+        if (i + 1 == args.size()) {
+          printUsage();
+        }
+        setBaseDirName(args.get(++i));
       } else if(!ignoreUnrelatedOptions)
         printUsage();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
index 35b7622254082..a9836e0003595 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
@@ -22,8 +22,10 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -203,40 +205,18 @@ public void testNNThroughputWithBaseDir() throws Exception {
       final Configuration benchConf = new HdfsConfiguration();
       benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
       FileSystem.setDefaultUri(benchConf, cluster.getURI());
+      DistributedFileSystem fs = cluster.getFileSystem();
 
       NNThroughputBenchmark.runBenchmark(benchConf,
           new String[] {"-op", "create", "-keepResults", "-files", "3", "-baseDirName",
               "/nnThroughputBenchmark1", "-close"});
-      FSNamesystem fsNamesystem = cluster.getNamesystem();
-      DirectoryListing listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
-      Boolean b_dir_exist1 = false;
-      Boolean b_dir_exist2 = false;
-      for (HdfsFileStatus f : listing.getPartialListing()) {
-        if (f.getFullName("/").equals("/nnThroughputBenchmark1")) {
-          b_dir_exist1 = true;
-        }
-        if (f.getFullName("/").equals("/nnThroughputBenchmark")) {
-          b_dir_exist2 = true;
-        }
-      }
-      Assert.assertEquals(b_dir_exist1, true);
-      Assert.assertEquals(b_dir_exist2, false);
+      Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
+      Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
 
       NNThroughputBenchmark.runBenchmark(benchConf,
           new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"});
-      listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false);
-      b_dir_exist1 = false;
-      b_dir_exist2 = false;
-      for (HdfsFileStatus f : listing.getPartialListing()) {
-        if (f.getFullName("/").equals("/nnThroughputBenchmark1")) {
-          b_dir_exist1 = true;
-        }
-        if (f.getFullName("/").equals("/nnThroughputBenchmark")) {
-          b_dir_exist2 = true;
-        }
-      }
-      Assert.assertEquals(b_dir_exist1, true);
-      Assert.assertEquals(b_dir_exist2, false);
+      Assert.assertTrue(fs.exists(new Path("/nnThroughputBenchmark1")));
+      Assert.assertFalse(fs.exists(new Path("/nnThroughputBenchmark")));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
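The Benchmarking.md rows updated above give every operation its own `-baseDirName` parameter. A minimal sketch of driving one of the other operations (`mkdirs`) programmatically, under the same assumptions as the final test: `benchConf` points at a running (mini) cluster, the option values mirror the documented examples, and `/nnThroughputBenchmark2` is an arbitrary directory choice, not anything mandated by the patch.

```java
// Sketch only: mkdirs benchmark under a caller-chosen base directory,
// matching the updated OP_MKDIRS_USAGE string. `benchConf` is assumed to be
// configured as in testNNThroughputWithBaseDir; sizes are arbitrary examples.
NNThroughputBenchmark.runBenchmark(benchConf, new String[] {
    "-op", "mkdirs", "-threads", "3", "-dirs", "10",
    "-dirsPerDir", "2", "-baseDirName", "/nnThroughputBenchmark2"});
```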