From 84cd5ee8781680077a281e9c61a7c63d9f43ae26 Mon Sep 17 00:00:00 2001 From: "eddy.cao" Date: Tue, 5 Dec 2023 11:12:00 +0800 Subject: [PATCH] fix the checkstyle --- .../src/site/markdown/Benchmarking.md | 19 ++--- .../namenode/NNThroughputBenchmark.java | 69 ++++++++++++------- .../namenode/TestNNThroughputBenchmark.java | 24 +++---- 3 files changed, 65 insertions(+), 47 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md index 26d5db37d6855d..2449ab5cdeda5f 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md @@ -54,15 +54,15 @@ Following are all the operations supported along with their respective operation | OPERATION\_OPTION | Operation-specific parameters | |:---- |:---- | |`all` | _options for other operations_ | -|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] | -|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] | -|`open` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] | -|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] | -|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] | -|`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] | -|`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] | -|`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] | -|`replication` | [`-datanodes 10`] [`-nodesToDecommission 1`] [`-nodeReplicationLimit 100`] [`-totalBlocks 100`] [`-replication 3`] | +|`create` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-close`] [`-baseDirName /nnThroughputBenchmark`] | +|`mkdirs` | [`-threads 3`] [`-dirs 10`] [`-dirsPerDir 2`] [`-baseDirName /nnThroughputBenchmark`] | +|`open` | [`-threads 3`] [`-files 10`] 
[`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] | +|`delete` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] | +|`append` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-appendNewBlk`] [`-baseDirName /nnThroughputBenchmark`] | +|`fileStatus` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] | +|`rename` | [`-threads 3`] [`-files 10`] [`-filesPerDir 4`] [`-useExisting`] [`-baseDirName /nnThroughputBenchmark`] | +|`blockReport` | [`-datanodes 10`] [`-reports 30`] [`-blocksPerReport 100`] [`-blocksPerFile 10`] [`-baseDirName /nnThroughputBenchmark`] | +|`replication` | [`-datanodes 10`] [`-nodesToDecommission 1`] [`-nodeReplicationLimit 100`] [`-totalBlocks 100`] [`-replication 3`] [`-baseDirName /nnThroughputBenchmark`] | |`clean` | N/A | ##### Operation Options @@ -86,6 +86,7 @@ When running benchmarks with the above operation(s), please provide operation-sp |`-nodeReplicationLimit` | The maximum number of outgoing replication streams for a data-node. | |`-totalBlocks` | Number of total blocks to operate. | |`-replication` | Replication factor. Will be adjusted to number of data-nodes if it is larger than that. | +|`-baseDirName` | The base directory name for benchmarks, allowing multiple clients to submit benchmark tests at the same time. Defaults to `/nnThroughputBenchmark`. 
| ### Reports diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index a1cf29facba8ee..350b0e3a1ba8d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -162,11 +162,11 @@ static void setNameNodeLoggingLevel(Level logLevel) { * specific name-node operation. */ abstract class OperationStatsBase { - protected String baseDirName = "/nnThroughputBenchmark"; + private String baseDirName = "/nnThroughputBenchmark"; protected static final String OP_ALL_NAME = "all"; protected static final String OP_ALL_USAGE = "-op all "; - protected String baseDir; + private String baseDir; protected short replication; protected int blockSize; protected int numThreads = 0; // number of threads @@ -295,7 +295,12 @@ void cleanUp() throws IOException { clientProto.saveNamespace(0, 0); } } - + public String getBaseDirName() { + return baseDirName; + } + public void setBaseDirName(String baseDirName) { + this.baseDirName = baseDirName; + } int getNumOpsExecuted() { return numOpsExecuted; } @@ -316,11 +321,15 @@ long getAverageTime() { return elapsedTime == 0 ? 
0 : 1000*(double)numOpsExecuted / elapsedTime; } - String getBaseDir() { - baseDir = baseDirName + "/" + getOpName(); + public String getBaseDir() { + setBaseDir(baseDirName + "/" + getOpName()); return baseDir; } + public void setBaseDir(String baseDir) { + this.baseDir = baseDir; + } + String getClientName(int idx) { return getOpName() + "-client-" + idx; } @@ -495,7 +504,7 @@ long executeOp(int daemonId, int inputIdx, String ignore) clientProto.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false); long start = Time.now(); - clientProto.delete(baseDirName, true); + clientProto.delete(getBaseDirName(), true); long end = Time.now(); return end-start; } @@ -503,7 +512,7 @@ long executeOp(int daemonId, int inputIdx, String ignore) @Override void printResults() { LOG.info("--- " + getOpName() + " inputs ---"); - LOG.info("Remove directory " + baseDirName); + LOG.info("Remove directory " + getBaseDirName()); printStats(); } } @@ -555,8 +564,10 @@ void parseArguments(List args) { if(i+1 == args.size()) printUsage(); nrFilesPerDir = Integer.parseInt(args.get(++i)); } else if(args.get(i).equals("-baseDirName")) { - if(i+1 == args.size()) printUsage(); - baseDirName = args.get(++i); + if (i + 1 == args.size()) { + printUsage(); + } + setBaseDirName(args.get(++i)); } else if(args.get(i).equals("-close")) { closeUponCreate = true; } else if(!ignoreUnrelatedOptions) @@ -671,8 +682,10 @@ void parseArguments(List args) { if(i+1 == args.size()) printUsage(); nrDirsPerDir = Integer.parseInt(args.get(++i)); } else if(args.get(i).equals("-baseDirName")) { - if(i+1 == args.size()) printUsage(); - baseDirName = args.get(++i); + if (i + 1 == args.size()) { + printUsage(); + } + setBaseDirName(args.get(++i)); } else if(!ignoreUnrelatedOptions) printUsage(); } @@ -775,14 +788,14 @@ void parseArguments(List args) { void generateInputs(int[] opsPerThread) throws IOException { // create files using opsPerThread String[] createArgs = new String[] { - "-op", "create", - 
"-threads", String.valueOf(this.numThreads), - "-files", String.valueOf(numOpsRequired), - "-blockSize", String.valueOf(blockSize), - "-filesPerDir", - String.valueOf(nameGenerator.getFilesPerDirectory()), - "-baseDirName", this.baseDirName, - "-close"}; + "-op", "create", + "-threads", String.valueOf(this.numThreads), + "-files", String.valueOf(numOpsRequired), + "-blockSize", String.valueOf(blockSize), + "-filesPerDir", + String.valueOf(nameGenerator.getFilesPerDirectory()), + "-baseDirName", getBaseDirName(), + "-close"}; CreateFileStats opCreate = new CreateFileStats(Arrays.asList(createArgs)); if(!useExisting) { // create files if they were not created before @@ -1147,8 +1160,8 @@ private int transferBlocks( Block blocks[], class BlockReportStats extends OperationStatsBase { static final String OP_BLOCK_REPORT_NAME = "blockReport"; static final String OP_BLOCK_REPORT_USAGE = - "-op blockReport [-datanodes T] [-reports N] " + - "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S] [-baseDirName D]"; + "-op blockReport [-datanodes T] [-reports N] " + + "[-blocksPerReport B] [-blocksPerFile F] [-blockSize S] [-baseDirName D]"; private int blocksPerReport; private int blocksPerFile; @@ -1199,8 +1212,10 @@ void parseArguments(List args) { if(i+1 == args.size()) printUsage(); blockSize = Integer.parseInt(args.get(++i)); } else if(args.get(i).equals("-baseDirName")) { - if(i+1 == args.size()) printUsage(); - baseDirName = args.get(++i); + if (i + 1 == args.size()) { + printUsage(); + } + setBaseDirName(args.get(++i)); } else if(!ignoreUnrelatedOptions) printUsage(); } @@ -1393,7 +1408,7 @@ class ReplicationStats extends OperationStatsBase { "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes), "-blocksPerFile", String.valueOf(numDatanodes), "-blockSize", String.valueOf(blockSize), - "-baseDirName", baseDirName}; + "-baseDirName", getBaseDirName()}; blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs)); numDecommissionedBlocks = 
0; numPendingBlocks = 0; @@ -1427,8 +1442,10 @@ void parseArguments(List args) { if(i+1 == args.size()) printUsage(); blockSize = Integer.parseInt(args.get(++i)); } else if(args.get(i).equals("-baseDirName")) { - if(i+1 == args.size()) printUsage(); - baseDirName = args.get(++i); + if (i + 1 == args.size()) { + printUsage(); + } + setBaseDirName(args.get(++i)); } else if(!ignoreUnrelatedOptions) printUsage(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java index 35b7622254082a..1910f63a687835 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java @@ -209,34 +209,34 @@ public void testNNThroughputWithBaseDir() throws Exception { "/nnThroughputBenchmark1", "-close"}); FSNamesystem fsNamesystem = cluster.getNamesystem(); DirectoryListing listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false); - Boolean b_dir_exist1 = false; - Boolean b_dir_exist2 = false; + Boolean dirExist1 = false; + Boolean dirExist2 = false; for (HdfsFileStatus f : listing.getPartialListing()) { if (f.getFullName("/").equals("/nnThroughputBenchmark1")) { - b_dir_exist1 = true; + dirExist1 = true; } if (f.getFullName("/").equals("/nnThroughputBenchmark")) { - b_dir_exist2 = true; + dirExist2 = true; } } - Assert.assertEquals(b_dir_exist1, true); - Assert.assertEquals(b_dir_exist2, false); + Assert.assertEquals(dirExist1, true); + Assert.assertEquals(dirExist2, false); NNThroughputBenchmark.runBenchmark(benchConf, new String[] {"-op", "all", "-baseDirName", "/nnThroughputBenchmark1"}); listing = fsNamesystem.getListing("/", HdfsFileStatus.EMPTY_NAME, false); - b_dir_exist1 = false; - 
b_dir_exist2 = false; + dirExist1 = false; + dirExist2 = false; for (HdfsFileStatus f : listing.getPartialListing()) { if (f.getFullName("/").equals("/nnThroughputBenchmark1")) { - b_dir_exist1 = true; + dirExist1 = true; } if (f.getFullName("/").equals("/nnThroughputBenchmark")) { - b_dir_exist2 = true; + dirExist2 = true; } } - Assert.assertEquals(b_dir_exist1, true); - Assert.assertEquals(b_dir_exist2, false); + Assert.assertEquals(dirExist1, true); + Assert.assertEquals(dirExist2, false); } finally { if (cluster != null) { cluster.shutdown();