From 8f877582830040a90d88059592219de8c2b8be43 Mon Sep 17 00:00:00 2001
From: Tom McCormick
Date: Mon, 3 Oct 2022 10:10:56 -0700
Subject: [PATCH] HDFS-16791. WIP: ClientProtocol and FileSystem APIs
 implemented and building; tests for DistributedFileSystem, ViewFileSystem
 and RBF

---
 .../java/org/apache/hadoop/fs/FileSystem.java |  10 +
 .../apache/hadoop/fs/FilterFileSystem.java    |  73 ++--
 .../hadoop/fs/viewfs/ViewFileSystem.java      |  16 +
 .../org/apache/hadoop/hdfs/DFSClient.java     |  10 +
 .../hadoop/hdfs/DFSOpsCountStatistics.java    |   1 +
 .../hadoop/hdfs/DistributedFileSystem.java    |  27 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java  |   8 +
 .../ClientNamenodeProtocolTranslatorPB.java   |  18 +
 .../main/proto/ClientNamenodeProtocol.proto   |  16 +-
 .../hadoop/hdfs/protocol/TestReadOnly.java    |   1 +
 .../router/RouterClientProtocol.java          |  15 +
 .../federation/router/RouterRpcServer.java    |   6 +
 .../router/TestRouterMountTable.java          |  22 ++
 ...amenodeProtocolServerSideTranslatorPB.java | 115 +++---
 .../hdfs/server/namenode/FSNamesystem.java    | 333 +++++++++---------
 .../server/namenode/NameNodeRpcServer.java    | 169 ++++-----
 .../fs/viewfs/TestViewFileSystemHdfs.java     |  45 ++-
 .../hdfs/TestDistributedFileSystem.java       |  75 ++--
 .../apache/hadoop/hdfs/TestEnclosingRoot.java | 120 +++++++
 19 files changed, 709 insertions(+), 371 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 0bc419b0353802..27560cbdd4336b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4876,6 +4876,16 @@ public CompletableFuture<FSDataInputStream> build() throws IOException {
     }
   }
 
+  /**
+   * Return the enclosing root of the given path; the base implementation returns the filesystem root.
+   * @param path the path for which to find the enclosing root
+   * @return the qualified enclosing root path
+   * @throws IOException raised on errors performing I/O
+   */
+  public Path getEnclosingRoot(Path path) throws IOException {
+    return this.makeQualified(new Path("/"));
+  }
+
   /**
    * Create a multipart uploader.
* @param basePath file path under which all files are uploaded diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index cdbe51e3307017..7fa49ceea61c60 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -61,23 +61,23 @@ @InterfaceAudience.Public @InterfaceStability.Stable public class FilterFileSystem extends FileSystem { - + protected FileSystem fs; protected String swapScheme; - + /* * so that extending classes can define it */ public FilterFileSystem() { } - + public FilterFileSystem(FileSystem fs) { this.fs = fs; this.statistics = fs.statistics; } /** - * Get the raw file system + * Get the raw file system * @return FileSystem being filtered */ public FileSystem getRawFileSystem() { @@ -108,8 +108,8 @@ public void initialize(URI name, Configuration conf) throws IOException { public URI getUri() { return fs.getUri(); } - - + + @Override protected URI getCanonicalUri() { return fs.getCanonicalUri(); @@ -127,7 +127,7 @@ public Path makeQualified(Path path) { // swap in our scheme if the filtered fs is using a different scheme if (swapScheme != null) { try { - // NOTE: should deal with authority, but too much other stuff is broken + // NOTE: should deal with authority, but too much other stuff is broken fqPath = new Path( new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null) ); @@ -137,7 +137,7 @@ public Path makeQualified(Path path) { } return fqPath; } - + /////////////////////////////////////////////////////////////// // FileSystem /////////////////////////////////////////////////////////////// @@ -223,14 +223,14 @@ protected RemoteIterator listLocatedStatus(final Path f, public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet flags, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { - + return fs.createNonRecursive(f, permission, flags, bufferSize, replication, blockSize, progress); } /** * Set replication for an existing file. - * + * * @param src file name * @param replication new replication * @throws IOException raised on errors performing I/O. @@ -241,7 +241,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, public boolean setReplication(Path src, short replication) throws IOException { return fs.setReplication(src, replication); } - + /** * Renames Path src to Path dst. Can take place on local fs * or remote DFS. @@ -261,13 +261,13 @@ protected void rename(Path src, Path dst, Rename... options) public boolean truncate(Path f, final long newLength) throws IOException { return fs.truncate(f, newLength); } - + /** Delete a file */ @Override public boolean delete(Path f, boolean recursive) throws IOException { return fs.delete(f, recursive); } - + /** List files in a directory. */ @Override public FileStatus[] listStatus(Path f) throws IOException { @@ -286,7 +286,7 @@ public RemoteIterator listLocatedStatus(Path f) throws IOException { return fs.listLocatedStatus(f); } - + /** Return a remote iterator for listing in a directory */ @Override public RemoteIterator listStatusIterator(Path f) @@ -303,34 +303,34 @@ public Path getHomeDirectory() { /** * Set the current working directory for the given file system. All relative * paths will be resolved relative to it. 
- * + * * @param newDir new dir. */ @Override public void setWorkingDirectory(Path newDir) { fs.setWorkingDirectory(newDir); } - + /** * Get the current working directory for the given file system - * + * * @return the directory pathname */ @Override public Path getWorkingDirectory() { return fs.getWorkingDirectory(); } - + @Override protected Path getInitialWorkingDirectory() { return fs.getInitialWorkingDirectory(); } - + @Override public FsStatus getStatus(Path p) throws IOException { return fs.getStatus(p); } - + @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { return fs.mkdirs(f, permission); @@ -351,26 +351,26 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, src, dst); } - + /** * The src files are on the local disk. Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ @Override - public void copyFromLocalFile(boolean delSrc, boolean overwrite, + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, overwrite, srcs, dst); } - + /** * The src file is on the local disk. Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ @Override - public void copyFromLocalFile(boolean delSrc, boolean overwrite, + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, overwrite, src, dst); @@ -380,13 +380,13 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite, * The src file is under FS, and the dst is on the local disk. * Copy it from FS control to the local dst name. * delSrc indicates if the src will be removed or not. - */ + */ @Override public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyToLocalFile(delSrc, src, dst); } - + /** * Returns a local File that the user can write output to. 
The caller * provides both the eventual FS target name and the local working @@ -427,7 +427,7 @@ public long getUsed(Path path) throws IOException { public long getDefaultBlockSize() { return fs.getDefaultBlockSize(); } - + @Override public short getDefaultReplication() { return fs.getDefaultReplication(); @@ -438,7 +438,7 @@ public FsServerDefaults getServerDefaults() throws IOException { return fs.getServerDefaults(); } - // path variants delegate to underlying filesystem + // path variants delegate to underlying filesystem @Override public long getDefaultBlockSize(Path f) { return fs.getDefaultBlockSize(f); @@ -476,7 +476,7 @@ public void access(Path path, FsAction mode) throws AccessControlException, public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, + ParentNotDirectoryException, UnsupportedFileSystemException, IOException { fs.createSymlink(target, link, createParent); } @@ -513,7 +513,7 @@ public FileChecksum getFileChecksum(Path f, long length) throws IOException { public void setVerifyChecksum(boolean verifyChecksum) { fs.setVerifyChecksum(verifyChecksum); } - + @Override public void setWriteChecksum(boolean writeChecksum) { fs.setWriteChecksum(writeChecksum); @@ -523,7 +523,7 @@ public void setWriteChecksum(boolean writeChecksum) { public Configuration getConf() { return fs.getConf(); } - + @Override public void close() throws IOException { super.close(); @@ -564,7 +564,7 @@ protected boolean primitiveMkdir(Path f, FsPermission abdolutePermission) throws IOException { return fs.primitiveMkdir(f, abdolutePermission); } - + @Override // FileSystem public FileSystem[] getChildFileSystems() { return new FileSystem[]{fs}; @@ -575,13 +575,13 @@ public Path createSnapshot(Path path, String snapshotName) throws IOException { return fs.createSnapshot(path, snapshotName); } - + @Override // FileSystem public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { fs.renameSnapshot(path, snapshotOldName, snapshotNewName); } - + @Override // FileSystem public void deleteSnapshot(Path path, String snapshotName) throws IOException { @@ -732,6 +732,11 @@ protected CompletableFuture openFileWithOptions( return fs.openFileWithOptions(pathHandle, parameters); } + @Override + public Path getEnclosingRoot(Path path) throws IOException { + return fs.getEnclosingRoot(path); + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index e31a701a6eaa71..1f10c4830dfdd9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -1370,6 +1370,14 @@ public boolean hasPathCapability(Path path, String capability) } } + @Override + public Path getEnclosingRoot(Path path) throws IOException { + InodeTree.ResolveResult res = fsState.resolve(getUriPath(path), true); + Path fullPath = new Path(res.resolvedPath); + Path enclosingPath = res.targetFileSystem.getEnclosingRoot(path); + return enclosingPath.depth() > fullPath.depth() ? 
enclosingPath : fullPath; + } + /** * An instance of this class represents an internal dir of the viewFs * that is internal dir of the mount table. @@ -1919,6 +1927,14 @@ public Collection getAllStoragePolicies() } return allPolicies; } + + @Override + public Path getEnclosingRoot(Path path) throws IOException { + InodeTree.ResolveResult res = fsState.resolve(path.toString(), true); + Path fullPath = new Path(res.resolvedPath); + Path enclosingPath = res.targetFileSystem.getEnclosingRoot(path); + return enclosingPath.depth() > fullPath.depth() ? enclosingPath : fullPath; + } } enum RenameStrategy { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index acfca6799f4f81..d6ed85e031901e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -3528,4 +3528,14 @@ public DatanodeInfo[] slowDatanodeReport() throws IOException { } } + public Path getEnclosingRoot(String src) throws IOException { + checkOpen(); + try (TraceScope ignored = newPathTraceScope("getEnclosingRoot", src)) { + return new Path(namenode.getEnclosingRoot(src)); + } catch (RemoteException re) { + throw re.unwrapRemoteException(AccessControlException.class, + UnresolvedPathException.class); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java index 04fef2d5dcd23a..2975453ddfcae5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java @@ -64,6 +64,7 @@ public enum OpType { GET_EC_CODECS("op_get_ec_codecs"), GET_EC_POLICY("op_get_ec_policy"), GET_EC_POLICIES("op_get_ec_policies"), + GET_ENCLOSING_ROOT("op_get_enclosing_root"), GET_ENCRYPTION_ZONE("op_get_encryption_zone"), GET_FILE_BLOCK_LOCATIONS("op_get_file_block_locations"), GET_FILE_CHECKSUM(CommonStatisticNames.OP_GET_FILE_CHECKSUM), diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 93db332d738c44..0bb366d3f0bab0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -3898,4 +3898,31 @@ public DatanodeInfo[] getSlowDatanodeStats() throws IOException { return dfs.slowDatanodeReport(); } + /* HDFS only */ + public Path getEnclosingRoot(final Path path) throws IOException { + statistics.incrementReadOps(1); + storageStatistics.incrementOpCounter(OpType.GET_ENCLOSING_ROOT); + Preconditions.checkNotNull(path); + Path absF = fixRelativePart(path); + return new FileSystemLinkResolver() { + @Override + public Path doCall(final Path p) throws IOException { + return dfs.getEnclosingRoot(getPathName(p)); + } + + @Override + public Path next(final FileSystem fs, final Path p) + throws IOException { + if (fs instanceof DistributedFileSystem) { + DistributedFileSystem myDfs = (DistributedFileSystem) fs; + return 
myDfs.getEnclosingRoot(p);
+        } else {
+          throw new UnsupportedOperationException(
+              "Cannot call getEnclosingRoot"
+                  + " on a symlink to a non-DistributedFileSystem: " + path
+                  + " -> " + p);
+        }
+      }
+    }.resolve(this, absF);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 4f2da496a1a3d9..782b93e36850de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.hdfs.AddBlockFlag;
@@ -1888,4 +1889,11 @@ BatchedEntries listOpenFiles(long prevId,
   @ReadOnly
   DatanodeInfo[] getSlowDatanodeReport() throws IOException;
 
+  /**
+   * Get the enclosing root for the given path.
+   */
+  @Idempotent
+  @ReadOnly(isCoordinated = true)
+  String getEnclosingRoot(String src) throws IOException;
+
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 541a4361896dc4..ed78329e6fe31e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -127,6 +128,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEnclosingRootRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEnclosingRootResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -2106,4 +2109,19 @@ public HAServiceProtocol.HAServiceState getHAServiceState()
     }
   }
 
+  @Override
+  public String getEnclosingRoot(String filename) throws IOException {
+    final GetEnclosingRootRequestProto.Builder builder =
+        GetEnclosingRootRequestProto.newBuilder();
+    builder.setFilename(filename);
+    final GetEnclosingRootRequestProto req = builder.build();
+    try {
+      final GetEnclosingRootResponseProto response =
rpcProxy.getEnclosingRoot(null, req); + return response.getEnclosingRootPath(); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 60792b5b6c94cc..d1f7d360307146 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -38,9 +38,9 @@ import "erasurecoding.proto"; import "HAServiceProtocol.proto"; /** - * The ClientNamenodeProtocol Service defines the interface between a client + * The ClientNamenodeProtocol Service defines the interface between a client * (as runnign inside a MR Task) and the Namenode. - * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc + * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc * for each of the methods. * The exceptions declared in the above class also apply to this protocol. * Exceptions are unwrapped and thrown by the PB libraries. @@ -428,6 +428,14 @@ message GetPreferredBlockSizeResponseProto { message GetSlowDatanodeReportRequestProto { } +message GetEnclosingRootRequestProto { + required string filename = 1; +} + +message GetEnclosingRootResponseProto { + required string enclosingRootPath = 1; +} + message GetSlowDatanodeReportResponseProto { repeated DatanodeInfoProto datanodeInfoProto = 1; } @@ -999,7 +1007,7 @@ service ClientNamenodeProtocol { rpc allowSnapshot(AllowSnapshotRequestProto) returns(AllowSnapshotResponseProto); rpc disallowSnapshot(DisallowSnapshotRequestProto) - returns(DisallowSnapshotResponseProto); + returns(DisallowSnapshotResponseProto); rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto) returns(GetSnapshottableDirListingResponseProto); rpc getSnapshotListing(GetSnapshotListingRequestProto) @@ -1080,4 +1088,6 @@ service ClientNamenodeProtocol { returns(HAServiceStateResponseProto); rpc getSlowDatanodeReport(GetSlowDatanodeReportRequestProto) returns(GetSlowDatanodeReportResponseProto); + rpc getEnclosingRoot(GetEnclosingRootRequestProto) + returns(GetEnclosingRootResponseProto); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java index f7ea7bcd76dde5..c225a98c8244ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java @@ -56,6 +56,7 @@ public class TestReadOnly { "listCachePools", "getAclStatus", "getEZForPath", + "getEnclosingRoot", "listEncryptionZones", "listReencryptionStatus", "getXAttrs", diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index a5f83c95b7baf7..bf5be9947114c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1935,6 +1935,21 @@ public 
DatanodeInfo[] getSlowDatanodeReport() throws IOException {
     return rpcServer.getSlowDatanodeReport(true, 0);
   }
 
+  @Override
+  public String getEnclosingRoot(String src) throws IOException {
+    Path mountPath = new Path("/");
+    if (subclusterResolver instanceof MountTableResolver) {
+      MountTableResolver mountTable = (MountTableResolver) subclusterResolver;
+      if (mountTable.getMountPoint(src) != null) {
+        // TODO: unclear whether the mount point is always the right answer; likely depends on the default mount point / link fallback
+        mountPath = new Path(mountTable.getMountPoint(src).getSourcePath());
+      }
+    }
+    EncryptionZone zone = getEZForPath(src);
+    Path zonePath = new Path((zone != null ? zone.getPath() : "/"));
+    return (zonePath.depth() > mountPath.depth() ? zonePath : mountPath).toString();
+  }
+
 @Override
 public HAServiceProtocol.HAServiceState getHAServiceState() {
   if (rpcServer.isSafeMode()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index c4173163436cef..cbd61d8b687617 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2068,4 +2068,10 @@ public ListenableFuture reload(
       return executorService.submit(() -> load(type));
     }
   }
+
+  @Override // ClientProtocol
+  public String getEnclosingRoot(String src) throws IOException {
+    // TODO: resolve src to an RBF mount point and return the deeper of that mount point and the enclosing root from the namenode
+    return clientProto.getEnclosingRoot(src);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index a346c1a241a80f..168887c7aebe23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -763,4 +763,26 @@ public void testListStatusMountPoint() throws Exception {
       nnFs0.delete(new Path("/testLsMountEntryDest"), true);
     }
   }
+
+  @Test
+  public void testGetEnclosingRoot() throws Exception {
+
+    // Add a read-only entry
+    MountTable readOnlyEntry = MountTable.newInstance(
+        "/readonly", Collections.singletonMap("ns0", "/testdir"));
+    readOnlyEntry.setReadOnly(true);
+    assertTrue(addMountTable(readOnlyEntry));
+    assertEquals(new Path("/readonly"), routerFs.getEnclosingRoot(new Path("/readonly")));
+
+    assertEquals(new Path("/"), routerFs.getEnclosingRoot(new Path("/regular")));
+
+    // Add a regular entry
+    MountTable regularEntry = MountTable.newInstance(
+        "/regular", Collections.singletonMap("ns0", "/testdir"));
+    assertTrue(addMountTable(regularEntry));
+    assertEquals(new Path("/regular"), routerFs.getEnclosingRoot(new Path("/regular")));
+
+    // Create a folder, which should be visible in all locations
+    assertTrue(routerFs.mkdirs(new Path("/regular/newdir")));
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 79c122cf5bae0a..4fa173575b76d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -134,6 +134,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEnclosingRootRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEnclosingRootResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; @@ -323,12 +325,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements DeleteSnapshotResponseProto.newBuilder().build(); static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE = RenameSnapshotResponseProto.newBuilder().build(); - static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE = + static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE = AllowSnapshotResponseProto.newBuilder().build(); static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE = DisallowSnapshotResponseProto.newBuilder().build(); - static final GetSnapshottableDirListingResponseProto - NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE = + static final GetSnapshottableDirListingResponseProto + NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE = GetSnapshottableDirListingResponseProto.newBuilder().build(); static final GetSnapshotListingResponseProto NULL_GET_SNAPSHOT_LISTING_RESPONSE = @@ -339,28 +341,28 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements VOID_UNSET_STORAGE_POLICY_RESPONSE = UnsetStoragePolicyResponseProto.newBuilder().build(); - private static final CreateResponseProto VOID_CREATE_RESPONSE = + private static final CreateResponseProto VOID_CREATE_RESPONSE = CreateResponseProto.newBuilder().build(); - private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = + private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = SetPermissionResponseProto.newBuilder().build(); - private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = + private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = SetOwnerResponseProto.newBuilder().build(); - private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = + private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = AbandonBlockResponseProto.newBuilder().build(); - private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = + private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = ReportBadBlocksResponseProto.newBuilder().build(); - private static final ConcatResponseProto VOID_CONCAT_RESPONSE = + private static final ConcatResponseProto VOID_CONCAT_RESPONSE = 
ConcatResponseProto.newBuilder().build(); - private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = + private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = Rename2ResponseProto.newBuilder().build(); - private static final GetListingResponseProto VOID_GETLISTING_RESPONSE = + private static final GetListingResponseProto VOID_GETLISTING_RESPONSE = GetListingResponseProto.newBuilder().build(); private static final GetBatchedListingResponseProto @@ -370,50 +372,50 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements .setHasMore(false) .build(); - private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = + private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = RenewLeaseResponseProto.newBuilder().build(); private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = RefreshNodesResponseProto.newBuilder().build(); - private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = + private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = FinalizeUpgradeResponseProto.newBuilder().build(); - private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = + private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = MetaSaveResponseProto.newBuilder().build(); - private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = + private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = GetFileInfoResponseProto.newBuilder().build(); private static final GetLocatedFileInfoResponseProto VOID_GETLOCATEDFILEINFO_RESPONSE = GetLocatedFileInfoResponseProto.newBuilder().build(); - private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = + private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = GetFileLinkInfoResponseProto.newBuilder().build(); - private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = + private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = SetQuotaResponseProto.newBuilder().build(); - private static final FsyncResponseProto VOID_FSYNC_RESPONSE = + private static final FsyncResponseProto VOID_FSYNC_RESPONSE = FsyncResponseProto.newBuilder().build(); - private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = + private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = SetTimesResponseProto.newBuilder().build(); - private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = + private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = CreateSymlinkResponseProto.newBuilder().build(); private static final UpdatePipelineResponseProto - VOID_UPDATEPIPELINE_RESPONSE = + VOID_UPDATEPIPELINE_RESPONSE = UpdatePipelineResponseProto.newBuilder().build(); - private static final CancelDelegationTokenResponseProto - VOID_CANCELDELEGATIONTOKEN_RESPONSE = + private static final CancelDelegationTokenResponseProto + VOID_CANCELDELEGATIONTOKEN_RESPONSE = CancelDelegationTokenResponseProto.newBuilder().build(); - private static final SetBalancerBandwidthResponseProto - VOID_SETBALANCERBANDWIDTH_RESPONSE = + private static final SetBalancerBandwidthResponseProto + VOID_SETBALANCERBANDWIDTH_RESPONSE = SetBalancerBandwidthResponseProto.newBuilder().build(); private static final SetAclResponseProto @@ -433,10 +435,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements private static final RemoveAclResponseProto VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance(); - + private static final 
SetXAttrResponseProto VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance(); - + private static final RemoveXAttrResponseProto VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance(); @@ -449,7 +451,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements /** * Constructor - * + * * @param server - the NN server * @throws IOException */ @@ -490,7 +492,7 @@ public GetServerDefaultsResponseProto getServerDefaults( } } - + @Override public CreateResponseProto create(RpcController controller, CreateRequestProto req) throws ServiceException { @@ -544,7 +546,7 @@ public AppendResponseProto append(RpcController controller, public SetReplicationResponseProto setReplication(RpcController controller, SetReplicationRequestProto req) throws ServiceException { try { - boolean result = + boolean result = server.setReplication(req.getSrc(), (short) req.getReplication()); return SetReplicationResponseProto.newBuilder().setResult(result).build(); } catch (IOException e) { @@ -568,7 +570,7 @@ public SetPermissionResponseProto setPermission(RpcController controller, public SetOwnerResponseProto setOwner(RpcController controller, SetOwnerRequestProto req) throws ServiceException { try { - server.setOwner(req.getSrc(), + server.setOwner(req.getSrc(), req.hasUsername() ? req.getUsername() : null, req.hasGroupname() ? req.getGroupname() : null); } catch (IOException e) { @@ -592,7 +594,7 @@ public AbandonBlockResponseProto abandonBlock(RpcController controller, @Override public AddBlockResponseProto addBlock(RpcController controller, AddBlockRequestProto req) throws ServiceException { - + try { List excl = req.getExcludeNodesList(); List favor = req.getFavoredNodesList(); @@ -643,7 +645,7 @@ public GetAdditionalDatanodeResponseProto getAdditionalDatanode( public CompleteResponseProto complete(RpcController controller, CompleteRequestProto req) throws ServiceException { try { - boolean result = + boolean result = server.complete(req.getSrc(), req.getClientName(), req.hasLast() ? PBHelperClient.convert(req.getLast()) : null, req.hasFileId() ? 
req.getFileId() : HdfsConstants.GRANDFATHER_INODE_ID); @@ -652,7 +654,7 @@ public CompleteResponseProto complete(RpcController controller, throw new ServiceException(e); } } - + @Override public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller, ReportBadBlocksRequestProto req) throws ServiceException { @@ -706,11 +708,11 @@ public Rename2ResponseProto rename2(RpcController controller, } try { - server.rename2(req.getSrc(), req.getDst(), + server.rename2(req.getSrc(), req.getDst(), optionList.toArray(new Rename[optionList.size()])); } catch (IOException e) { throw new ServiceException(e); - } + } return VOID_RENAME2_RESPONSE; } @@ -835,7 +837,7 @@ public RecoverLeaseResponseProto recoverLease(RpcController controller, throw new ServiceException(e); } } - + @Override public RestoreFailedStorageResponseProto restoreFailedStorage( RpcController controller, RestoreFailedStorageRequestProto req) @@ -934,7 +936,7 @@ public SetSafeModeResponseProto setSafeMode(RpcController controller, throw new ServiceException(e); } } - + @Override public SaveNamespaceResponseProto saveNamespace(RpcController controller, SaveNamespaceRequestProto req) throws ServiceException { @@ -1048,12 +1050,12 @@ public GetFileInfoResponseProto getFileInfo(RpcController controller, GetFileInfoRequestProto req) throws ServiceException { try { HdfsFileStatus result = server.getFileInfo(req.getSrc()); - + if (result != null) { return GetFileInfoResponseProto.newBuilder().setFs( PBHelperClient.convert(result)).build(); } - return VOID_GETFILEINFO_RESPONSE; + return VOID_GETFILEINFO_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } @@ -1085,7 +1087,7 @@ public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller, return GetFileLinkInfoResponseProto.newBuilder().setFs( PBHelperClient.convert(result)).build(); } else { - return VOID_GETFILELINKINFO_RESPONSE; + return VOID_GETFILELINKINFO_RESPONSE; } } catch (IOException e) { @@ -1105,7 +1107,7 @@ public GetContentSummaryResponseProto getContentSummary( throw new ServiceException(e); } } - + @Override public SetQuotaResponseProto setQuota(RpcController controller, SetQuotaRequestProto req) throws ServiceException { @@ -1119,7 +1121,7 @@ public SetQuotaResponseProto setQuota(RpcController controller, throw new ServiceException(e); } } - + @Override public FsyncResponseProto fsync(RpcController controller, FsyncRequestProto req) throws ServiceException { @@ -1210,7 +1212,7 @@ public GetDelegationTokenResponseProto getDelegationToken( try { Token token = server .getDelegationToken(new Text(req.getRenewer())); - GetDelegationTokenResponseProto.Builder rspBuilder = + GetDelegationTokenResponseProto.Builder rspBuilder = GetDelegationTokenResponseProto.newBuilder(); if (token != null) { rspBuilder.setToken(PBHelperClient.convert(token)); @@ -1265,7 +1267,7 @@ public GetDataEncryptionKeyResponseProto getDataEncryptionKey( RpcController controller, GetDataEncryptionKeyRequestProto request) throws ServiceException { try { - GetDataEncryptionKeyResponseProto.Builder builder = + GetDataEncryptionKeyResponseProto.Builder builder = GetDataEncryptionKeyResponseProto.newBuilder(); DataEncryptionKey encryptionKey = server.getDataEncryptionKey(); if (encryptionKey != null) { @@ -1304,7 +1306,7 @@ public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller, throw new ServiceException(e); } } - + @Override public AllowSnapshotResponseProto allowSnapshot(RpcController controller, AllowSnapshotRequestProto req) throws 
ServiceException { @@ -1411,7 +1413,7 @@ public GetSnapshotDiffReportListingResponseProto getSnapshotDiffReportListing( @Override public IsFileClosedResponseProto isFileClosed( - RpcController controller, IsFileClosedRequestProto request) + RpcController controller, IsFileClosedRequestProto request) throws ServiceException { try { boolean result = server.isFileClosed(request.getSrc()); @@ -1495,7 +1497,7 @@ public AddCachePoolResponseProto addCachePool(RpcController controller, throw new ServiceException(e); } } - + @Override public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, ModifyCachePoolRequestProto request) throws ServiceException { @@ -1604,7 +1606,7 @@ public GetAclStatusResponseProto getAclStatus(RpcController controller, throw new ServiceException(e); } } - + @Override public CreateEncryptionZoneResponseProto createEncryptionZone( RpcController controller, CreateEncryptionZoneRequestProto req) @@ -1760,7 +1762,7 @@ public ListXAttrsResponseProto listXAttrs(RpcController controller, throw new ServiceException(e); } } - + @Override public RemoveXAttrResponseProto removeXAttr(RpcController controller, RemoveXAttrRequestProto req) throws ServiceException { @@ -2074,4 +2076,17 @@ public GetSlowDatanodeReportResponseProto getSlowDatanodeReport(RpcController co throw new ServiceException(e); } } + + @Override + public GetEnclosingRootResponseProto getEnclosingRoot( + RpcController controller, GetEnclosingRootRequestProto req) + throws ServiceException { + try { + String enclosingRootPath = server.getEnclosingRoot(req.getFilename()); + return GetEnclosingRootResponseProto.newBuilder().setEnclosingRootPath(enclosingRootPath) + .build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 99f2089fe8d526..a3ba5cbf4655b8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -413,7 +413,7 @@ void logAuditEvent(boolean succeeded, String cmd, String src) throws IOException { logAuditEvent(succeeded, cmd, src, null, null); } - + private void logAuditEvent(boolean succeeded, String cmd, String src, String dst, FileStatus stat) throws IOException { if (isAuditEnabled() && isExternalInvocation()) { @@ -554,7 +554,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ */ private volatile boolean needRollbackFsImage; - final LeaseManager leaseManager = new LeaseManager(this); + final LeaseManager leaseManager = new LeaseManager(this); Daemon nnrmthread = null; // NamenodeResourceMonitor thread @@ -581,7 +581,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ */ private final long editLogRollerThreshold; /** - * Check interval of an active namenode's edit log roller thread + * Check interval of an active namenode's edit log roller thread */ private final int editLogRollerInterval; @@ -593,7 +593,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ private volatile boolean hasResourcesAvailable = false; private volatile boolean fsRunning = true; - + /** The start time of the namesystem. 
*/ private final long startTime = now(); @@ -619,11 +619,11 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ /** Lock to protect FSNamesystem. */ private final FSNamesystemLock fsLock; - /** + /** * Checkpoint lock to protect FSNamesystem modification on standby NNs. * Unlike fsLock, it does not affect block updates. On active NNs, this lock * does not provide proper protection, because there are operations that - * modify both block and name system state. Even on standby, fsLock is + * modify both block and name system state. Even on standby, fsLock is * used when block state changes need to be blocked. */ private final ReentrantLock cpLock; @@ -641,7 +641,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ /** * Reference to the NN's HAContext object. This is only set once - * {@link #startCommonServices(Configuration, HAContext)} is called. + * {@link #startCommonServices(Configuration, HAContext)} is called. */ private HAContext haContext; @@ -841,14 +841,14 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime()); return namesystem; } - + FSNamesystem(Configuration conf, FSImage fsImage) throws IOException { this(conf, fsImage, false); } - + /** * Create an FSNamesystem associated with the specified image. - * + * * Note that this does not load any data off of disk -- if you would * like that behavior, use {@link #loadFromDisk(Configuration)} * @@ -884,7 +884,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT); this.fsOwner = UserGroupInformation.getCurrentUser(); - this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, + this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY, DFS_PERMISSIONS_ENABLED_DEFAULT); @@ -908,8 +908,8 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { // block allocation has to be persisted in HA using a shared edits directory // so that the standby has up-to-date namespace information nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); - this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); - + this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); + // Sanity check the HA-related config. 
if (nameserviceId != null) { LOG.info("Determined nameservice ID: " + nameserviceId); @@ -960,7 +960,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { blockManager.getStoragePolicySuite().getDefaultPolicy().getId(), isSnapshotTrashRootEnabled); - this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, + this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, DFS_NAMENODE_MAX_OBJECTS_DEFAULT); this.minBlockSize = conf.getLongBytes( @@ -984,10 +984,10 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { DFSConfigKeys.DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_DEFAULT); this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf); - + this.standbyShouldCheckpoint = conf.getBoolean( DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT); - // # edit autoroll threshold is a multiple of the checkpoint threshold + // # edit autoroll threshold is a multiple of the checkpoint threshold this.editLogRollerThreshold = (long) (conf.getFloat( DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, @@ -1124,13 +1124,13 @@ void unlockRetryCache() { boolean hasRetryCache() { return retryCache != null; } - + void addCacheEntryWithPayload(byte[] clientId, int callId, Object payload) { if (retryCache != null) { retryCache.addCacheEntryWithPayload(clientId, callId, payload); } } - + void addCacheEntry(byte[] clientId, int callId) { if (retryCache != null) { retryCache.addCacheEntry(clientId, callId); @@ -1267,7 +1267,7 @@ void loadFSImage(StartupOption startOpt) throws IOException { if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt)) { rollingUpgradeInfo = null; } - final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); + final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); LOG.info("Need to save fs image? " + needToSave + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled + ", isRollingUpgrade=" + isRollingUpgrade() + ")"); @@ -1306,7 +1306,7 @@ private void startSecretManager() { } } } - + @Override public void startSecretManagerIfNecessary() { assert hasWriteLock() : "Starting secret manager needs write lock"; @@ -1323,8 +1323,8 @@ private void stopSecretManager() { dtSecretManager.stopThreads(); } } - - /** + + /** * Start services common to both active and standby states */ void startCommonServices(Configuration conf, HAContext haContext) throws IOException { @@ -1344,7 +1344,7 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep } finally { writeUnlock("startCommonServices"); } - + registerMXBean(); DefaultMetricsSystem.instance().register(this); if (inodeAttributeProvider != null) { @@ -1356,8 +1356,8 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep this.nameNodeHostName = (serviceAddress != null) ? serviceAddress.getHostName() : ""; } - - /** + + /** * Stop services common to both active and standby states */ void stopCommonServices() { @@ -1373,7 +1373,7 @@ void stopCommonServices() { } RetryCache.clear(retryCache); } - + /** * Start services required in active state * @throws IOException @@ -1384,17 +1384,17 @@ void startActiveServices() throws IOException { writeLock(); try { FSEditLog editLog = getFSImage().getEditLog(); - + if (!editLog.isOpenForWrite()) { // During startup, we're already open for write during initialization. 
editLog.initJournalsForWrite(); // May need to recover editLog.recoverUnclosedStreams(); - + LOG.info("Catching up to latest edits from old active before " + "taking over writer role in edits logs"); editLogTailer.catchupDuringFailover(); - + blockManager.setPostponeBlocksFromFuture(false); blockManager.getDatanodeManager().markAllDatanodesStale(); blockManager.clearQueues(); @@ -1414,7 +1414,7 @@ void startActiveServices() throws IOException { } long nextTxId = getFSImage().getLastAppliedTxId() + 1; - LOG.info("Will take over writing edit logs at txnid " + + LOG.info("Will take over writing edit logs at txnid " + nextTxId); editLog.setNextTxId(nextTxId); @@ -1493,7 +1493,7 @@ private boolean shouldUseDelegationTokens() { alwaysUseDelegationTokensForTests; } - /** + /** * Stop services required in active state */ void stopActiveServices() { @@ -1549,10 +1549,10 @@ void stopActiveServices() { writeUnlock("stopActiveServices"); } } - + /** * Start services required in standby or observer state - * + * * @throws IOException */ void startStandbyServices(final Configuration conf, boolean isObserver) @@ -1625,7 +1625,7 @@ public void checkOperation(OperationCategory op) throws StandbyException { getSnapshotManager().initThreadLocals(); } } - + /** * @throws RetriableException * If 1) The NameNode is in SafeMode, 2) HA is enabled, and 3) @@ -1663,7 +1663,7 @@ public static Collection getNamespaceDirs(Configuration conf) { /** * Get all edits dirs which are required. If any shared edits dirs are * configured, these are also included in the set of required dirs. - * + * * @param conf the HDFS configuration. * @return all required dirs. */ @@ -1679,7 +1679,7 @@ private static Collection getStorageDirs(Configuration conf, Collection dirNames = conf.getTrimmedStringCollection(propertyName); StartupOption startOpt = NameNode.getStartupOption(conf); if(startOpt == StartupOption.IMPORT) { - // In case of IMPORT this will get rid of default directories + // In case of IMPORT this will get rid of default directories // but will retain directories specified in hdfs-site.xml // When importing image from a checkpoint, the name-node can // start with empty set of storage directories. @@ -1694,7 +1694,7 @@ private static Collection getStorageDirs(Configuration conf, "\n\tThe NameNode currently runs without persistent storage." + "\n\tAny changes to the file system meta-data may be lost." + "\n\tRecommended actions:" + - "\n\t\t- shutdown and restart NameNode with configured \"" + "\n\t\t- shutdown and restart NameNode with configured \"" + propertyName + "\" in hdfs-site.xml;" + "\n\t\t- use Backup Node as a persistent and up-to-date storage " + "of the file system meta-data."); @@ -1718,38 +1718,38 @@ public static List getNamespaceEditsDirs(Configuration conf) throws IOException { return getNamespaceEditsDirs(conf, true); } - + public static List getNamespaceEditsDirs(Configuration conf, boolean includeShared) throws IOException { // Use a LinkedHashSet so that order is maintained while we de-dup // the entries. LinkedHashSet editsDirs = new LinkedHashSet(); - + if (includeShared) { List sharedDirs = getSharedEditsDirs(conf); - + // Fail until multiple shared edits directories are supported (HDFS-2782) if (sharedDirs.size() > 1) { throw new IOException( "Multiple shared edits directories are not yet supported"); } - + // First add the shared edits dirs. 
It's critical that the shared dirs // are added first, since JournalSet syncs them in the order they are listed, // and we need to make sure all edits are in place in the shared storage // before they are replicated locally. See HDFS-2874. for (URI dir : sharedDirs) { if (!editsDirs.add(dir)) { - LOG.warn("Edits URI " + dir + " listed multiple times in " + + LOG.warn("Edits URI " + dir + " listed multiple times in " + DFS_NAMENODE_SHARED_EDITS_DIR_KEY + ". Ignoring duplicates."); } } - } + } // Now add the non-shared dirs. for (URI dir : getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_KEY)) { if (!editsDirs.add(dir)) { - LOG.warn("Edits URI " + dir + " listed multiple times in " + + LOG.warn("Edits URI " + dir + " listed multiple times in " + DFS_NAMENODE_SHARED_EDITS_DIR_KEY + " and " + DFS_NAMENODE_EDITS_DIR_KEY + ". Ignoring duplicates."); } @@ -1763,7 +1763,7 @@ public static List getNamespaceEditsDirs(Configuration conf, return Lists.newArrayList(editsDirs); } } - + /** * Returns edit directories that are shared between primary and secondary. * @param conf configuration @@ -1862,7 +1862,7 @@ public void cpLockInterruptibly() throws InterruptedException { public void cpUnlock() { this.cpLock.unlock(); } - + NamespaceInfo getNamespaceInfo() { readLock(); @@ -2273,7 +2273,7 @@ private void sortLocatedBlocks(String clientMachine, LocatedBlocks blocks) { * Moves all the blocks from {@code srcs} and appends them to {@code target} * To avoid rollbacks we will verify validity of ALL of the args * before we start actual move. - * + * * This does not support ".inodes" relative path * @param target target to concat into * @param srcs file that will be concatenated @@ -2306,7 +2306,7 @@ void concat(String target, String [] srcs, boolean logRetryCache) } /** - * stores the modification and access time for this inode. + * stores the modification and access time for this inode. * The access time is precise up to an hour. The transaction, if needed, is * written to the edits log but is not flushed. */ @@ -2421,15 +2421,15 @@ void createSymlink(String target, String link, /** * Set replication for an existing file. - * - * The NameNode sets new replication and schedules either replication of - * under-replicated data blocks or removal of the excessive block copies + * + * The NameNode sets new replication and schedules either replication of + * under-replicated data blocks or removal of the excessive block copies * if the blocks are over-replicated. - * + * * @see ClientProtocol#setReplication(String, short) * @param src file name * @param replication new replication - * @return true if successful; + * @return true if successful; * false if file does not exist or is a directory * @throws IOException */ @@ -2649,7 +2649,7 @@ long getPreferredBlockSize(String src) throws IOException { } /** - * If the file is within an encryption zone, select the appropriate + * If the file is within an encryption zone, select the appropriate * CryptoProtocolVersion from the list provided by the client. Since the * client may be newer, we need to handle unknown versions. * @@ -2687,7 +2687,7 @@ CryptoProtocolVersion chooseProtocolVersion( /** * Create a new file entry in the namespace. 
- * + * * For description of parameters and exceptions thrown see * {@link ClientProtocol#create}, except it returns valid file status upon * success @@ -2836,7 +2836,7 @@ private HdfsFileStatus startFileInt(String src, * Recover lease; * Immediately revoke the lease of the current lease holder and start lease * recovery so that the file can be forced to be closed. - * + * * @param src the path of the file to start lease recovery * @param holder the lease holder's name * @param clientMachine the client machine's name @@ -2863,7 +2863,7 @@ boolean recoverLease(String src, String holder, String clientMachine) if (isPermissionEnabled) { dir.checkPathAccess(pc, iip, FsAction.WRITE); } - + return recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE, iip, src, holder, clientMachine, true); } catch (StandbyException se) { @@ -2884,7 +2884,7 @@ enum RecoverLeaseOp { APPEND_FILE, TRUNCATE_FILE, RECOVER_LEASE; - + public String getExceptionMessage(String src, String holder, String clientMachine, String reason) { return "Failed to " + this + " " + src + " for " + holder + @@ -2926,7 +2926,7 @@ boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip, "the file is under construction but no leases found.")); } if (force) { - // close now: no need to wait for soft lease expiration and + // close now: no need to wait for soft lease expiration and // close only the file src LOG.info("recoverLease: " + lease + ", src=" + src + " from client " + clientName); @@ -2936,7 +2936,7 @@ boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip, "Current lease holder " + lease.getHolder() + " does not match file creator " + clientName; // - // If the original holder has not renewed in the last SOFTLIMIT + // If the original holder has not renewed in the last SOFTLIMIT // period, then start lease recovery. // if (lease.expiredSoftLimit()) { @@ -3018,7 +3018,7 @@ LastBlockWithStatus appendFile(String srcArg, String holder, ExtendedBlock getExtendedBlock(Block blk) { return new ExtendedBlock(getBlockPoolId(), blk); } - + void setBlockPoolId(String bpid) { blockManager.setBlockPoolId(bpid); } @@ -3129,7 +3129,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId, // choose new datanodes. final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode( - src, numAdditionalNodes, clientnode, chosen, + src, numAdditionalNodes, clientnode, chosen, excludes, preferredblocksize, storagePolicyID, blockType); final LocatedBlock lb = BlockManager.newLocatedBlock( blk, targets, -1, false); @@ -3198,7 +3198,7 @@ INodeFile checkLease(INodesInPath iip, String holder, long fileId) } return file; } - + /** * Complete in-progress write to the given file. * @return true if successful, false if the client should continue to retry @@ -3280,7 +3280,7 @@ private boolean checkBlocksComplete(String src, boolean allowCommittedBlock, } /** - * Change the indicated filename. + * Change the indicated filename. * @deprecated Use {@link #renameTo(String, String, boolean, * Options.Rename...)} instead. */ @@ -3355,8 +3355,8 @@ void renameTo(final String src, final String dst, /** * Remove the indicated file from namespace. 
- * - * @see ClientProtocol#delete(String, boolean) for detailed description and + * + * @see ClientProtocol#delete(String, boolean) for detailed description and * description of exceptions */ boolean delete(String src, boolean recursive, boolean logRetryCache) @@ -3606,7 +3606,7 @@ QuotaUsage getQuotaUsage(final String src) throws IOException { * Set the namespace quota and storage space quota for a directory. * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the * contract. - * + * * Note: This does not support ".inodes" relative path. */ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) @@ -3647,7 +3647,7 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) * @param fileId The inode ID that we're fsyncing. Older clients will pass * INodeId.GRANDFATHER_INODE_ID here. * @param clientName The string representation of the client - * @param lastBlockLength The length of the last block + * @param lastBlockLength The length of the last block * under construction reported from client. * @throws IOException if path does not exist */ @@ -3685,7 +3685,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength) * replication;
* RecoveryInProgressException if lease recovery is in progress.<br>
* IOException in case of an error. - * @return true if file has been successfully finalized and closed or + * @return true if file has been successfully finalized and closed or * false if block recovery has been initiated. Since the lease owner * has been changed and logged, caller should call logSync(). */ @@ -3758,7 +3758,7 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, " replicated, lease removed, file" + src + " closed."); return true; // closed! } - // Cannot close file right now, since some blocks + // Cannot close file right now, since some blocks // are not yet minimally replicated. // This may potentially cause infinite loop in lease recovery // if there are no valid replicas on data-nodes. @@ -3844,7 +3844,7 @@ private Lease reassignLease(Lease lease, String src, String newHolder, logReassignLease(lease.getHolder(), src, newHolder); return reassignLeaseInternal(lease, newHolder, pendingFile); } - + Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile) { assert hasWriteLock(); pendingFile.getFileUnderConstructionFeature().setClientName(newHolder); @@ -3930,9 +3930,9 @@ public boolean isInSnapshot(long blockCollectionID) { /* * 1. if bc is under construction and also with snapshot, and * bc is not in the current fsdirectory tree, bc must represent a snapshot - * file. - * 2. if fullName is not an absolute path, bc cannot be existent in the - * current fsdirectory tree. + * file. + * 2. if fullName is not an absolute path, bc cannot be existent in the + * current fsdirectory tree. * 3. if bc is not the current node associated with fullName, bc must be a * snapshot inode. */ @@ -3970,7 +3970,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock, checkOperation(OperationCategory.WRITE); // If a DN tries to commit to the standby, the recovery will // fail, and the next retry will succeed on the new NN. - + checkNameNodeSafeMode( "Cannot commitBlockSynchronization while in safe mode"); final BlockInfo storedBlock = getStoredBlock( @@ -4149,13 +4149,13 @@ void renewLease(String holder) throws IOException { * @param startAfter the name to start after * @param needLocation if blockLocations need to be returned * @return a partial listing starting after startAfter - * + * * @throws AccessControlException if access is denied * @throws UnresolvedLinkException if symbolic link is encountered * @throws IOException if other I/O error occurred */ DirectoryListing getListing(String src, byte[] startAfter, - boolean needLocation) + boolean needLocation) throws IOException { checkOperation(OperationCategory.READ); final String operationName = "listStatus"; @@ -4347,15 +4347,15 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter, * data storage is reported the namenode issues a new unique storageID. *

* Finally, the namenode returns its namespaceID as the registrationID - * for the datanodes. + * for the datanodes. * namespaceID is a persistent attribute of the name space. * The registrationID is checked every time the datanode is communicating - * with the namenode. + * with the namenode. * Datanodes with inappropriate registrationID are rejected. - * If the namenode stops, and then restarts it can restore its + * If the namenode stops, and then restarts it can restore its * namespaceID and will continue serving the datanodes that has previously * registered with the namenode without restarting the whole cluster. - * + * * @see org.apache.hadoop.hdfs.server.datanode.DataNode */ void registerDatanode(DatanodeRegistration nodeReg) throws IOException { @@ -4366,10 +4366,10 @@ void registerDatanode(DatanodeRegistration nodeReg) throws IOException { writeUnlock("registerDatanode"); } } - + /** * Get registrationID for datanodes based on the namespaceID. - * + * * @see #registerDatanode(DatanodeRegistration) * @return registration ID */ @@ -4688,20 +4688,20 @@ public long getMissingReplOneBlocksCount() { // not locking return blockManager.getMissingReplOneBlocksCount(); } - + @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"}, type = Metric.Type.COUNTER) public int getExpiredHeartbeats() { return datanodeStatistics.getExpiredHeartbeats(); } - + @Metric({"TransactionsSinceLastCheckpoint", "Number of transactions since last checkpoint"}) public long getTransactionsSinceLastCheckpoint() { return getFSImage().getLastAppliedOrWrittenTxId() - getNNStorage().getMostRecentCheckpointTxId(); } - + @Metric({"TransactionsSinceLastLogRoll", "Number of transactions since last edit log roll"}) public long getTransactionsSinceLastLogRoll() { @@ -4730,7 +4730,7 @@ private long getCorrectTransactionsSinceLastLogRoll() { public long getLastWrittenTransactionId() { return getEditLog().getLastWrittenTxIdWithoutLock(); } - + @Metric({"LastCheckpointTime", "Time in milliseconds since the epoch of the last checkpoint"}) public long getLastCheckpointTime() { @@ -4840,7 +4840,7 @@ public long getCapacityUsedNonDFS() { public int getTotalLoad() { return datanodeStatistics.getXceiverCount(); } - + @Metric({ "SnapshottableDirectories", "Number of snapshottable directories" }) public int getNumSnapshottableDirs() { return this.snapshotManager.getNumSnapshottableDirs(); @@ -4911,7 +4911,7 @@ int getNumberOfDatanodes(DatanodeReportType type) { readLock(); try { return getBlockManager().getDatanodeManager().getDatanodeListForReport( - type).size(); + type).size(); } finally { readUnlock("getNumberOfDatanodes"); } @@ -4952,7 +4952,7 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type) readLock(); try { checkOperation(OperationCategory.UNCHECKED); - final DatanodeManager dm = getBlockManager().getDatanodeManager(); + final DatanodeManager dm = getBlockManager().getDatanodeManager(); final List results = dm.getDatanodeListForReport(type); arr = getDatanodeInfoFromDescriptors(results); } finally { @@ -4971,7 +4971,7 @@ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type readLock(); try { checkOperation(OperationCategory.UNCHECKED); - final DatanodeManager dm = getBlockManager().getDatanodeManager(); + final DatanodeManager dm = getBlockManager().getDatanodeManager(); reports = dm.getDatanodeStorageReport(type); } finally { readUnlock(operationName, getLockReportInfoSupplier(null)); @@ -5012,11 +5012,11 @@ boolean saveNamespace(final long timeWindow, final long txGap) 
logAuditEvent(true, operationName, null); return saved; } - + /** * Enables/Disables/Checks restoring failed storage replicas if the storage becomes available again. * Requires superuser privilege. - * + * * @throws AccessControlException if superuser privilege is violated. */ boolean restoreFailedStorage(String arg) throws IOException { @@ -5028,7 +5028,7 @@ boolean restoreFailedStorage(String arg) throws IOException { writeLock(); try { checkOperation(OperationCategory.UNCHECKED); - + // if it is disabled - enable it and vice versa. if(arg.equals("check")) { val = getNNStorage().getRestoreFailedStorage(); @@ -5045,9 +5045,9 @@ boolean restoreFailedStorage(String arg) throws IOException { } Date getStartTime() { - return new Date(startTime); + return new Date(startTime); } - + void finalizeUpgrade() throws IOException { String operationName = "finalizeUpgrade"; checkSuperuserPrivilege(operationName); @@ -5107,7 +5107,7 @@ boolean setSafeMode(SafeModeAction action) throws IOException { } /** - * Get the total number of blocks in the system. + * Get the total number of blocks in the system. */ @Override // FSNamesystemMBean @Metric @@ -5196,7 +5196,7 @@ void leaveSafeMode(boolean force) { writeLock(); try { if (!isInSafeMode()) { - NameNode.stateChangeLog.info("STATE* Safe mode is already OFF"); + NameNode.stateChangeLog.info("STATE* Safe mode is already OFF"); return; } if (blockManager.leaveSafeMode(force)) { @@ -5264,7 +5264,7 @@ NamenodeCommand startCheckpoint(NamenodeRegistration backupNode, try { checkOperation(OperationCategory.CHECKPOINT); checkNameNodeSafeMode("Checkpoint not started"); - + LOG.info("Start checkpoint for " + backupNode.getAddress()); NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode, activeNamenode, getEffectiveLayoutVersion()); @@ -5285,7 +5285,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, writeUnlock("processIncrementalBlockReport"); } } - + void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException { checkOperation(OperationCategory.CHECKPOINT); @@ -5528,7 +5528,7 @@ public long getExcessBlocks() { public long getNumTimedOutPendingReconstructions() { return blockManager.getNumTimedOutPendingReconstructions(); } - + // HA-only metric @Metric public long getPostponedMisreplicatedBlocks() { @@ -5540,7 +5540,7 @@ public long getPostponedMisreplicatedBlocks() { public int getPendingDataNodeMessageCount() { return blockManager.getPendingDataNodeMessageCount(); } - + // HA-only metric @Metric public String getHAState() { @@ -5570,7 +5570,7 @@ public HAServiceState getState() { public String getFSState() { return isInSafeMode() ? 
"safeMode" : "Operational"; } - + private ObjectName namesystemMBeanName, replicatedBlocksMBeanName, ecBlockGroupsMBeanName, namenodeMXBeanName; @@ -5657,7 +5657,7 @@ public int getNumLiveDataNodes() { public int getNumDeadDataNodes() { return getBlockManager().getDatanodeManager().getNumDeadDataNodes(); } - + @Override // FSNamesystemMBean @Metric({"NumDecomLiveDataNodes", "Number of datanodes which have been decommissioned and are now live"}) @@ -5736,7 +5736,7 @@ public int getNumDecommissioningDataNodes() { } @Override // FSNamesystemMBean - @Metric({"StaleDataNodes", + @Metric({"StaleDataNodes", "Number of datanodes marked stale due to delayed heartbeat"}) public int getNumStaleDataNodes() { return getBlockManager().getDatanodeManager().getNumStaleNodes(); @@ -5849,7 +5849,7 @@ private INodeFile checkUCBlock(ExtendedBlock block, assert hasWriteLock(); checkNameNodeSafeMode("Cannot get a new generation stamp and an " + "access token for block " + block); - + // check stored block state BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block)); if (storedBlock == null) { @@ -5860,25 +5860,25 @@ private INodeFile checkUCBlock(ExtendedBlock block, + " is " + storedBlock.getBlockUCState() + " but not " + BlockUCState.UNDER_CONSTRUCTION); } - + // check file inode final INodeFile file = getBlockCollection(storedBlock); if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) { - throw new IOException("The file " + storedBlock + + throw new IOException("The file " + storedBlock + " belonged to does not exist or it is not under construction."); } - + // check lease if (clientName == null || !clientName.equals(file.getFileUnderConstructionFeature() .getClientName())) { - throw new LeaseExpiredException("Lease mismatch: " + block + - " is accessed by a non lease holder " + clientName); + throw new LeaseExpiredException("Lease mismatch: " + block + + " is accessed by a non lease holder " + clientName); } return file; } - + /** * Client is reporting some bad block locations. */ @@ -5905,12 +5905,12 @@ void reportBadBlocks(LocatedBlock[] blocks) throws IOException { } /** - * Get a new generation stamp together with an access token for + * Get a new generation stamp together with an access token for * a block under construction. - * + * * This method is called for recovering a failed write or setting up * a block for appended. - * + * * @param block a block * @param clientName the name of a client * @return a located block with a new generation stamp and an access token @@ -5926,7 +5926,7 @@ LocatedBlock bumpBlockGenerationStamp(ExtendedBlock block, // check vadility of parameters final INodeFile file = checkUCBlock(block, clientName); - + // get a new generation stamp and an access token block.setGenerationStamp(nextGenerationStamp( blockManager.isLegacyBlock(block.getLocalBlock()))); @@ -5942,10 +5942,10 @@ LocatedBlock bumpBlockGenerationStamp(ExtendedBlock block, getEditLog().logSync(); return locatedBlock; } - + /** * Update a pipeline for a block under construction. - * + * * @param clientName the name of the client * @param oldBlock and old block * @param newBlock a new block with a new generation stamp and length @@ -6024,7 +6024,7 @@ private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, * Register a Backup name-node, verifying that it belongs * to the correct namespace, and adding it to the set of * active journals if necessary. 
- * + * * @param bnReg registration of the new BackupNode * @param nnReg registration of this NameNode * @throws IOException if the namespace IDs do not match @@ -6077,12 +6077,12 @@ void releaseBackupNode(NamenodeRegistration registration) static class CorruptFileBlockInfo { final String path; final Block block; - + public CorruptFileBlockInfo(String p, Block b) { path = p; block = b; } - + @Override public String toString() { return block.getBlockName() + "\t" + path; @@ -6247,7 +6247,7 @@ Token getDelegationToken(Text renewer) } /** - * + * * @param token token to renew * @return new expiryTime of the token * @throws InvalidToken if {@code token} is invalid @@ -6291,7 +6291,7 @@ long renewDelegationToken(Token token) } /** - * + * * @param token token to cancel * @throws IOException on error */ @@ -6353,11 +6353,11 @@ void loadSecretManagerState(SecretManagerSection s, /** * Log the updateMasterKey operation to edit logs. - * + * * @param key new delegation key. */ public void logUpdateMasterKey(DelegationKey key) { - + assert !isInSafeMode() : "this should never be called while in safemode, since we stop " + "the DT manager before entering safemode!"; @@ -6367,10 +6367,10 @@ public void logUpdateMasterKey(DelegationKey key) { getEditLog().logUpdateMasterKey(key); getEditLog().logSync(); } - + /** * Log the cancellation of expired tokens to edit logs. - * + * * @param id token identifier to cancel */ public void logExpireDelegationToken(DelegationTokenIdentifier id) { @@ -6382,16 +6382,16 @@ public void logExpireDelegationToken(DelegationTokenIdentifier id) { assert hasReadLock(); // do not logSync so expiration edits are batched getEditLog().logCancelDelegationToken(id); - } - + } + private void logReassignLease(String leaseHolder, String src, String newHolder) { assert hasWriteLock(); getEditLog().logReassignLease(leaseHolder, src, newHolder); } - + /** - * + * * @return true if delegation token operation is allowed */ private boolean isAllowedDelegationTokenOp() throws IOException { @@ -6404,7 +6404,7 @@ private boolean isAllowedDelegationTokenOp() throws IOException { } return true; } - + /** * Returns authentication method used to establish the connection. * @return AuthenticationMethod used to establish connection @@ -6419,9 +6419,9 @@ private AuthenticationMethod getConnectionAuthenticationMethod() } return authMethod; } - + /** - * Client invoked methods are invoked over RPC and will be in + * Client invoked methods are invoked over RPC and will be in * RPC call context even if the client exits. 
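The renew/cancel plumbing above is driven from stock client APIs. A short sketch of the token lifecycle as an application sees it; it assumes fs.defaultFS points at a Kerberos-secured HDFS cluster (isAllowedDelegationTokenOp() rejects token-authenticated callers), and the renewer name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Lands in FSNamesystem.getDelegationToken() on the active NN.
        Token<?> token = fs.getDelegationToken("yarn");
        // Lands in renewDelegationToken(); returns the new expiry time.
        long newExpiry = token.renew(conf);
        System.out.println("token renewed until " + newExpiry);
        // Lands in cancelDelegationToken(), which logs the cancel edit.
        token.cancel(conf);
      }
    }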
*/ boolean isExternalInvocation() { @@ -6542,7 +6542,7 @@ public long getTotalBlocks() { public long getNumberOfMissingBlocks() { return getMissingBlocksCount(); } - + @Override // NameNodeMXBean public long getNumberOfMissingBlocksWithReplicationFactorOne() { return getMissingReplOneBlocksCount(); @@ -6559,7 +6559,7 @@ public int getThreads() { */ @Override // NameNodeMXBean public String getLiveNodes() { - final Map> info = + final Map> info = new HashMap>(); final List live = new ArrayList(); blockManager.getDatanodeManager().fetchDatanodes(live, null, false); @@ -6613,7 +6613,7 @@ public String getLiveNodes() { */ @Override // NameNodeMXBean public String getDeadNodes() { - final Map> info = + final Map> info = new HashMap>(); final List dead = new ArrayList(); blockManager.getDatanodeManager().fetchDatanodes(null, dead, false); @@ -6637,7 +6637,7 @@ public String getDeadNodes() { */ @Override // NameNodeMXBean public String getDecomNodes() { - final Map> info = + final Map> info = new HashMap>(); final List decomNodeList = blockManager.getDatanodeManager( ).getDecommissioningNodes(); @@ -6702,17 +6702,17 @@ private long getDfsUsed(DatanodeDescriptor alivenode) { public String getClusterId() { return getNNStorage().getClusterID(); } - + @Override // NameNodeMXBean public String getBlockPoolId() { return getBlockManager().getBlockPoolId(); } - + @Override // NameNodeMXBean public String getNameDirStatuses() { Map> statusMap = new HashMap>(); - + Map activeDirs = new HashMap(); for (Iterator it = getNNStorage().dirIterator(); it.hasNext();) { @@ -6720,7 +6720,7 @@ public String getNameDirStatuses() { activeDirs.put(st.getRoot(), st.getStorageDirType()); } statusMap.put("active", activeDirs); - + List removedStorageDirs = getNNStorage().getRemovedStorageDirs(); Map failedDirs = new HashMap(); @@ -6728,7 +6728,7 @@ public String getNameDirStatuses() { failedDirs.put(st.getRoot(), st.getStorageDirType()); } statusMap.put("failed", failedDirs); - + return JSON.toString(statusMap); } @@ -6823,7 +6823,7 @@ public String getJournalTransactionInfo() { Long.toString(this.getFSImage().getMostRecentCheckpointTxId())); return JSON.toString(txnIdMap); } - + @Override // NameNodeMXBean public long getNNStartedTimeInMillis() { return startTime; @@ -6997,27 +6997,27 @@ public synchronized void verifyToken(DelegationTokenIdentifier identifier, public EditLogTailer getEditLogTailer() { return editLogTailer; } - + @VisibleForTesting public void setEditLogTailerForTests(EditLogTailer tailer) { this.editLogTailer = tailer; } - + @VisibleForTesting void setFsLockForTests(ReentrantReadWriteLock lock) { this.fsLock.coarseLock = lock; } - + @VisibleForTesting public ReentrantReadWriteLock getFsLockForTests() { return fsLock.coarseLock; } - + @VisibleForTesting public ReentrantLock getCpLockForTests() { return cpLock; } - + @VisibleForTesting public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { this.nnResourceChecker = nnResourceChecker; @@ -7026,7 +7026,7 @@ public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { public SnapshotManager getSnapshotManager() { return snapshotManager; } - + /** Allow snapshot on a directory. */ void allowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); @@ -7043,7 +7043,7 @@ void allowSnapshot(String path) throws IOException { getEditLog().logSync(); logAuditEvent(true, operationName, path, null, null); } - + /** Disallow snapshot on a directory. 
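Since allowSnapshot()/disallowSnapshot() sit next to the snapshot operations they gate, here is a quick sketch of the client calls that reach them. Paths are illustrative, allowSnapshot requires superuser privilege, and the cast assumes fs.defaultFS points at HDFS:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SnapshotRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path dir = new Path("/data/reports");       // hypothetical directory
        dfs.allowSnapshot(dir);                     // -> allowSnapshot() above
        Path snap = dfs.createSnapshot(dir, "s0");  // -> createSnapshot() above
        System.out.println("created " + snap);      // /data/reports/.snapshot/s0
        dfs.renameSnapshot(dir, "s0", "s1");        // -> renameSnapshot() above
        dfs.deleteSnapshot(dir, "s1");              // -> deleteSnapshot() above
      }
    }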
*/ void disallowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); @@ -7060,7 +7060,7 @@ void disallowSnapshot(String path) throws IOException { getEditLog().logSync(); logAuditEvent(true, operationName, path, null, null); } - + /** * Create a snapshot * @param snapshotRoot The directory path where the snapshot is taken * @param snapshotName The name of the snapshot * @throws SafeModeException * @throws IOException @@ -7092,14 +7092,14 @@ String createSnapshot(String snapshotRoot, String snapshotName, snapshotPath, null); return snapshotPath; } - + /** * Rename a snapshot * @param path The directory path where the snapshot was taken * @param snapshotOldName Old snapshot name * @param snapshotNewName New snapshot name * @throws SafeModeException - * @throws IOException + * @throws IOException */ void renameSnapshot( String path, String snapshotOldName, String snapshotNewName, @@ -7132,8 +7132,8 @@ void renameSnapshot( } /** - * Get the list of snapshottable directories that are owned - * by the current user. Return all the snapshottable directories if the + * Get the list of snapshottable directories that are owned + * by the current user. Return all the snapshottable directories if the * current user is a super user. * @return The list of all the current snapshottable directories. * @throws IOException If an I/O error occurred. @@ -7316,7 +7316,7 @@ SnapshotDiffReportListing getSnapshotDiffReportListing(String path, null); return diffs; } - + /** * Delete a snapshot of a snapshottable directory * @param snapshotRoot The snapshottable directory @@ -7459,7 +7459,7 @@ void startRollingUpgradeInternal(long startTime) /** * Update internal state to indicate that a rolling upgrade is in progress for * non-HA setup. This requires the namesystem is in SafeMode and after doing a - * checkpoint for rollback the namesystem will quit the safemode automatically + * checkpoint for rollback the namesystem will quit the safemode automatically */ private void startRollingUpgradeInternalForNonHA(long startTime) throws IOException { @@ -8879,7 +8879,7 @@ private static void enableAsyncAuditLog(Configuration conf) { logger.removeAppender(appender); asyncAppender.addAppender(appender); } - logger.addAppender(asyncAppender); + logger.addAppender(asyncAppender); } } /** @@ -9040,4 +9040,19 @@ private void checkBlockLocationsWhenObserver(LocatedBlocks blocks, String src) } } } + + /** + * Get the enclosing root for the specified path. + * + * @param srcArg the path of a file or directory whose enclosing root is requested. + * @return the enclosing root of the path; "/" if the path is not inside an encryption zone.
+ */ + String getEnclosingRoot(final String srcArg) throws IOException { + EncryptionZone ez = getEZForPath(srcArg); + if (ez != null) { + return ez.getPath(); + } else { + return "/"; + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index b19bfc13acf65f..dc7c0b9aed2505 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -239,12 +239,12 @@ @InterfaceAudience.Private @VisibleForTesting public class NameNodeRpcServer implements NamenodeProtocols { - + private static final Logger LOG = NameNode.LOG; private static final Logger stateChangeLog = NameNode.stateChangeLog; private static final Logger blockStateChangeLog = NameNode .blockStateChangeLog; - + // Dependencies from other parts of NN. protected final FSNamesystem namesystem; protected final NameNode nn; @@ -261,11 +261,11 @@ public class NameNodeRpcServer implements NamenodeProtocols { /** The RPC server that listens to lifeline requests */ private final RPC.Server lifelineRpcServer; private final InetSocketAddress lifelineRPCAddress; - + /** The RPC server that listens to requests from clients */ protected final RPC.Server clientRpcServer; protected final InetSocketAddress clientRpcAddress; - + private final String minimumDataNodeVersion; private final String defaultECPolicyName; @@ -280,23 +280,23 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) this.retryCache = namesystem.getRetryCache(); this.metrics = NameNode.getNameNodeMetrics(); - int handlerCount = - conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, + int handlerCount = + conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, DFS_NAMENODE_HANDLER_COUNT_DEFAULT); ipProxyUsers = conf.getStrings(DFS_NAMENODE_IP_PROXY_USERS); RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine2.class); - ClientNamenodeProtocolServerSideTranslatorPB - clientProtocolServerTranslator = + ClientNamenodeProtocolServerSideTranslatorPB + clientProtocolServerTranslator = new ClientNamenodeProtocolServerSideTranslatorPB(this); BlockingService clientNNPbService = ClientNamenodeProtocol. 
newReflectiveBlockingService(clientProtocolServerTranslator); int maxDataLength = conf.getInt(IPC_MAXIMUM_DATA_LENGTH, IPC_MAXIMUM_DATA_LENGTH_DEFAULT); - DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator = + DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator = new DatanodeProtocolServerSideTranslatorPB(this, maxDataLength); BlockingService dnProtoPbService = DatanodeProtocolService .newReflectiveBlockingService(dnProtoPbTranslator); @@ -306,22 +306,22 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) BlockingService lifelineProtoPbService = DatanodeLifelineProtocolService .newReflectiveBlockingService(lifelineProtoPbTranslator); - NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator = + NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator = new NamenodeProtocolServerSideTranslatorPB(this); BlockingService NNPbService = NamenodeProtocolService .newReflectiveBlockingService(namenodeProtocolXlator); - - RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator = + + RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator = new RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(this); BlockingService refreshAuthService = RefreshAuthorizationPolicyProtocolService .newReflectiveBlockingService(refreshAuthPolicyXlator); - RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = + RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB(this); BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService .newReflectiveBlockingService(refreshUserMappingXlator); - RefreshCallQueueProtocolServerSideTranslatorPB refreshCallQueueXlator = + RefreshCallQueueProtocolServerSideTranslatorPB refreshCallQueueXlator = new RefreshCallQueueProtocolServerSideTranslatorPB(this); BlockingService refreshCallQueueService = RefreshCallQueueProtocolService .newReflectiveBlockingService(refreshCallQueueXlator); @@ -331,12 +331,12 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) BlockingService genericRefreshService = GenericRefreshProtocolService .newReflectiveBlockingService(genericRefreshXlator); - GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = + GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = new GetUserMappingsProtocolServerSideTranslatorPB(this); BlockingService getUserMappingService = GetUserMappingsProtocolService .newReflectiveBlockingService(getUserMappingXlator); - - HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = + + HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(this); BlockingService haPbService = HAServiceProtocolService .newReflectiveBlockingService(haServiceProtocolXlator); @@ -380,14 +380,14 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) serviceRpcServer); DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, refreshAuthService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, refreshUserMappingService, serviceRpcServer); // We support Refreshing call queue here in case the client RPC queue is full DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, refreshCallQueueService, serviceRpcServer); DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, genericRefreshService, serviceRpcServer); - 
DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, serviceRpcServer); // Update the address with the correct port @@ -481,15 +481,15 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) clientRpcServer); DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService, clientRpcServer); - DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, + DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, refreshAuthService, clientRpcServer); - DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, refreshUserMappingService, clientRpcServer); DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, refreshCallQueueService, clientRpcServer); DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, genericRefreshService, clientRpcServer); - DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, clientRpcServer); // set service-level authorization security policy @@ -510,7 +510,7 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) clientRpcAddress = new InetSocketAddress( rpcAddr.getHostName(), listenAddr.getPort()); nn.setRpcServerAddress(conf, clientRpcAddress); - + minimumDataNodeVersion = conf.get( DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT); @@ -572,33 +572,33 @@ RPC.Server getLifelineRpcServer() { public RPC.Server getClientRpcServer() { return clientRpcServer; } - + /** Allow access to the service RPC server for testing */ @VisibleForTesting RPC.Server getServiceRpcServer() { return serviceRpcServer; } - + /** * Start client and service RPC servers. */ void start() { clientRpcServer.start(); if (serviceRpcServer != null) { - serviceRpcServer.start(); + serviceRpcServer.start(); } if (lifelineRpcServer != null) { lifelineRpcServer.start(); } } - + /** * Wait until the RPC servers have shutdown. 
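Each protocol registered above pairs the RPC server with a server-side PB translator, and the new getEnclosingRoot call needs the same plumbing. A hedged sketch of the translator method it implies follows; the proto message and field names (GetEnclosingRootRequestProto and friends) are placeholders for illustration and may not match the .proto changes in this patch. The method body would live in ClientNamenodeProtocolServerSideTranslatorPB:

    // Hypothetical translator method; 'server' is the NameNodeRpcServer this
    // translator wraps. RpcController/ServiceException come from the (shaded)
    // protobuf runtime already used by the surrounding class.
    public GetEnclosingRootResponseProto getEnclosingRoot(
        RpcController controller, GetEnclosingRootRequestProto req)
        throws ServiceException {
      try {
        String root = server.getEnclosingRoot(req.getPath());
        return GetEnclosingRootResponseProto.newBuilder()
            .setEnclosingRootPath(root)
            .build();
      } catch (IOException e) {
        throw new ServiceException(e);
      }
    }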
*/ void join() throws InterruptedException { clientRpcServer.join(); if (serviceRpcServer != null) { - serviceRpcServer.join(); + serviceRpcServer.join(); } if (lifelineRpcServer != null) { lifelineRpcServer.join(); @@ -675,7 +675,7 @@ public ExportedBlockKeys getBlockKeys() throws IOException { @Override // NamenodeProtocol public void errorReport(NamenodeRegistration registration, - int errorCode, + int errorCode, String msg) throws IOException { String operationName = "errorReport"; checkNNStartup(); @@ -782,11 +782,11 @@ public void cancelDelegationToken(Token token) checkNNStartup(); namesystem.cancelDelegationToken(token); } - + @Override // ClientProtocol - public LocatedBlocks getBlockLocations(String src, - long offset, - long length) + public LocatedBlocks getBlockLocations(String src, + long offset, + long length) throws IOException { checkNNStartup(); metrics.incrGetBlockLocations(); @@ -794,7 +794,7 @@ public LocatedBlocks getBlockLocations(String src, namesystem.getBlockLocations(getClientMachine(), src, offset, length); return locatedBlocks; } - + @Override // ClientProtocol public FsServerDefaults getServerDefaults() throws IOException { checkNNStartup(); @@ -872,7 +872,7 @@ public boolean recoverLease(String src, String clientName) throws IOException { } @Override // ClientProtocol - public boolean setReplication(String src, short replication) + public boolean setReplication(String src, short replication) throws IOException { checkNNStartup(); return namesystem.setReplication(src, replication); @@ -921,7 +921,7 @@ public void setOwner(String src, String username, String groupname) checkNNStartup(); namesystem.setOwner(src, username, groupname); } - + @Override public LocatedBlock addBlock(String src, String clientName, ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId, @@ -983,10 +983,10 @@ public boolean complete(String src, String clientName, } /** - * The client has detected an error on the specified located blocks - * and is reporting them to the server. For now, the namenode will - * mark the block as corrupt. In the future we might - * check the blocks are actually corrupt. + * The client has detected an error on the specified located blocks + * and is reporting them to the server. For now, the namenode will + * mark the block as corrupt. In the future we might + * check the blocks are actually corrupt. 
*/ @Override // ClientProtocol, DatanodeProtocol public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { @@ -1022,7 +1022,7 @@ public void updatePipeline(String clientName, ExtendedBlock oldBlock, RetryCache.setState(cacheEntry, success); } } - + @Override // DatanodeProtocol public void commitBlockSynchronization(ExtendedBlock block, long newgenerationstamp, long newlength, @@ -1033,14 +1033,14 @@ public void commitBlockSynchronization(ExtendedBlock block, namesystem.commitBlockSynchronization(block, newgenerationstamp, newlength, closeFile, deleteblock, newtargets, newtargetstorages); } - + @Override // ClientProtocol - public long getPreferredBlockSize(String filename) + public long getPreferredBlockSize(String filename) throws IOException { checkNNStartup(); return namesystem.getPreferredBlockSize(filename); } - + @Deprecated @Override // ClientProtocol public boolean rename(String src, String dst) throws IOException { @@ -1067,7 +1067,7 @@ public boolean rename(String src, String dst) throws IOException { } return ret; } - + @Override // ClientProtocol public void concat(String trg, String[] src) throws IOException { checkNNStartup(); @@ -1089,7 +1089,7 @@ public void concat(String trg, String[] src) throws IOException { RetryCache.setState(cacheEntry, success); } } - + @Override // ClientProtocol public void rename2(String src, String dst, Options.Rename... options) throws IOException { @@ -1144,14 +1144,14 @@ public boolean delete(String src, boolean recursive) throws IOException { } finally { RetryCache.setState(cacheEntry, ret); } - if (ret) + if (ret) metrics.incrDeleteFileOps(); return ret; } /** * Check path length does not exceed maximum. Returns true if - * length and depth are okay. Returns false if length is too long + * length and depth are okay. Returns false if length is too long * or depth is too great. */ private boolean checkPathLength(String src) { @@ -1159,14 +1159,14 @@ private boolean checkPathLength(String src) { return (src.length() <= MAX_PATH_LENGTH && srcPath.depth() <= MAX_PATH_DEPTH); } - + @Override // ClientProtocol public boolean mkdirs(String src, FsPermission masked, boolean createParent) throws IOException { checkNNStartup(); stateChangeLog.debug("*DIR* NameNode.mkdirs: {}.", src); if (!checkPathLength(src)) { - throw new IOException("mkdirs: Pathname too long. Limit " + throw new IOException("mkdirs: Pathname too long. 
Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); } return namesystem.mkdirs(src, @@ -1184,7 +1184,7 @@ public void renewLease(String clientName, List namespaces) + ") should be null or empty"); } checkNNStartup(); - namesystem.renewLease(clientName); + namesystem.renewLease(clientName); } @Override // ClientProtocol @@ -1246,14 +1246,14 @@ public boolean isFileClosed(String src) throws IOException{ checkNNStartup(); return namesystem.isFileClosed(src); } - + @Override // ClientProtocol public HdfsFileStatus getFileLinkInfo(String src) throws IOException { checkNNStartup(); metrics.incrFileInfoOps(); return namesystem.getFileInfo(src, false, false, false); } - + @Override // ClientProtocol public long[] getStats() throws IOException { checkNNStartup(); @@ -1308,7 +1308,7 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } @Override // ClientProtocol - public boolean restoreFailedStorage(String arg) throws IOException { + public boolean restoreFailedStorage(String arg) throws IOException { checkNNStartup(); return namesystem.restoreFailedStorage(arg); } @@ -1329,7 +1329,7 @@ public boolean saveNamespace(long timeWindow, long txGap) throws IOException { } return true; } - + @Override // ClientProtocol public long rollEdits() throws AccessControlException, IOException { checkNNStartup(); @@ -1351,7 +1351,7 @@ public long getTransactionID() throws IOException { namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getCorrectLastAppliedOrWrittenTxId(); } - + @Override // NamenodeProtocol public long getMostRecentCheckpointTxId() throws IOException { String operationName = "getMostRecentCheckpointTxId"; @@ -1360,13 +1360,13 @@ public long getMostRecentCheckpointTxId() throws IOException { namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getMostRecentCheckpointTxId(); } - + @Override // NamenodeProtocol public CheckpointSignature rollEditLog() throws IOException { checkNNStartup(); return namesystem.rollEditLog(); } - + @Override // NamenodeProtocol public RemoteEditLogManifest getEditLogManifest(long sinceTxId) throws IOException { @@ -1392,7 +1392,7 @@ public boolean isRollingUpgrade() throws IOException { namesystem.checkSuperuserPrivilege(operationName); return namesystem.isRollingUpgrade(); } - + @Override // ClientProtocol public void finalizeUpgrade() throws IOException { checkNNStartup(); @@ -1487,7 +1487,7 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { checkNNStartup(); namesystem.setBalancerBandwidth(bandwidth); } - + @Override // ClientProtocol public ContentSummary getContentSummary(String path) throws IOException { checkNNStartup(); @@ -1530,7 +1530,7 @@ public void setQuota(String path, long namespaceQuota, long storagespaceQuota, checkNNStartup(); namesystem.setQuota(path, namespaceQuota, storagespaceQuota, type); } - + @Override // ClientProtocol public void fsync(String src, long fileId, String clientName, long lastBlockLength) @@ -1540,7 +1540,7 @@ public void fsync(String src, long fileId, String clientName, } @Override // ClientProtocol - public void setTimes(String src, long mtime, long atime) + public void setTimes(String src, long mtime, long atime) throws IOException { checkNNStartup(); namesystem.setTimes(src, mtime, atime); @@ -1557,12 +1557,12 @@ public void createSymlink(String target, String link, FsPermission dirPerms, } /* We enforce the MAX_PATH_LENGTH limit even though a symlink target - * URI may refer to a non-HDFS file system. 
+ * URI may refer to a non-HDFS file system. */ if (!checkPathLength(link)) { throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + " character limit"); - + } final UserGroupInformation ugi = getRemoteUser(); @@ -1635,7 +1635,7 @@ public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, verifyRequest(nodeReg); blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: from {}, reports.length={}.", nodeReg, reports.length); - final BlockManager bm = namesystem.getBlockManager(); + final BlockManager bm = namesystem.getBlockManager(); boolean noStaleStorages = false; try { if (bm.checkBlockReportLease(context, nodeReg)) { @@ -1713,9 +1713,9 @@ public void run() { @Override // DatanodeProtocol public void errorReport(DatanodeRegistration nodeReg, - int errorCode, String msg) throws IOException { + int errorCode, String msg) throws IOException { checkNNStartup(); - String dnName = + String dnName = (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString(); if (errorCode == DatanodeProtocol.NOTIFY) { @@ -1728,12 +1728,12 @@ public void errorReport(DatanodeRegistration nodeReg, LOG.warn("Disk error on " + dnName + ": " + msg); } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) { LOG.warn("Fatal disk error on " + dnName + ": " + msg); - namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg); + namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg); } else { LOG.info("Error report from " + dnName + ": " + msg); } } - + @Override // DatanodeProtocol, NamenodeProtocol public NamespaceInfo versionRequest() throws IOException { checkNNStartup(); @@ -1751,9 +1751,9 @@ public void sendLifeline(DatanodeRegistration nodeReg, StorageReport[] report, xceiverCount, xmitsInProgress, failedVolumes, volumeFailureSummary); } - /** + /** * Verifies the given registration. 
- * + * * @param nodeReg node registration * @throws UnregisteredNodeException if the registration is invalid */ @@ -1816,7 +1816,7 @@ public Collection refresh(String identifier, String[] args) { // Let the registry handle as needed return RefreshRegistry.defaultRegistry().dispatch(identifier, args); } - + @Override // GetUserMappingsProtocol public String[] getGroupsForUser(String user) throws IOException { LOG.debug("Getting groups for user {}", user); @@ -1829,9 +1829,9 @@ public synchronized void monitorHealth() throws HealthCheckFailedException, checkNNStartup(); nn.monitorHealth(); } - + @Override // HAServiceProtocol - public synchronized void transitionToActive(StateChangeRequestInfo req) + public synchronized void transitionToActive(StateChangeRequestInfo req) throws ServiceFailedException, AccessControlException, IOException { checkNNStartup(); nn.checkHaStateChange(req); @@ -1866,7 +1866,7 @@ public synchronized void transitionToObserver(StateChangeRequestInfo req) } @Override // HAServiceProtocol - public synchronized HAServiceStatus getServiceStatus() + public synchronized HAServiceStatus getServiceStatus() throws AccessControlException, ServiceFailedException, IOException { checkNNStartup(); return nn.getServiceStatus(); @@ -1882,7 +1882,7 @@ void verifyLayoutVersion(int version) throws IOException { throw new IncorrectVersionException( HdfsServerConstants.NAMENODE_LAYOUT_VERSION, version, "data node"); } - + private void verifySoftwareVersion(DatanodeRegistration dnReg) throws IncorrectVersionException { String dnVersion = dnReg.getSoftwareVersion(); @@ -1951,7 +1951,7 @@ public String createSnapshot(String snapshotRoot, String snapshotName) } return ret; } - + @Override public void deleteSnapshot(String snapshotRoot, String snapshotName) throws IOException { @@ -2223,7 +2223,7 @@ public AclStatus getAclStatus(String src) throws IOException { checkNNStartup(); return namesystem.getAclStatus(src); } - + @Override // ClientProtocol public void createEncryptionZone(String src, String keyName) throws IOException { @@ -2320,9 +2320,9 @@ public void setXAttr(String src, XAttr xAttr, EnumSet flag) RetryCache.setState(cacheEntry, success); } } - + @Override // ClientProtocol - public List getXAttrs(String src, List xAttrs) + public List getXAttrs(String src, List xAttrs) throws IOException { checkNNStartup(); return namesystem.getXAttrs(src, xAttrs); @@ -2333,7 +2333,7 @@ public List listXAttrs(String src) throws IOException { checkNNStartup(); return namesystem.listXAttrs(src); } - + @Override // ClientProtocol public void removeXAttr(String src, XAttr xAttr) throws IOException { checkNNStartup(); @@ -2674,4 +2674,11 @@ public Long getNextSPSPath() throws IOException { } return namesystem.getBlockManager().getSPSManager().getNextPathId(); } + + @Override // ClientProtocol + public String getEnclosingRoot(String src) + throws IOException { + checkNNStartup(); + return namesystem.getEnclosingRoot(src); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java index 62dc3076d00a65..c9f1a86bd450e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsShell; import 
org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -49,6 +50,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag; import org.apache.hadoop.hdfs.client.HdfsAdmin; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; @@ -80,7 +82,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest { private static FileSystem fHdfs2; private FileSystem fsTarget2; Path targetTestRoot2; - + @Override protected FileSystemTestHelper createFileSystemHelper() { return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs"); @@ -105,14 +107,14 @@ public static void clusterSetupAtBegining() throws IOException, SupportsBlocks = true; CONF.setBoolean( DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); - + cluster = new MiniDFSCluster.Builder(CONF).nnTopology( MiniDFSNNTopology.simpleFederatedTopology(2)) .numDataNodes(2) .build(); cluster.waitClusterUp(); - + fHdfs = cluster.getFileSystem(0); fHdfs2 = cluster.getFileSystem(1); fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, @@ -120,16 +122,16 @@ public static void clusterSetupAtBegining() throws IOException, fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString()); - defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + + defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); - defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + + defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); - + fHdfs.mkdirs(defaultWorkingDirectory); fHdfs2.mkdirs(defaultWorkingDirectory2); } - + @AfterClass public static void ClusterShutdownAtEnd() throws Exception { if (cluster != null) { @@ -166,7 +168,7 @@ void setupMountPoints() { int getExpectedDirPaths() { return 8; } - + @Override int getExpectedMountPoints() { return 9; @@ -174,7 +176,7 @@ int getExpectedMountPoints() { @Override int getExpectedDelegationTokenCount() { - return 2; // Mount points to 2 unique hdfs + return 2; // Mount points to 2 unique hdfs } @Override @@ -506,4 +508,29 @@ public void testInternalDirectoryPermissions() throws IOException { assertEquals(fs.getFileStatus(subDirOfInternalDir).getPermission(), fs.getFileStatus(subDirOfRealDir).getPermission()); } + + @Test + public void testEnclosingRootsBase() throws Exception { + final Path zone = new Path("/data/EZ"); + fsTarget.mkdirs(zone); + final Path zone1 = new Path("/data/EZ/zone1"); + fsTarget.mkdirs(zone1); + + DFSTestUtil.createKey("test_key", cluster, 0, CONF); + HdfsAdmin hdfsAdmin = new HdfsAdmin(cluster.getURI(0), CONF); + final EnumSet provisionTrash = + EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH); + hdfsAdmin.createEncryptionZone(zone1, "test_key", provisionTrash); + RemoteIterator zones = hdfsAdmin.listEncryptionZones(); + assertEquals(fsView.getEnclosingRoot(zone), new Path("/data")); + assertEquals(fsView.getEnclosingRoot(zone1), zone1); + + Path nn02Ez = new Path("/mountOnNn2/EZ"); + fsTarget2.mkdirs(nn02Ez); + assertEquals(fsView.getEnclosingRoot((nn02Ez)), new Path("/mountOnNn2")); 
+ HdfsAdmin hdfsAdmin2 = new HdfsAdmin(cluster.getURI(1), CONF); + DFSTestUtil.createKey("test_key", cluster, 1, CONF); + hdfsAdmin2.createEncryptionZone(nn02Ez, "test_key", provisionTrash); + assertEquals(fsView.getEnclosingRoot((nn02Ez)), nn02Ez); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 32b0992610f6f1..d6fa7765d1a07a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -141,9 +141,9 @@ public class TestDistributedFileSystem { } private boolean dualPortTesting = false; - + private boolean noXmlDefaults = false; - + HdfsConfiguration getTestConfiguration() { HdfsConfiguration conf; if (noXmlDefaults) { @@ -199,9 +199,9 @@ public void testFileSystemCloseAll() throws Exception { if (cluster != null) {cluster.shutdown();} } } - + /** - * Tests DFSClient.close throws no ConcurrentModificationException if + * Tests DFSClient.close throws no ConcurrentModificationException if * multiple files are open. * Also tests that any cached sockets are closed. (HDFS-3359) * Also tests deprecated listOpenFiles(EnumSet<>). (HDFS-14595) @@ -402,7 +402,7 @@ public void testDFSCloseOrdering() throws Exception { inOrder.verify(fs.dfs).delete(eq(path.toString()), eq(true)); inOrder.verify(fs.dfs).close(); } - + private static class MyDistributedFileSystem extends DistributedFileSystem { MyDistributedFileSystem() { dfs = mock(DFSClient.class); @@ -481,7 +481,7 @@ public void testDFSClient() throws Exception { .getDeclaredMethod("isRunning"); checkMethod.setAccessible(true); assertFalse((boolean) checkMethod.invoke(dfs.dfs.getLeaseRenewer())); - + { //create a file final FSDataOutputStream out = dfs.create(filepaths[0]); @@ -593,11 +593,11 @@ public void testDFSClient() throws Exception { assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer())); dfs.close(); } - + { // test accessing DFS with ip address. should work with any hostname // alias or ip address that points to the interface that NameNode // is listening on. In this case, it is localhost. 
- String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + + String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file"; Path path = new Path(uri); FileSystem fs = FileSystem.get(path.toUri(), conf); @@ -605,7 +605,7 @@ public void testDFSClient() throws Exception { byte[] buf = new byte[1024]; out.write(buf); out.close(); - + FSDataInputStream in = fs.open(path); in.readFully(buf); in.close(); @@ -699,7 +699,7 @@ public void testStatistics() throws IOException { fs.mkdirs(dir); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.MKDIRS, opCount + 1); - + opCount = getOpStatistics(OpType.CREATE); FSDataOutputStream out = fs.create(file, (short)1); out.close(); @@ -710,7 +710,7 @@ public void testStatistics() throws IOException { FileStatus status = fs.getFileStatus(file); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_STATUS, opCount + 1); - + opCount = getOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS); fs.getFileBlockLocations(file, 0, 0); checkStatistics(fs, ++readOps, writeOps, largeReadOps); @@ -718,30 +718,30 @@ public void testStatistics() throws IOException { fs.getFileBlockLocations(status, 0, 0); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 2); - + opCount = getOpStatistics(OpType.OPEN); FSDataInputStream in = fs.open(file); in.close(); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.OPEN, opCount + 1); - + opCount = getOpStatistics(OpType.SET_REPLICATION); fs.setReplication(file, (short)2); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.SET_REPLICATION, opCount + 1); - + opCount = getOpStatistics(OpType.RENAME); Path file1 = new Path(dir, "file1"); fs.rename(file, file1); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.RENAME, opCount + 1); - + opCount = getOpStatistics(OpType.GET_CONTENT_SUMMARY); fs.getContentSummary(file1); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_CONTENT_SUMMARY, opCount + 1); - - + + // Iterative ls test long mkdirOp = getOpStatistics(OpType.MKDIRS); long listStatusOp = getOpStatistics(OpType.LIST_STATUS); @@ -752,7 +752,7 @@ public void testStatistics() throws IOException { mkdirOp++; FileStatus[] list = fs.listStatus(dir); if (list.length > lsLimit) { - // if large directory, then count readOps and largeReadOps by + // if large directory, then count readOps and largeReadOps by // number times listStatus iterates int iterations = (int)Math.ceil((double)list.length/lsLimit); largeReadOps += iterations; @@ -763,7 +763,7 @@ public void testStatistics() throws IOException { readOps++; listStatusOp++; } - + // writeOps incremented by 1 for mkdirs // readOps and largeReadOps incremented by 1 or more checkStatistics(fs, readOps, ++writeOps, largeReadOps); @@ -776,7 +776,7 @@ public void testStatistics() throws IOException { checkStatistics(fs, readOps, writeOps, largeReadOps); checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP); } - + opCount = getOpStatistics(OpType.GET_STATUS); fs.getStatus(file1); checkStatistics(fs, ++readOps, writeOps, largeReadOps); @@ -786,12 +786,12 @@ public void testStatistics() throws IOException { fs.getFileChecksum(file1); checkStatistics(fs, ++readOps, writeOps, largeReadOps); checkOpStatistics(OpType.GET_FILE_CHECKSUM, opCount + 1); - + opCount = getOpStatistics(OpType.SET_PERMISSION); 
fs.setPermission(file1, new FsPermission((short)0777)); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.SET_PERMISSION, opCount + 1); - + opCount = getOpStatistics(OpType.SET_TIMES); fs.setTimes(file1, 0L, 0L); checkStatistics(fs, readOps, ++writeOps, largeReadOps); @@ -807,7 +807,7 @@ public void testStatistics() throws IOException { fs.delete(dir, true); checkStatistics(fs, readOps, ++writeOps, largeReadOps); checkOpStatistics(OpType.DELETE, opCount + 1); - + } finally { if (cluster != null) cluster.shutdown(); } @@ -924,6 +924,11 @@ public void testStatistics2() throws IOException, NoSuchAlgorithmException { checkStatistics(dfs, ++readOps, writeOps, 0); checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1); + opCount = getOpStatistics(OpType.GET_ENCLOSING_ROOT); + dfs.getEnclosingRoot(dir); + checkStatistics(dfs, ++readOps, writeOps, 0); + checkOpStatistics(OpType.GET_ENCLOSING_ROOT, opCount + 1); + opCount = getOpStatistics(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST); dfs.getSnapshottableDirListing(); checkStatistics(dfs, ++readOps, writeOps, 0); @@ -1211,7 +1216,7 @@ public void testFileChecksum() throws Exception { final UserGroupInformation current = UserGroupInformation.getCurrentUser(); final UserGroupInformation ugi = UserGroupInformation.createUserForTesting( current.getShortUserName() + "x", new String[]{"user"}); - + try { hdfs.getFileChecksum(new Path( "/test/TestNonExistingFile")); @@ -1254,7 +1259,7 @@ public FileSystem run() throws Exception { final byte[] data = new byte[RAN.nextInt(block_size/2-1)+n*block_size+1]; RAN.nextBytes(data); System.out.println("data.length=" + data.length); - + //write data to a file final Path foo = new Path(dir, "foo" + n); { @@ -1263,7 +1268,7 @@ public FileSystem run() throws Exception { out.write(data); out.close(); } - + //compute checksum final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo); System.out.println("hdfsfoocs=" + hdfsfoocs); @@ -1321,7 +1326,7 @@ public FileSystem run() throws Exception { hdfs.setPermission(dir, new FsPermission((short)0)); - { //test permission error on webhdfs + { //test permission error on webhdfs try { webhdfs.getFileChecksum(webhdfsqualified); fail(); @@ -1333,7 +1338,7 @@ public FileSystem run() throws Exception { } cluster.shutdown(); } - + @Test public void testAllWithDualPort() throws Exception { dualPortTesting = true; @@ -1347,7 +1352,7 @@ public void testAllWithDualPort() throws Exception { dualPortTesting = false; } } - + @Test public void testAllWithNoXmlDefaults() throws Exception { // Do all the tests with a configuration that ignores the defaults in @@ -1360,7 +1365,7 @@ public void testAllWithNoXmlDefaults() throws Exception { testDFSClient(); testFileChecksum(); } finally { - noXmlDefaults = false; + noXmlDefaults = false; } } @@ -1424,7 +1429,7 @@ public void testCreateWithCustomChecksum() throws Exception { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = null; Path testBasePath = new Path("/test/csum"); - // create args + // create args Path path1 = new Path(testBasePath, "file_wtih_crc1"); Path path2 = new Path(testBasePath, "file_with_crc2"); ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512); @@ -1538,13 +1543,13 @@ public void testCreateWithStoragePolicy() throws Throwable { public void testListFiles() throws IOException { Configuration conf = getTestConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); - + try { DistributedFileSystem fs = cluster.getFileSystem(); - + final Path 
       fs.create(new Path(relative, "foo")).close();
-      
+
       final List<LocatedFileStatus> retVal = new ArrayList<>();
       final RemoteIterator<LocatedFileStatus> iter =
           fs.listFiles(relative, true);
@@ -1585,7 +1590,7 @@ public void testDFSClientPeerReadTimeout() throws IOException {
     // only need cluster to create a dfs client to get a peer
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
-      cluster.waitActive();      
+      cluster.waitActive();
       DistributedFileSystem dfs = cluster.getFileSystem();
       // use a dummy socket to ensure the read timesout
       ServerSocket socket = new ServerSocket(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java
new file mode 100644
index 00000000000000..ac220de466d3fd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests for the new {@code getEnclosingRoot} API on HDFS, using encryption
+ * zones as the nested roots.
+ */
+public class TestEnclosingRoot {
+  static final Logger LOG = LoggerFactory.getLogger(TestEnclosingRoot.class);
+
+  protected Configuration conf;
+  private FileSystemTestHelper fsHelper;
+
+  protected MiniDFSCluster cluster;
+  protected HdfsAdmin dfsAdmin;
+  protected DistributedFileSystem fs;
+  private File testRootDir;
+  protected final String TEST_KEY = "test_key";
+
+  protected FileSystemTestWrapper fsWrapper;
+  protected FileContextTestWrapper fcWrapper;
+
+  protected static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
+      EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
+
+  protected String getKeyProviderURI() {
+    return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+        new Path(testRootDir.toString(), "test.jks").toUri();
+  }
+
+  @Rule
+  public Timeout globalTimeout = new Timeout(120 * 1000);
+
+  @Before
+  public void setup() throws Exception {
+    conf = new HdfsConfiguration();
+    fsHelper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = fsHelper.getTestRootDir();
+    testRootDir = new File(testRoot).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        getKeyProviderURI());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    // Lower the batch size for testing
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+        2);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(EncryptionZoneManager.class), Level.TRACE);
+    fs = cluster.getFileSystem();
+    fsWrapper = new FileSystemTestWrapper(fs);
+    fcWrapper = new FileContextTestWrapper(
+        FileContext.getFileContext(cluster.getURI(), conf));
+    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    setProvider();
+    // Create a test key
+    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+  }
+
+  protected void setProvider() {
+    // Need to set the client's KeyProvider to the NN's for JKS,
+    // else the updates do not get flushed properly
+    fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
+        .getProvider());
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+    EncryptionFaultInjector.instance = new EncryptionFaultInjector();
+  }
+
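+  /**
+   * Before any encryption zone exists the enclosing root of every path is
+   * "/"; once a zone is created, the zone root is returned, even for files
+   * that do not exist yet.
+   */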
+  @Test
+  public void testBasicOperationsRootDir() throws Exception {
+    final Path rootDir = new Path("/");
+    final Path zone1 = new Path(rootDir, "zone1");
+    final Path zone1FileDNE = new Path(zone1, "newDNE.txt");
+
+    // No encryption zones yet: everything resolves to the filesystem root.
+    assertEquals(rootDir, fs.getEnclosingRoot(rootDir));
+    assertEquals(rootDir, fs.getEnclosingRoot(zone1));
+
+    fs.mkdirs(zone1);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
+    // The zone root is now the enclosing root for paths under it, including
+    // paths that do not exist yet.
+    assertEquals(rootDir, fs.getEnclosingRoot(rootDir));
+    assertEquals(zone1, fs.getEnclosingRoot(zone1));
+    assertEquals(zone1, fs.getEnclosingRoot(zone1FileDNE));
+  }
+
+}
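
For context, a minimal client-side sketch of the new call, assuming a cluster
running with this change and an encryption zone already created at /zone1;
the class name and paths below are illustrative only, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EnclosingRootExample {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster with this patch.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // For a path under an encryption zone, the enclosing root is the
        // zone root (/zone1 here); for any other path it is "/".
        Path root = fs.getEnclosingRoot(new Path("/zone1/data.txt"));
        System.out.println("enclosing root: " + root);
      }
    }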