diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java index 59bdbee3aae57..da3bd0b71e26d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java @@ -65,9 +65,9 @@ * This class provides an interface for implementors of a Hadoop file system * (analogous to the VFS of Unix). Applications do not access this class; * instead they access files across all file systems using {@link FileContext}. - * + * * Pathnames passed to AbstractFileSystem can be fully qualified URI that - * matches the "this" file system (ie same scheme and authority) + * matches the "this" file system (ie same scheme and authority) * or a Slash-relative name that is assumed to be relative * to the root of the "this" file system . */ @@ -77,34 +77,34 @@ public abstract class AbstractFileSystem implements PathCapabilities { static final Logger LOG = LoggerFactory.getLogger(AbstractFileSystem.class); /** Recording statistics per a file system class. */ - private static final Map + private static final Map STATISTICS_TABLE = new HashMap(); - + /** Cache of constructors for each file system class. */ - private static final Map, Constructor> CONSTRUCTOR_CACHE = + private static final Map, Constructor> CONSTRUCTOR_CACHE = new ConcurrentHashMap, Constructor>(); - - private static final Class[] URI_CONFIG_ARGS = + + private static final Class[] URI_CONFIG_ARGS = new Class[]{URI.class, Configuration.class}; - + /** The statistics for this file system. */ protected Statistics statistics; @VisibleForTesting static final String NO_ABSTRACT_FS_ERROR = "No AbstractFileSystem configured for scheme"; - + private final URI myUri; - + public Statistics getStatistics() { return statistics; } - + /** * Returns true if the specified string is considered valid in the path part * of a URI by this file system. The default implementation enforces the rules * of HDFS, but subclasses may override this method to implement specific * validation rules for specific file systems. - * + * * @param src String source filename to check, path part of the URI * @return boolean true if the specified string is considered valid */ @@ -121,8 +121,8 @@ public boolean isValidName(String src) { } return true; } - - /** + + /** * Create an object for the given class and initialize it from conf. * @param theClass class of which an object is created * @param conf Configuration @@ -152,7 +152,7 @@ static T newInstance(Class theClass, } return result; } - + /** * Create a file system instance for the specified uri using the conf. The * conf is used to find the class name that implements the file system. The @@ -160,7 +160,7 @@ static T newInstance(Class theClass, * * @param uri URI of the file system * @param conf Configuration for the file system - * + * * @return Returns the file system for the given URI * * @throws UnsupportedFileSystemException file system for uri is @@ -182,7 +182,7 @@ public static AbstractFileSystem createFileSystem(URI uri, Configuration conf) /** * Get the statistics for a particular file system. - * + * * @param uri * used as key to lookup STATISTICS_TABLE. Only scheme and authority * part of the uri are used. 
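Aside: the statistics table in the hunks above is keyed only by scheme and authority, and instances are normally obtained through AbstractFileSystem.get(URI, Configuration) rather than direct construction. A minimal usage sketch, assuming the stock fs.AbstractFileSystem.hdfs.impl mapping and an illustrative namenode address:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;

public class AfsLookupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The scheme resolves to an implementation class through the
    // "fs.AbstractFileSystem.<scheme>.impl" property; for hdfs the stock
    // mapping is org.apache.hadoop.fs.Hdfs.
    URI uri = URI.create("hdfs://namenode:8020/");  // illustrative authority
    AbstractFileSystem afs = AbstractFileSystem.get(uri, conf);
    // Statistics are recorded per scheme+authority, so every instance
    // created for this base URI shares one Statistics object.
    System.out.println(afs.getUri() + " -> " + afs.getStatistics());
  }
}
```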
@@ -202,7 +202,7 @@ protected static synchronized Statistics getStatistics(URI uri) { } return result; } - + private static URI getBaseUri(URI uri) { String scheme = uri.getScheme(); String authority = uri.getAuthority(); @@ -214,7 +214,7 @@ private static URI getBaseUri(URI uri) { } return URI.create(baseUriString); } - + public static synchronized void clearStatistics() { for(Statistics stat: STATISTICS_TABLE.values()) { stat.reset(); @@ -230,7 +230,7 @@ public static synchronized void printStatistics() { + pair.getKey().getAuthority() + ": " + pair.getValue()); } } - + protected static synchronized Map getAllStatistics() { Map statsMap = new HashMap( STATISTICS_TABLE.size()); @@ -249,14 +249,14 @@ protected static synchronized Map getAllStatistics() { * determines a configuration property name, * fs.AbstractFileSystem.scheme.impl whose value names the * AbstractFileSystem class. - * + * * The entire URI and conf is passed to the AbstractFileSystem factory method. - * + * * @param uri for the file system to be created. * @param conf which is passed to the file system impl. - * + * * @return file system for the given URI. - * + * * @throws UnsupportedFileSystemException if the file system for * uri is not supported. */ @@ -267,7 +267,7 @@ public static AbstractFileSystem get(final URI uri, final Configuration conf) /** * Constructor to be called by subclasses. - * + * * @param uri for this file system. * @param supportedScheme the scheme supported by the implementor * @param authorityNeeded if true then theURI must have authority, if false @@ -279,7 +279,7 @@ public AbstractFileSystem(final URI uri, final String supportedScheme, final boolean authorityNeeded, final int defaultPort) throws URISyntaxException { myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort); - statistics = getStatistics(uri); + statistics = getStatistics(uri); } /** @@ -303,14 +303,14 @@ public void checkScheme(URI uri, String supportedScheme) { * Get the URI for the file system based on the given URI. The path, query * part of the given URI is stripped out and default file system port is used * to form the URI. - * + * * @param uri FileSystem URI. * @param authorityNeeded if true authority cannot be null in the URI. If * false authority must be null. * @param defaultPort default port to use if port is not specified in the URI. - * + * * @return URI of the file system - * + * * @throws URISyntaxException uri has syntax error */ private URI getUri(URI uri, String supportedScheme, @@ -329,7 +329,7 @@ private URI getUri(URI uri, String supportedScheme, throw new HadoopIllegalArgumentException("Uri without authority: " + uri); } else { return new URI(supportedScheme + ":///"); - } + } } // authority is non null - AuthorityNeeded may be true or false. int port = uri.getPort(); @@ -339,10 +339,10 @@ private URI getUri(URI uri, String supportedScheme, } return new URI(supportedScheme + "://" + uri.getHost() + ":" + port); } - + /** * The default port of this file system. - * + * * @return default port of this file system's Uri scheme * A uri with a port of -1 => default port; */ @@ -350,18 +350,18 @@ private URI getUri(URI uri, String supportedScheme, /** * Returns a URI whose scheme and authority identify this FileSystem. - * + * * @return the uri of this file system. */ public URI getUri() { return myUri; } - + /** * Check that a Path belongs to this FileSystem. - * + * * If the path is fully qualified URI, then its scheme and authority - * matches that of this file system. 
Otherwise the path must be + * matches that of this file system. Otherwise the path must be * slash-relative name. * @param path the path. * @throws InvalidPathException if the path is invalid @@ -375,7 +375,7 @@ public void checkPath(Path path) { if (path.isUriPathAbsolute()) { return; } - throw new InvalidPathException("relative paths not allowed:" + + throw new InvalidPathException("relative paths not allowed:" + path); } else { throw new InvalidPathException( @@ -385,17 +385,17 @@ public void checkPath(Path path) { String thisScheme = this.getUri().getScheme(); String thisHost = this.getUri().getHost(); String thatHost = uri.getHost(); - + // Schemes and hosts must match. // Allow for null Authority for file:/// if (!thisScheme.equalsIgnoreCase(thatScheme) || - (thisHost != null && + (thisHost != null && !thisHost.equalsIgnoreCase(thatHost)) || (thisHost == null && thatHost != null)) { throw new InvalidPathException("Wrong FS: " + path + ", expected: " + this.getUri()); } - + // Ports must match, unless this FS instance is using the default port, in // which case the port may be omitted from the given URI int thisPort = this.getUri().getPort(); @@ -411,13 +411,13 @@ public void checkPath(Path path) { + " with port=" + thisPort); } } - + /** * Get the path-part of a pathname. Checks that URI matches this file system * and that the path-part is a valid name. - * + * * @param p path - * + * * @return path-part of the Path p */ public String getUriPath(final Path p) { @@ -429,7 +429,7 @@ public String getUriPath(final Path p) { } return s; } - + /** * Make the path fully qualified to this file system * @param path the path. @@ -439,23 +439,23 @@ public Path makeQualified(Path path) { checkPath(path); return path.makeQualified(this.getUri(), null); } - + /** * Some file systems like LocalFileSystem have an initial workingDir * that is used as the starting workingDir. For other file systems * like HDFS there is no built in notion of an initial workingDir. - * + * * @return the initial workingDir if the file system has such a notion * otherwise return a null. */ public Path getInitialWorkingDirectory() { return null; } - - /** + + /** * Return the current user's home directory in this file system. * The default implementation returns "/user/$USER/". - * + * * @return current user's home directory. */ public Path getHomeDirectory() { @@ -470,17 +470,17 @@ public Path getHomeDirectory() { return new Path("/user/" + username) .makeQualified(getUri(), null); } - + /** * Return a set of server default configuration values. - * + * * @return server default configuration values - * + * * @throws IOException an I/O error occurred * @deprecated use {@link #getServerDefaults(Path)} instead */ @Deprecated - public abstract FsServerDefaults getServerDefaults() throws IOException; + public abstract FsServerDefaults getServerDefaults() throws IOException; /** * Return a set of server default configuration values based on path. @@ -496,7 +496,7 @@ public FsServerDefaults getServerDefaults(final Path f) throws IOException { * Return the fully-qualified path of path f resolving the path * through any internal symlinks or mount point * @param p path to be resolved - * @return fully qualified path + * @return fully qualified path * @throws FileNotFoundException when file not find throw. * @throws AccessControlException when accees control error throw. * @throws IOException raised on errors performing I/O. 
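The checkPath() contract in the hunk above is easiest to see with concrete paths. A hedged sketch — the hdfs://nn.example.com:8020 authority is invented, and the port comments assume 8020 is the instance's default port:

```java
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;

/** Illustrative only: 'afs' is bound to hdfs://nn.example.com:8020. */
static void demoCheckPath(AbstractFileSystem afs) {
  afs.checkPath(new Path("/user/alice/data"));                      // slash-relative: accepted
  afs.checkPath(new Path("hdfs://nn.example.com:8020/user/alice")); // scheme+authority match
  afs.checkPath(new Path("hdfs://nn.example.com/user/alice"));      // port omitted: accepted only
                                                                    // if the FS uses the default port
  try {
    afs.checkPath(new Path("s3a://bucket/user/alice"));             // wrong scheme
  } catch (InvalidPathException expected) {
    System.out.println(expected.getMessage());                      // "Wrong FS: ..."
  }
}
```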
@@ -508,7 +508,7 @@ public Path resolvePath(final Path p) throws FileNotFoundException, checkPath(p); return getFileStatus(p).getPath(); // default impl is to return the path } - + /** * The specification of this method matches that of * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except @@ -541,7 +541,7 @@ public final FSDataOutputStream create(final Path f, FsPermission permission = null; Progressable progress = null; Boolean createParent = null; - + for (CreateOpts iOpt : opts) { if (CreateOpts.BlockSize.class.isInstance(iOpt)) { if (blockSize != -1) { @@ -603,10 +603,10 @@ public final FSDataOutputStream create(final Path f, FsServerDefaults ssDef = getServerDefaults(f); if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) { - throw new IOException("Internal error: default blockSize is" + + throw new IOException("Internal error: default blockSize is" + " not a multiple of default bytesPerChecksum "); } - + if (blockSize == -1) { blockSize = ssDef.getBlockSize(); } @@ -673,7 +673,7 @@ public abstract FSDataOutputStream createInternal(Path f, /** * The specification of this method matches that of * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path - * f must be fully qualified and the permission is absolute (i.e. + * f must be fully qualified and the permission is absolute (i.e. * umask has been applied). * @param dir directory. * @param permission permission. @@ -806,12 +806,12 @@ public final void rename(final Path src, final Path dst, } renameInternal(src, dst, overwrite); } - + /** * The specification of this method matches that of * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path * f must be for this file system and NO OVERWRITE is performed. - * + * * File systems that do not have a built in overwrite need implement only this * method and can take advantage of the default impl of the other * {@link #renameInternal(Path, Path, boolean)} @@ -829,7 +829,7 @@ public abstract void renameInternal(final Path src, final Path dst) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnresolvedLinkException, IOException; - + /** * The specification of this method matches that of * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path @@ -895,7 +895,7 @@ public void renameInternal(final Path src, final Path dst, } renameInternal(src, dst); } - + /** * Returns true if the file system supports symlinks, false otherwise. * @return true if filesystem supports symlinks @@ -903,9 +903,9 @@ public void renameInternal(final Path src, final Path dst, public boolean supportsSymlinks() { return false; } - + /** - * The specification of this method matches that of + * The specification of this method matches that of * {@link FileContext#createSymlink(Path, Path, boolean)}; * * @param target target. @@ -916,7 +916,7 @@ public boolean supportsSymlinks() { */ public void createSymlink(final Path target, final Path link, final boolean createParent) throws IOException, UnresolvedLinkException { - throw new IOException("File system does not support symlinks"); + throw new IOException("File system does not support symlinks"); } /** @@ -925,14 +925,14 @@ public void createSymlink(final Path target, final Path link, * {@link FileContext#getLinkTarget(Path)}. * @param f the path. * @return target path. 
- * @throws IOException subclass implementations may throw IOException + * @throws IOException subclass implementations may throw IOException */ public Path getLinkTarget(final Path f) throws IOException { throw new AssertionError("Implementation Error: " + getClass() + " that threw an UnresolvedLinkException, causing this method to be" + " called, needs to override this method."); } - + /** * The specification of this method matches that of * {@link FileContext#setPermission(Path, FsPermission)} except that Path f @@ -998,11 +998,11 @@ public abstract void setTimes(final Path f, final long mtime, public abstract FileChecksum getFileChecksum(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException; - + /** * The specification of this method matches that of - * {@link FileContext#getFileStatus(Path)} - * except that an UnresolvedLinkException may be thrown if a symlink is + * {@link FileContext#getFileStatus(Path)} + * except that an UnresolvedLinkException may be thrown if a symlink is * encountered in the path. * * @param f the path. @@ -1052,7 +1052,7 @@ public void access(Path path, FsAction mode) throws AccessControlException, /** * The specification of this method matches that of * {@link FileContext#getFileLinkStatus(Path)} - * except that an UnresolvedLinkException may be thrown if a symlink is + * except that an UnresolvedLinkException may be thrown if a symlink is * encountered in the path leading up to the final path component. * If the file system does not support symlinks then the behavior is * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}. @@ -1105,7 +1105,7 @@ public FsStatus getFsStatus(final Path f) throws AccessControlException, // default impl gets FsStatus of root return getFsStatus(); } - + /** * The specification of this method matches that of * {@link FileContext#getFsStatus(Path)}. @@ -1136,12 +1136,12 @@ public RemoteIterator listStatusIterator(final Path f) return new RemoteIterator() { private int i = 0; private FileStatus[] statusList = listStatus(f); - + @Override public boolean hasNext() { return i < statusList.length; } - + @Override public FileStatus next() { if (!hasNext()) { @@ -1154,7 +1154,7 @@ public FileStatus next() { /** * The specification of this method matches that of - * {@link FileContext#listLocatedStatus(Path)} except that Path f + * {@link FileContext#listLocatedStatus(Path)} except that Path f * must be for this file system. * * In HDFS implementation, the BlockLocation of returned LocatedFileStatus @@ -1174,12 +1174,12 @@ public RemoteIterator listLocatedStatus(final Path f) UnresolvedLinkException, IOException { return new RemoteIterator() { private RemoteIterator itor = listStatusIterator(f); - + @Override public boolean hasNext() throws IOException { return itor.hasNext(); } - + @Override public LocatedFileStatus next() throws IOException { if (!hasNext()) { @@ -1198,7 +1198,7 @@ public LocatedFileStatus next() throws IOException { /** * The specification of this method matches that of - * {@link FileContext.Util#listStatus(Path)} except that Path f must be + * {@link FileContext.Util#listStatus(Path)} except that Path f must be * for this file system. * @param f the path. * @throws AccessControlException access control exception. @@ -1235,7 +1235,7 @@ public RemoteIterator listCorruptFileBlocks(Path path) */ public abstract void setVerifyChecksum(final boolean verifyChecksum) throws AccessControlException, IOException; - + /** * Get a canonical name for this file system. 
* @return a URI string that uniquely identifies this file system @@ -1243,13 +1243,13 @@ public abstract void setVerifyChecksum(final boolean verifyChecksum) public String getCanonicalServiceName() { return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort()); } - + /** * Get one or more delegation tokens associated with the filesystem. Normally * a file system returns a single delegation token. A file system that manages * multiple file systems underneath, could return set of delegation tokens for * all the file systems it manages - * + * * @param renewer the account name that is allowed to renew the token. * @return List of delegation tokens. * If delegation tokens not supported then return a list of size zero. @@ -1577,7 +1577,7 @@ public Collection getAllStoragePolicies() public int hashCode() { return myUri.hashCode(); } - + @Override //Object public boolean equals(Object other) { if (!(other instanceof AbstractFileSystem)) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java index 7fa49ceea61c6..a85cf2ff5a17e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java @@ -61,23 +61,23 @@ @InterfaceAudience.Public @InterfaceStability.Stable public class FilterFileSystem extends FileSystem { - + protected FileSystem fs; protected String swapScheme; - + /* * so that extending classes can define it */ public FilterFileSystem() { } - + public FilterFileSystem(FileSystem fs) { this.fs = fs; this.statistics = fs.statistics; } /** - * Get the raw file system + * Get the raw file system * @return FileSystem being filtered */ public FileSystem getRawFileSystem() { @@ -108,8 +108,8 @@ public void initialize(URI name, Configuration conf) throws IOException { public URI getUri() { return fs.getUri(); } - - + + @Override protected URI getCanonicalUri() { return fs.getCanonicalUri(); @@ -127,7 +127,7 @@ public Path makeQualified(Path path) { // swap in our scheme if the filtered fs is using a different scheme if (swapScheme != null) { try { - // NOTE: should deal with authority, but too much other stuff is broken + // NOTE: should deal with authority, but too much other stuff is broken fqPath = new Path( new URI(swapScheme, fqPath.toUri().getSchemeSpecificPart(), null) ); @@ -137,7 +137,7 @@ public Path makeQualified(Path path) { } return fqPath; } - + /////////////////////////////////////////////////////////////// // FileSystem /////////////////////////////////////////////////////////////// @@ -223,14 +223,14 @@ protected RemoteIterator listLocatedStatus(final Path f, public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet flags, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { - + return fs.createNonRecursive(f, permission, flags, bufferSize, replication, blockSize, progress); } /** * Set replication for an existing file. - * + * * @param src file name * @param replication new replication * @throws IOException raised on errors performing I/O. @@ -241,7 +241,7 @@ public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, public boolean setReplication(Path src, short replication) throws IOException { return fs.setReplication(src, replication); } - + /** * Renames Path src to Path dst. 
Can take place on local fs * or remote DFS. @@ -261,13 +261,13 @@ protected void rename(Path src, Path dst, Rename... options) public boolean truncate(Path f, final long newLength) throws IOException { return fs.truncate(f, newLength); } - + /** Delete a file */ @Override public boolean delete(Path f, boolean recursive) throws IOException { return fs.delete(f, recursive); } - + /** List files in a directory. */ @Override public FileStatus[] listStatus(Path f) throws IOException { @@ -286,7 +286,7 @@ public RemoteIterator listLocatedStatus(Path f) throws IOException { return fs.listLocatedStatus(f); } - + /** Return a remote iterator for listing in a directory */ @Override public RemoteIterator listStatusIterator(Path f) @@ -303,34 +303,34 @@ public Path getHomeDirectory() { /** * Set the current working directory for the given file system. All relative * paths will be resolved relative to it. - * + * * @param newDir new dir. */ @Override public void setWorkingDirectory(Path newDir) { fs.setWorkingDirectory(newDir); } - + /** * Get the current working directory for the given file system - * + * * @return the directory pathname */ @Override public Path getWorkingDirectory() { return fs.getWorkingDirectory(); } - + @Override protected Path getInitialWorkingDirectory() { return fs.getInitialWorkingDirectory(); } - + @Override public FsStatus getStatus(Path p) throws IOException { return fs.getStatus(p); } - + @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { return fs.mkdirs(f, permission); @@ -351,26 +351,26 @@ public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, src, dst); } - + /** * The src files are on the local disk. Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ @Override - public void copyFromLocalFile(boolean delSrc, boolean overwrite, + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, overwrite, srcs, dst); } - + /** * The src file is on the local disk. Add it to FS at * the given dst name. * delSrc indicates if the source should be removed */ @Override - public void copyFromLocalFile(boolean delSrc, boolean overwrite, + public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { fs.copyFromLocalFile(delSrc, overwrite, src, dst); @@ -380,13 +380,13 @@ public void copyFromLocalFile(boolean delSrc, boolean overwrite, * The src file is under FS, and the dst is on the local disk. * Copy it from FS control to the local dst name. * delSrc indicates if the src will be removed or not. - */ + */ @Override public void copyToLocalFile(boolean delSrc, Path src, Path dst) throws IOException { fs.copyToLocalFile(delSrc, src, dst); } - + /** * Returns a local File that the user can write output to. 
The caller * provides both the eventual FS target name and the local working @@ -427,7 +427,7 @@ public long getUsed(Path path) throws IOException { public long getDefaultBlockSize() { return fs.getDefaultBlockSize(); } - + @Override public short getDefaultReplication() { return fs.getDefaultReplication(); @@ -438,7 +438,7 @@ public FsServerDefaults getServerDefaults() throws IOException { return fs.getServerDefaults(); } - // path variants delegate to underlying filesystem + // path variants delegate to underlying filesystem @Override public long getDefaultBlockSize(Path f) { return fs.getDefaultBlockSize(f); @@ -476,7 +476,7 @@ public void access(Path path, FsAction mode) throws AccessControlException, public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, - ParentNotDirectoryException, UnsupportedFileSystemException, + ParentNotDirectoryException, UnsupportedFileSystemException, IOException { fs.createSymlink(target, link, createParent); } @@ -513,7 +513,7 @@ public FileChecksum getFileChecksum(Path f, long length) throws IOException { public void setVerifyChecksum(boolean verifyChecksum) { fs.setVerifyChecksum(verifyChecksum); } - + @Override public void setWriteChecksum(boolean writeChecksum) { fs.setWriteChecksum(writeChecksum); @@ -523,7 +523,7 @@ public void setWriteChecksum(boolean writeChecksum) { public Configuration getConf() { return fs.getConf(); } - + @Override public void close() throws IOException { super.close(); @@ -564,7 +564,7 @@ protected boolean primitiveMkdir(Path f, FsPermission abdolutePermission) throws IOException { return fs.primitiveMkdir(f, abdolutePermission); } - + @Override // FileSystem public FileSystem[] getChildFileSystems() { return new FileSystem[]{fs}; @@ -575,13 +575,13 @@ public Path createSnapshot(Path path, String snapshotName) throws IOException { return fs.createSnapshot(path, snapshotName); } - + @Override // FileSystem public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { fs.renameSnapshot(path, snapshotOldName, snapshotNewName); } - + @Override // FileSystem public void deleteSnapshot(Path path, String snapshotName) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java index 808f4bb32d27d..df010e3dae7f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java @@ -49,17 +49,17 @@ * pass all requests to the contained file system. Subclasses of * FilterFs may further override some of these methods and may also * provide additional methods and fields. 
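FilterFileSystem's value is that a subclass overrides only what it changes; everything else falls through to the wrapped instance. A minimal, hypothetical subclass (LoggingFileSystem is a made-up name, not part of this patch):

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

public class LoggingFileSystem extends FilterFileSystem {
  public LoggingFileSystem(FileSystem fs) {
    super(fs);                          // shares fs and fs.statistics
  }

  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    System.out.println("delete(" + f + ", recursive=" + recursive + ")");
    return super.delete(f, recursive);  // delegates to the wrapped fs
  }
}
```

FilterFs, whose hunks follow, applies the same decorator pattern to AbstractFileSystem, with the addition that most of its overrides call checkPath(f) before delegating.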
- * + * */ @InterfaceAudience.Private @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public abstract class FilterFs extends AbstractFileSystem { private final AbstractFileSystem myFs; - + protected AbstractFileSystem getMyFs() { return myFs; } - + protected FilterFs(AbstractFileSystem fs) throws URISyntaxException { super(fs.getUri(), fs.getUri().getScheme(), false, fs.getUriDefaultPort()); myFs = fs; @@ -69,7 +69,7 @@ protected FilterFs(AbstractFileSystem fs) throws URISyntaxException { public Statistics getStatistics() { return myFs.getStatistics(); } - + @Override public Path makeQualified(Path path) { return myFs.makeQualified(path); @@ -79,17 +79,17 @@ public Path makeQualified(Path path) { public Path getInitialWorkingDirectory() { return myFs.getInitialWorkingDirectory(); } - + @Override public Path getHomeDirectory() { return myFs.getHomeDirectory(); } - + @Override public FSDataOutputStream createInternal(Path f, EnumSet flag, FsPermission absolutePermission, int bufferSize, short replication, long blockSize, Progressable progress, - ChecksumOpt checksumOpt, boolean createParent) + ChecksumOpt checksumOpt, boolean createParent) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.createInternal(f, flag, absolutePermission, bufferSize, @@ -97,7 +97,7 @@ public FSDataOutputStream createInternal(Path f, } @Override - public boolean delete(Path f, boolean recursive) + public boolean delete(Path f, boolean recursive) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.delete(f, recursive); @@ -111,14 +111,14 @@ public BlockLocation[] getFileBlockLocations(Path f, long start, long len) } @Override - public FileChecksum getFileChecksum(Path f) + public FileChecksum getFileChecksum(Path f) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.getFileChecksum(f); } @Override - public FileStatus getFileStatus(Path f) + public FileStatus getFileStatus(Path f) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.getFileStatus(f); @@ -137,12 +137,12 @@ public void access(Path path, FsAction mode) throws AccessControlException, } @Override - public FileStatus getFileLinkStatus(final Path f) + public FileStatus getFileLinkStatus(final Path f) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.getFileLinkStatus(f); } - + @Override public FsStatus getFsStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { @@ -159,7 +159,7 @@ public FsStatus getFsStatus() throws IOException { public FsServerDefaults getServerDefaults() throws IOException { return myFs.getServerDefaults(); } - + @Override public FsServerDefaults getServerDefaults(final Path f) throws IOException { return myFs.getServerDefaults(f); @@ -180,19 +180,19 @@ public int getUriDefaultPort() { public URI getUri() { return myFs.getUri(); } - + @Override public void checkPath(Path path) { myFs.checkPath(path); } - + @Override public String getUriPath(final Path p) { return myFs.getUriPath(p); } - + @Override - public FileStatus[] listStatus(Path f) + public FileStatus[] listStatus(Path f) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.listStatus(f); @@ -217,7 +217,7 @@ public void mkdir(Path dir, FsPermission permission, boolean createParent) throws IOException, UnresolvedLinkException { checkPath(dir); myFs.mkdir(dir, permission, createParent); - + } @Override @@ -228,14 +228,14 @@ public FSDataInputStream open(final Path f) 
throws AccessControlException, } @Override - public FSDataInputStream open(Path f, int bufferSize) + public FSDataInputStream open(Path f, int bufferSize) throws IOException, UnresolvedLinkException { checkPath(f); return myFs.open(f, bufferSize); } @Override - public boolean truncate(Path f, long newLength) + public boolean truncate(Path f, long newLength) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { checkPath(f); @@ -243,7 +243,7 @@ public boolean truncate(Path f, long newLength) } @Override - public void renameInternal(Path src, Path dst) + public void renameInternal(Path src, Path dst) throws IOException, UnresolvedLinkException { checkPath(src); checkPath(dst); @@ -257,13 +257,13 @@ public void renameInternal(final Path src, final Path dst, ParentNotDirectoryException, UnresolvedLinkException, IOException { myFs.renameInternal(src, dst, overwrite); } - + @Override public void setOwner(Path f, String username, String groupname) throws IOException, UnresolvedLinkException { checkPath(f); myFs.setOwner(f, username, groupname); - + } @Override @@ -281,14 +281,14 @@ public boolean setReplication(Path f, short replication) } @Override - public void setTimes(Path f, long mtime, long atime) + public void setTimes(Path f, long mtime, long atime) throws IOException, UnresolvedLinkException { checkPath(f); myFs.setTimes(f, mtime, atime); } @Override - public void setVerifyChecksum(boolean verifyChecksum) + public void setVerifyChecksum(boolean verifyChecksum) throws IOException, UnresolvedLinkException { myFs.setVerifyChecksum(verifyChecksum); } @@ -299,7 +299,7 @@ public boolean supportsSymlinks() { } @Override - public void createSymlink(Path target, Path link, boolean createParent) + public void createSymlink(Path target, Path link, boolean createParent) throws IOException, UnresolvedLinkException { myFs.createSymlink(target, link, createParent); } @@ -308,12 +308,12 @@ public void createSymlink(Path target, Path link, boolean createParent) public Path getLinkTarget(final Path f) throws IOException { return myFs.getLinkTarget(f); } - + @Override // AbstractFileSystem public String getCanonicalServiceName() { return myFs.getCanonicalServiceName(); } - + @Override // AbstractFileSystem public List> getDelegationTokens(String renewer) throws IOException { return myFs.getDelegationTokens(renewer); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 80b35e8afe781..5ad3fe96f08f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -38,9 +38,9 @@ import "erasurecoding.proto"; import "HAServiceProtocol.proto"; /** - * The ClientNamenodeProtocol Service defines the interface between a client + * The ClientNamenodeProtocol Service defines the interface between a client * (as runnign inside a MR Task) and the Namenode. - * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc + * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc * for each of the methods. * The exceptions declared in the above class also apply to this protocol. * Exceptions are unwrapped and thrown by the PB libraries. 
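The proto comment above ("Exceptions are unwrapped and thrown by the PB libraries") pairs with a fixed idiom on the server side, visible throughout the translator hunks below: delegate to the NameNode, wrap any IOException in a ServiceException, and return a pre-built empty response proto. Schematic only — the Foo* names are placeholders for any request/response pair in ClientNamenodeProtocol.proto, not real types:

```java
// Placeholder types: substitute any request/response pair from
// ClientNamenodeProtocol.proto.
@Override
public FooResponseProto foo(RpcController controller, FooRequestProto req)
    throws ServiceException {
  try {
    server.foo(req.getSrc());      // delegate to the NameNode's ClientProtocol
    return VOID_FOO_RESPONSE;      // shared immutable empty response
  } catch (IOException e) {
    throw new ServiceException(e); // unwrapped back to IOException client-side
  }
}
```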
@@ -1007,7 +1007,7 @@ service ClientNamenodeProtocol { rpc allowSnapshot(AllowSnapshotRequestProto) returns(AllowSnapshotResponseProto); rpc disallowSnapshot(DisallowSnapshotRequestProto) - returns(DisallowSnapshotResponseProto); + returns(DisallowSnapshotResponseProto); rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto) returns(GetSnapshottableDirListingResponseProto); rpc getSnapshotListing(GetSnapshotListingRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index c75908a73644e..bf69868ab1ac9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -325,12 +325,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements DeleteSnapshotResponseProto.newBuilder().build(); static final RenameSnapshotResponseProto VOID_RENAME_SNAPSHOT_RESPONSE = RenameSnapshotResponseProto.newBuilder().build(); - static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE = + static final AllowSnapshotResponseProto VOID_ALLOW_SNAPSHOT_RESPONSE = AllowSnapshotResponseProto.newBuilder().build(); static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE = DisallowSnapshotResponseProto.newBuilder().build(); - static final GetSnapshottableDirListingResponseProto - NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE = + static final GetSnapshottableDirListingResponseProto + NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE = GetSnapshottableDirListingResponseProto.newBuilder().build(); static final GetSnapshotListingResponseProto NULL_GET_SNAPSHOT_LISTING_RESPONSE = @@ -341,28 +341,28 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements VOID_UNSET_STORAGE_POLICY_RESPONSE = UnsetStoragePolicyResponseProto.newBuilder().build(); - private static final CreateResponseProto VOID_CREATE_RESPONSE = + private static final CreateResponseProto VOID_CREATE_RESPONSE = CreateResponseProto.newBuilder().build(); - private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = + private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = SetPermissionResponseProto.newBuilder().build(); - private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = + private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = SetOwnerResponseProto.newBuilder().build(); - private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = + private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = AbandonBlockResponseProto.newBuilder().build(); - private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = + private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = ReportBadBlocksResponseProto.newBuilder().build(); - private static final ConcatResponseProto VOID_CONCAT_RESPONSE = + private static final ConcatResponseProto VOID_CONCAT_RESPONSE = ConcatResponseProto.newBuilder().build(); - private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = + private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = Rename2ResponseProto.newBuilder().build(); - private static final GetListingResponseProto VOID_GETLISTING_RESPONSE 
= + private static final GetListingResponseProto VOID_GETLISTING_RESPONSE = GetListingResponseProto.newBuilder().build(); private static final GetBatchedListingResponseProto @@ -372,50 +372,50 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements .setHasMore(false) .build(); - private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = + private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = RenewLeaseResponseProto.newBuilder().build(); private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = RefreshNodesResponseProto.newBuilder().build(); - private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = + private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = FinalizeUpgradeResponseProto.newBuilder().build(); - private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = + private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = MetaSaveResponseProto.newBuilder().build(); - private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = + private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = GetFileInfoResponseProto.newBuilder().build(); private static final GetLocatedFileInfoResponseProto VOID_GETLOCATEDFILEINFO_RESPONSE = GetLocatedFileInfoResponseProto.newBuilder().build(); - private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = + private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = GetFileLinkInfoResponseProto.newBuilder().build(); - private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = + private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = SetQuotaResponseProto.newBuilder().build(); - private static final FsyncResponseProto VOID_FSYNC_RESPONSE = + private static final FsyncResponseProto VOID_FSYNC_RESPONSE = FsyncResponseProto.newBuilder().build(); - private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = + private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = SetTimesResponseProto.newBuilder().build(); - private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = + private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = CreateSymlinkResponseProto.newBuilder().build(); private static final UpdatePipelineResponseProto - VOID_UPDATEPIPELINE_RESPONSE = + VOID_UPDATEPIPELINE_RESPONSE = UpdatePipelineResponseProto.newBuilder().build(); - private static final CancelDelegationTokenResponseProto - VOID_CANCELDELEGATIONTOKEN_RESPONSE = + private static final CancelDelegationTokenResponseProto + VOID_CANCELDELEGATIONTOKEN_RESPONSE = CancelDelegationTokenResponseProto.newBuilder().build(); - private static final SetBalancerBandwidthResponseProto - VOID_SETBALANCERBANDWIDTH_RESPONSE = + private static final SetBalancerBandwidthResponseProto + VOID_SETBALANCERBANDWIDTH_RESPONSE = SetBalancerBandwidthResponseProto.newBuilder().build(); private static final SetAclResponseProto @@ -435,10 +435,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements private static final RemoveAclResponseProto VOID_REMOVEACL_RESPONSE = RemoveAclResponseProto.getDefaultInstance(); - + private static final SetXAttrResponseProto VOID_SETXATTR_RESPONSE = SetXAttrResponseProto.getDefaultInstance(); - + private static final RemoveXAttrResponseProto VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance(); @@ -451,7 +451,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements /** * Constructor - * + * * @param server - the NN server * @throws IOException */ @@ -492,7 +492,7 @@ public GetServerDefaultsResponseProto getServerDefaults( } } - + @Override public CreateResponseProto create(RpcController controller, CreateRequestProto req) throws ServiceException { @@ -546,7 +546,7 @@ public AppendResponseProto append(RpcController controller, public SetReplicationResponseProto setReplication(RpcController controller, SetReplicationRequestProto req) throws ServiceException { try { - boolean result = + boolean result = server.setReplication(req.getSrc(), (short) req.getReplication()); return SetReplicationResponseProto.newBuilder().setResult(result).build(); } catch (IOException e) { @@ -570,7 +570,7 @@ public SetPermissionResponseProto setPermission(RpcController controller, public SetOwnerResponseProto setOwner(RpcController controller, SetOwnerRequestProto req) throws ServiceException { try { - server.setOwner(req.getSrc(), + server.setOwner(req.getSrc(), req.hasUsername() ? req.getUsername() : null, req.hasGroupname() ? req.getGroupname() : null); } catch (IOException e) { @@ -594,7 +594,7 @@ public AbandonBlockResponseProto abandonBlock(RpcController controller, @Override public AddBlockResponseProto addBlock(RpcController controller, AddBlockRequestProto req) throws ServiceException { - + try { List excl = req.getExcludeNodesList(); List favor = req.getFavoredNodesList(); @@ -645,7 +645,7 @@ public GetAdditionalDatanodeResponseProto getAdditionalDatanode( public CompleteResponseProto complete(RpcController controller, CompleteRequestProto req) throws ServiceException { try { - boolean result = + boolean result = server.complete(req.getSrc(), req.getClientName(), req.hasLast() ? PBHelperClient.convert(req.getLast()) : null, req.hasFileId() ? 
req.getFileId() : HdfsConstants.GRANDFATHER_INODE_ID); @@ -654,7 +654,7 @@ public CompleteResponseProto complete(RpcController controller, throw new ServiceException(e); } } - + @Override public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller, ReportBadBlocksRequestProto req) throws ServiceException { @@ -708,11 +708,11 @@ public Rename2ResponseProto rename2(RpcController controller, } try { - server.rename2(req.getSrc(), req.getDst(), + server.rename2(req.getSrc(), req.getDst(), optionList.toArray(new Rename[optionList.size()])); } catch (IOException e) { throw new ServiceException(e); - } + } return VOID_RENAME2_RESPONSE; } @@ -837,7 +837,7 @@ public RecoverLeaseResponseProto recoverLease(RpcController controller, throw new ServiceException(e); } } - + @Override public RestoreFailedStorageResponseProto restoreFailedStorage( RpcController controller, RestoreFailedStorageRequestProto req) @@ -936,7 +936,7 @@ public SetSafeModeResponseProto setSafeMode(RpcController controller, throw new ServiceException(e); } } - + @Override public SaveNamespaceResponseProto saveNamespace(RpcController controller, SaveNamespaceRequestProto req) throws ServiceException { @@ -1050,12 +1050,12 @@ public GetFileInfoResponseProto getFileInfo(RpcController controller, GetFileInfoRequestProto req) throws ServiceException { try { HdfsFileStatus result = server.getFileInfo(req.getSrc()); - + if (result != null) { return GetFileInfoResponseProto.newBuilder().setFs( PBHelperClient.convert(result)).build(); } - return VOID_GETFILEINFO_RESPONSE; + return VOID_GETFILEINFO_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } @@ -1087,7 +1087,7 @@ public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller, return GetFileLinkInfoResponseProto.newBuilder().setFs( PBHelperClient.convert(result)).build(); } else { - return VOID_GETFILELINKINFO_RESPONSE; + return VOID_GETFILELINKINFO_RESPONSE; } } catch (IOException e) { @@ -1107,7 +1107,7 @@ public GetContentSummaryResponseProto getContentSummary( throw new ServiceException(e); } } - + @Override public SetQuotaResponseProto setQuota(RpcController controller, SetQuotaRequestProto req) throws ServiceException { @@ -1121,7 +1121,7 @@ public SetQuotaResponseProto setQuota(RpcController controller, throw new ServiceException(e); } } - + @Override public FsyncResponseProto fsync(RpcController controller, FsyncRequestProto req) throws ServiceException { @@ -1212,7 +1212,7 @@ public GetDelegationTokenResponseProto getDelegationToken( try { Token token = server .getDelegationToken(new Text(req.getRenewer())); - GetDelegationTokenResponseProto.Builder rspBuilder = + GetDelegationTokenResponseProto.Builder rspBuilder = GetDelegationTokenResponseProto.newBuilder(); if (token != null) { rspBuilder.setToken(PBHelperClient.convert(token)); @@ -1267,7 +1267,7 @@ public GetDataEncryptionKeyResponseProto getDataEncryptionKey( RpcController controller, GetDataEncryptionKeyRequestProto request) throws ServiceException { try { - GetDataEncryptionKeyResponseProto.Builder builder = + GetDataEncryptionKeyResponseProto.Builder builder = GetDataEncryptionKeyResponseProto.newBuilder(); DataEncryptionKey encryptionKey = server.getDataEncryptionKey(); if (encryptionKey != null) { @@ -1306,7 +1306,7 @@ public DeleteSnapshotResponseProto deleteSnapshot(RpcController controller, throw new ServiceException(e); } } - + @Override public AllowSnapshotResponseProto allowSnapshot(RpcController controller, AllowSnapshotRequestProto req) throws 
ServiceException { @@ -1413,7 +1413,7 @@ public GetSnapshotDiffReportListingResponseProto getSnapshotDiffReportListing( @Override public IsFileClosedResponseProto isFileClosed( - RpcController controller, IsFileClosedRequestProto request) + RpcController controller, IsFileClosedRequestProto request) throws ServiceException { try { boolean result = server.isFileClosed(request.getSrc()); @@ -1497,7 +1497,7 @@ public AddCachePoolResponseProto addCachePool(RpcController controller, throw new ServiceException(e); } } - + @Override public ModifyCachePoolResponseProto modifyCachePool(RpcController controller, ModifyCachePoolRequestProto request) throws ServiceException { @@ -1606,7 +1606,7 @@ public GetAclStatusResponseProto getAclStatus(RpcController controller, throw new ServiceException(e); } } - + @Override public CreateEncryptionZoneResponseProto createEncryptionZone( RpcController controller, CreateEncryptionZoneRequestProto req) @@ -1762,7 +1762,7 @@ public ListXAttrsResponseProto listXAttrs(RpcController controller, throw new ServiceException(e); } } - + @Override public RemoveXAttrResponseProto removeXAttr(RpcController controller, RemoveXAttrRequestProto req) throws ServiceException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7029542905390..4d3e91560ff17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -413,7 +413,7 @@ void logAuditEvent(boolean succeeded, String cmd, String src) throws IOException { logAuditEvent(succeeded, cmd, src, null, null); } - + private void logAuditEvent(boolean succeeded, String cmd, String src, String dst, FileStatus stat) throws IOException { if (isAuditEnabled() && isExternalInvocation()) { @@ -554,7 +554,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ */ private volatile boolean needRollbackFsImage; - final LeaseManager leaseManager = new LeaseManager(this); + final LeaseManager leaseManager = new LeaseManager(this); Daemon nnrmthread = null; // NamenodeResourceMonitor thread @@ -581,7 +581,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ */ private final long editLogRollerThreshold; /** - * Check interval of an active namenode's edit log roller thread + * Check interval of an active namenode's edit log roller thread */ private final int editLogRollerInterval; @@ -593,7 +593,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ private volatile boolean hasResourcesAvailable = false; private volatile boolean fsRunning = true; - + /** The start time of the namesystem. */ private final long startTime = now(); @@ -619,11 +619,11 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ /** Lock to protect FSNamesystem. */ private final FSNamesystemLock fsLock; - /** + /** * Checkpoint lock to protect FSNamesystem modification on standby NNs. * Unlike fsLock, it does not affect block updates. On active NNs, this lock * does not provide proper protection, because there are operations that - * modify both block and name system state. Even on standby, fsLock is + * modify both block and name system state. Even on standby, fsLock is * used when block state changes need to be blocked. 
*/ private final ReentrantLock cpLock; @@ -641,7 +641,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ /** * Reference to the NN's HAContext object. This is only set once - * {@link #startCommonServices(Configuration, HAContext)} is called. + * {@link #startCommonServices(Configuration, HAContext)} is called. */ private HAContext haContext; @@ -841,14 +841,14 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime()); return namesystem; } - + FSNamesystem(Configuration conf, FSImage fsImage) throws IOException { this(conf, fsImage, false); } - + /** * Create an FSNamesystem associated with the specified image. - * + * * Note that this does not load any data off of disk -- if you would * like that behavior, use {@link #loadFromDisk(Configuration)} * @@ -884,7 +884,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT); this.fsOwner = UserGroupInformation.getCurrentUser(); - this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, + this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY, DFS_PERMISSIONS_ENABLED_DEFAULT); @@ -908,8 +908,8 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { // block allocation has to be persisted in HA using a shared edits directory // so that the standby has up-to-date namespace information nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); - this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); - + this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); + // Sanity check the HA-related config. 
if (nameserviceId != null) { LOG.info("Determined nameservice ID: " + nameserviceId); @@ -960,7 +960,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { blockManager.getStoragePolicySuite().getDefaultPolicy().getId(), isSnapshotTrashRootEnabled); - this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, + this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, DFS_NAMENODE_MAX_OBJECTS_DEFAULT); this.minBlockSize = conf.getLongBytes( @@ -984,10 +984,10 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { DFSConfigKeys.DFS_NAMENODE_MAX_CORRUPT_FILE_BLOCKS_RETURNED_DEFAULT); this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf); - + this.standbyShouldCheckpoint = conf.getBoolean( DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT); - // # edit autoroll threshold is a multiple of the checkpoint threshold + // # edit autoroll threshold is a multiple of the checkpoint threshold this.editLogRollerThreshold = (long) (conf.getFloat( DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, @@ -1124,13 +1124,13 @@ void unlockRetryCache() { boolean hasRetryCache() { return retryCache != null; } - + void addCacheEntryWithPayload(byte[] clientId, int callId, Object payload) { if (retryCache != null) { retryCache.addCacheEntryWithPayload(clientId, callId, payload); } } - + void addCacheEntry(byte[] clientId, int callId) { if (retryCache != null) { retryCache.addCacheEntry(clientId, callId); @@ -1267,7 +1267,7 @@ void loadFSImage(StartupOption startOpt) throws IOException { if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt)) { rollingUpgradeInfo = null; } - final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); + final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); LOG.info("Need to save fs image? " + needToSave + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled + ", isRollingUpgrade=" + isRollingUpgrade() + ")"); @@ -1306,7 +1306,7 @@ private void startSecretManager() { } } } - + @Override public void startSecretManagerIfNecessary() { assert hasWriteLock() : "Starting secret manager needs write lock"; @@ -1323,8 +1323,8 @@ private void stopSecretManager() { dtSecretManager.stopThreads(); } } - - /** + + /** * Start services common to both active and standby states */ void startCommonServices(Configuration conf, HAContext haContext) throws IOException { @@ -1344,7 +1344,7 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep } finally { writeUnlock("startCommonServices"); } - + registerMXBean(); DefaultMetricsSystem.instance().register(this); if (inodeAttributeProvider != null) { @@ -1356,8 +1356,8 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep this.nameNodeHostName = (serviceAddress != null) ? serviceAddress.getHostName() : ""; } - - /** + + /** * Stop services common to both active and standby states */ void stopCommonServices() { @@ -1373,7 +1373,7 @@ void stopCommonServices() { } RetryCache.clear(retryCache); } - + /** * Start services required in active state * @throws IOException @@ -1384,17 +1384,17 @@ void startActiveServices() throws IOException { writeLock(); try { FSEditLog editLog = getFSImage().getEditLog(); - + if (!editLog.isOpenForWrite()) { // During startup, we're already open for write during initialization. 
editLog.initJournalsForWrite(); // May need to recover editLog.recoverUnclosedStreams(true); - + LOG.info("Catching up to latest edits from old active before " + "taking over writer role in edits logs"); editLogTailer.catchupDuringFailover(); - + blockManager.setPostponeBlocksFromFuture(false); blockManager.getDatanodeManager().markAllDatanodesStale(); blockManager.clearQueues(); @@ -1414,7 +1414,7 @@ void startActiveServices() throws IOException { } long nextTxId = getFSImage().getLastAppliedTxId() + 1; - LOG.info("Will take over writing edit logs at txnid " + + LOG.info("Will take over writing edit logs at txnid " + nextTxId); editLog.setNextTxId(nextTxId); @@ -1493,7 +1493,7 @@ private boolean shouldUseDelegationTokens() { alwaysUseDelegationTokensForTests; } - /** + /** * Stop services required in active state */ void stopActiveServices() { @@ -1549,10 +1549,10 @@ void stopActiveServices() { writeUnlock("stopActiveServices"); } } - + /** * Start services required in standby or observer state - * + * * @throws IOException */ void startStandbyServices(final Configuration conf, boolean isObserver) @@ -1625,7 +1625,7 @@ public void checkOperation(OperationCategory op) throws StandbyException { getSnapshotManager().initThreadLocals(); } } - + /** * @throws RetriableException * If 1) The NameNode is in SafeMode, 2) HA is enabled, and 3) @@ -1663,7 +1663,7 @@ public static Collection getNamespaceDirs(Configuration conf) { /** * Get all edits dirs which are required. If any shared edits dirs are * configured, these are also included in the set of required dirs. - * + * * @param conf the HDFS configuration. * @return all required dirs. */ @@ -1679,7 +1679,7 @@ private static Collection getStorageDirs(Configuration conf, Collection dirNames = conf.getTrimmedStringCollection(propertyName); StartupOption startOpt = NameNode.getStartupOption(conf); if(startOpt == StartupOption.IMPORT) { - // In case of IMPORT this will get rid of default directories + // In case of IMPORT this will get rid of default directories // but will retain directories specified in hdfs-site.xml // When importing image from a checkpoint, the name-node can // start with empty set of storage directories. @@ -1694,7 +1694,7 @@ private static Collection getStorageDirs(Configuration conf, "\n\tThe NameNode currently runs without persistent storage." + "\n\tAny changes to the file system meta-data may be lost." + "\n\tRecommended actions:" + - "\n\t\t- shutdown and restart NameNode with configured \"" + "\n\t\t- shutdown and restart NameNode with configured \"" + propertyName + "\" in hdfs-site.xml;" + "\n\t\t- use Backup Node as a persistent and up-to-date storage " + "of the file system meta-data."); @@ -1718,38 +1718,38 @@ public static List getNamespaceEditsDirs(Configuration conf) throws IOException { return getNamespaceEditsDirs(conf, true); } - + public static List getNamespaceEditsDirs(Configuration conf, boolean includeShared) throws IOException { // Use a LinkedHashSet so that order is maintained while we de-dup // the entries. LinkedHashSet editsDirs = new LinkedHashSet(); - + if (includeShared) { List sharedDirs = getSharedEditsDirs(conf); - + // Fail until multiple shared edits directories are supported (HDFS-2782) if (sharedDirs.size() > 1) { throw new IOException( "Multiple shared edits directories are not yet supported"); } - + // First add the shared edits dirs. 
It's critical that the shared dirs // are added first, since JournalSet syncs them in the order they are listed, // and we need to make sure all edits are in place in the shared storage // before they are replicated locally. See HDFS-2874. for (URI dir : sharedDirs) { if (!editsDirs.add(dir)) { - LOG.warn("Edits URI " + dir + " listed multiple times in " + + LOG.warn("Edits URI " + dir + " listed multiple times in " + DFS_NAMENODE_SHARED_EDITS_DIR_KEY + ". Ignoring duplicates."); } } - } + } // Now add the non-shared dirs. for (URI dir : getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_KEY)) { if (!editsDirs.add(dir)) { - LOG.warn("Edits URI " + dir + " listed multiple times in " + + LOG.warn("Edits URI " + dir + " listed multiple times in " + DFS_NAMENODE_SHARED_EDITS_DIR_KEY + " and " + DFS_NAMENODE_EDITS_DIR_KEY + ". Ignoring duplicates."); } @@ -1763,7 +1763,7 @@ public static List getNamespaceEditsDirs(Configuration conf, return Lists.newArrayList(editsDirs); } } - + /** * Returns edit directories that are shared between primary and secondary. * @param conf configuration @@ -1862,7 +1862,7 @@ public void cpLockInterruptibly() throws InterruptedException { public void cpUnlock() { this.cpLock.unlock(); } - + NamespaceInfo getNamespaceInfo() { readLock(); @@ -2273,7 +2273,7 @@ private void sortLocatedBlocks(String clientMachine, LocatedBlocks blocks) { * Moves all the blocks from {@code srcs} and appends them to {@code target} * To avoid rollbacks we will verify validity of ALL of the args * before we start actual move. - * + * * This does not support ".inodes" relative path * @param target target to concat into * @param srcs file that will be concatenated @@ -2306,7 +2306,7 @@ void concat(String target, String [] srcs, boolean logRetryCache) } /** - * stores the modification and access time for this inode. + * stores the modification and access time for this inode. * The access time is precise up to an hour. The transaction, if needed, is * written to the edits log but is not flushed. */ @@ -2421,15 +2421,15 @@ void createSymlink(String target, String link, /** * Set replication for an existing file. - * - * The NameNode sets new replication and schedules either replication of - * under-replicated data blocks or removal of the excessive block copies + * + * The NameNode sets new replication and schedules either replication of + * under-replicated data blocks or removal of the excessive block copies * if the blocks are over-replicated. - * + * * @see ClientProtocol#setReplication(String, short) * @param src file name * @param replication new replication - * @return true if successful; + * @return true if successful; * false if file does not exist or is a directory * @throws IOException */ @@ -2649,7 +2649,7 @@ long getPreferredBlockSize(String src) throws IOException { } /** - * If the file is within an encryption zone, select the appropriate + * If the file is within an encryption zone, select the appropriate * CryptoProtocolVersion from the list provided by the client. Since the * client may be newer, we need to handle unknown versions. * @@ -2687,7 +2687,7 @@ CryptoProtocolVersion chooseProtocolVersion( /** * Create a new file entry in the namespace. 
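The ordering and de-duplication contract spelled out in the getNamespaceEditsDirs hunks above (shared edits URIs first, local URIs after, duplicates dropped with a warning) is carried entirely by LinkedHashSet insertion order. A self-contained sketch of the same pattern; the URIs are made up for illustration:

import java.net.URI;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public class EditsDirOrderingSketch {
  public static void main(String[] args) {
    // Shared dirs go in first so JournalSet syncs them before local copies.
    List<URI> shared = Arrays.asList(URI.create("qjournal://jn1:8485/ns1"));
    List<URI> local = Arrays.asList(
        URI.create("file:///data/1/dfs/edits"),
        URI.create("file:///data/1/dfs/edits")); // duplicate, dropped below

    LinkedHashSet<URI> editsDirs = new LinkedHashSet<>();
    editsDirs.addAll(shared);
    for (URI dir : local) {
      if (!editsDirs.add(dir)) {
        System.err.println("Edits URI " + dir
            + " listed multiple times. Ignoring duplicates.");
      }
    }
    // Prints the shared dir first, then the one unique local dir.
    System.out.println(editsDirs);
  }
}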
- * + * * For description of parameters and exceptions thrown see * {@link ClientProtocol#create}, except it returns valid file status upon * success @@ -2836,7 +2836,7 @@ private HdfsFileStatus startFileInt(String src, * Recover lease; * Immediately revoke the lease of the current lease holder and start lease * recovery so that the file can be forced to be closed. - * + * * @param src the path of the file to start lease recovery * @param holder the lease holder's name * @param clientMachine the client machine's name @@ -2863,7 +2863,7 @@ boolean recoverLease(String src, String holder, String clientMachine) if (isPermissionEnabled) { dir.checkPathAccess(pc, iip, FsAction.WRITE); } - + return recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE, iip, src, holder, clientMachine, true); } catch (StandbyException se) { @@ -2884,7 +2884,7 @@ enum RecoverLeaseOp { APPEND_FILE, TRUNCATE_FILE, RECOVER_LEASE; - + public String getExceptionMessage(String src, String holder, String clientMachine, String reason) { return "Failed to " + this + " " + src + " for " + holder + @@ -2926,7 +2926,7 @@ boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip, "the file is under construction but no leases found.")); } if (force) { - // close now: no need to wait for soft lease expiration and + // close now: no need to wait for soft lease expiration and // close only the file src LOG.info("recoverLease: " + lease + ", src=" + src + " from client " + clientName); @@ -2936,7 +2936,7 @@ boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip, "Current lease holder " + lease.getHolder() + " does not match file creator " + clientName; // - // If the original holder has not renewed in the last SOFTLIMIT + // If the original holder has not renewed in the last SOFTLIMIT // period, then start lease recovery. // if (lease.expiredSoftLimit()) { @@ -3018,7 +3018,7 @@ LastBlockWithStatus appendFile(String srcArg, String holder, ExtendedBlock getExtendedBlock(Block blk) { return new ExtendedBlock(getBlockPoolId(), blk); } - + void setBlockPoolId(String bpid) { blockManager.setBlockPoolId(bpid); } @@ -3132,7 +3132,7 @@ LocatedBlock getAdditionalDatanode(String src, long fileId, // choose new datanodes. final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode( - src, numAdditionalNodes, clientnode, chosen, + src, numAdditionalNodes, clientnode, chosen, excludes, preferredblocksize, storagePolicyID, blockType); final LocatedBlock lb = BlockManager.newLocatedBlock( blk, targets, -1, false); @@ -3201,7 +3201,7 @@ INodeFile checkLease(INodesInPath iip, String holder, long fileId) } return file; } - + /** * Complete in-progress write to the given file. * @return true if successful, false if the client should continue to retry @@ -3283,7 +3283,7 @@ private boolean checkBlocksComplete(String src, boolean allowCommittedBlock, } /** - * Change the indicated filename. + * Change the indicated filename. * @deprecated Use {@link #renameTo(String, String, boolean, * Options.Rename...)} instead. */ @@ -3358,8 +3358,8 @@ void renameTo(final String src, final String dst, /** * Remove the indicated file from namespace. 
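The forced path through recoverLeaseInternal above is what a client triggers via DistributedFileSystem.recoverLease. A hedged client-side sketch: the path and the one-second poll interval are illustrative, and the cast assumes fs.defaultFS points at an HDFS URI:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/stuck-file"); // illustrative path
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // true means the file is now closed; false means block recovery
      // was initiated and the caller should poll again.
      while (!dfs.recoverLease(file)) {
        Thread.sleep(1000L);
      }
    }
  }
}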
- * - * @see ClientProtocol#delete(String, boolean) for detailed description and + * + * @see ClientProtocol#delete(String, boolean) for detailed description and * description of exceptions */ boolean delete(String src, boolean recursive, boolean logRetryCache) @@ -3609,7 +3609,7 @@ QuotaUsage getQuotaUsage(final String src) throws IOException { * Set the namespace quota and storage space quota for a directory. * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the * contract. - * + * * Note: This does not support ".inodes" relative path. */ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) @@ -3650,7 +3650,7 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) * @param fileId The inode ID that we're fsyncing. Older clients will pass * INodeId.GRANDFATHER_INODE_ID here. * @param clientName The string representation of the client - * @param lastBlockLength The length of the last block + * @param lastBlockLength The length of the last block * under construction reported from client. * @throws IOException if path does not exist */ @@ -3688,7 +3688,7 @@ void fsync(String src, long fileId, String clientName, long lastBlockLength) * replication;
* RecoveryInProgressException if lease recovery is in progress.
* IOException in case of an error. - * @return true if file has been successfully finalized and closed or + * @return true if file has been successfully finalized and closed or * false if block recovery has been initiated. Since the lease owner * has been changed and logged, caller should call logSync(). */ @@ -3761,7 +3761,7 @@ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, " replicated, lease removed, file" + src + " closed."); return true; // closed! } - // Cannot close file right now, since some blocks + // Cannot close file right now, since some blocks // are not yet minimally replicated. // This may potentially cause infinite loop in lease recovery // if there are no valid replicas on data-nodes. @@ -3847,7 +3847,7 @@ private Lease reassignLease(Lease lease, String src, String newHolder, logReassignLease(lease.getHolder(), src, newHolder); return reassignLeaseInternal(lease, newHolder, pendingFile); } - + Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile) { assert hasWriteLock(); pendingFile.getFileUnderConstructionFeature().setClientName(newHolder); @@ -3933,9 +3933,9 @@ public boolean isInSnapshot(long blockCollectionID) { /* * 1. if bc is under construction and also with snapshot, and * bc is not in the current fsdirectory tree, bc must represent a snapshot - * file. - * 2. if fullName is not an absolute path, bc cannot be existent in the - * current fsdirectory tree. + * file. + * 2. if fullName is not an absolute path, bc cannot be existent in the + * current fsdirectory tree. * 3. if bc is not the current node associated with fullName, bc must be a * snapshot inode. */ @@ -3973,7 +3973,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock, checkOperation(OperationCategory.WRITE); // If a DN tries to commit to the standby, the recovery will // fail, and the next retry will succeed on the new NN. - + checkNameNodeSafeMode( "Cannot commitBlockSynchronization while in safe mode"); final BlockInfo storedBlock = getStoredBlock( @@ -4152,13 +4152,13 @@ void renewLease(String holder) throws IOException { * @param startAfter the name to start after * @param needLocation if blockLocations need to be returned * @return a partial listing starting after startAfter - * + * * @throws AccessControlException if access is denied * @throws UnresolvedLinkException if symbolic link is encountered * @throws IOException if other I/O error occurred */ DirectoryListing getListing(String src, byte[] startAfter, - boolean needLocation) + boolean needLocation) throws IOException { checkOperation(OperationCategory.READ); final String operationName = "listStatus"; @@ -4350,15 +4350,15 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter, * data storage is reported the namenode issues a new unique storageID. *

* Finally, the namenode returns its namespaceID as the registrationID - * for the datanodes. + * for the datanodes. * namespaceID is a persistent attribute of the name space. * The registrationID is checked every time the datanode is communicating - * with the namenode. + * with the namenode. * Datanodes with inappropriate registrationID are rejected. - * If the namenode stops, and then restarts it can restore its + * If the namenode stops, and then restarts it can restore its * namespaceID and will continue serving the datanodes that has previously * registered with the namenode without restarting the whole cluster. - * + * * @see org.apache.hadoop.hdfs.server.datanode.DataNode */ void registerDatanode(DatanodeRegistration nodeReg) throws IOException { @@ -4369,10 +4369,10 @@ void registerDatanode(DatanodeRegistration nodeReg) throws IOException { writeUnlock("registerDatanode"); } } - + /** * Get registrationID for datanodes based on the namespaceID. - * + * * @see #registerDatanode(DatanodeRegistration) * @return registration ID */ @@ -4691,20 +4691,20 @@ public long getMissingReplOneBlocksCount() { // not locking return blockManager.getMissingReplOneBlocksCount(); } - + @Metric(value = {"ExpiredHeartbeats", "Number of expired heartbeats"}, type = Metric.Type.COUNTER) public int getExpiredHeartbeats() { return datanodeStatistics.getExpiredHeartbeats(); } - + @Metric({"TransactionsSinceLastCheckpoint", "Number of transactions since last checkpoint"}) public long getTransactionsSinceLastCheckpoint() { return getFSImage().getLastAppliedOrWrittenTxId() - getNNStorage().getMostRecentCheckpointTxId(); } - + @Metric({"TransactionsSinceLastLogRoll", "Number of transactions since last edit log roll"}) public long getTransactionsSinceLastLogRoll() { @@ -4733,7 +4733,7 @@ private long getCorrectTransactionsSinceLastLogRoll() { public long getLastWrittenTransactionId() { return getEditLog().getLastWrittenTxIdWithoutLock(); } - + @Metric({"LastCheckpointTime", "Time in milliseconds since the epoch of the last checkpoint"}) public long getLastCheckpointTime() { @@ -4843,7 +4843,7 @@ public long getCapacityUsedNonDFS() { public int getTotalLoad() { return datanodeStatistics.getXceiverCount(); } - + @Metric({ "SnapshottableDirectories", "Number of snapshottable directories" }) public int getNumSnapshottableDirs() { return this.snapshotManager.getNumSnapshottableDirs(); @@ -4914,7 +4914,7 @@ int getNumberOfDatanodes(DatanodeReportType type) { readLock(); try { return getBlockManager().getDatanodeManager().getDatanodeListForReport( - type).size(); + type).size(); } finally { readUnlock("getNumberOfDatanodes"); } @@ -4955,7 +4955,7 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type) readLock(); try { checkOperation(OperationCategory.UNCHECKED); - final DatanodeManager dm = getBlockManager().getDatanodeManager(); + final DatanodeManager dm = getBlockManager().getDatanodeManager(); final List results = dm.getDatanodeListForReport(type); arr = getDatanodeInfoFromDescriptors(results); } finally { @@ -4974,7 +4974,7 @@ DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type readLock(); try { checkOperation(OperationCategory.UNCHECKED); - final DatanodeManager dm = getBlockManager().getDatanodeManager(); + final DatanodeManager dm = getBlockManager().getDatanodeManager(); reports = dm.getDatanodeStorageReport(type); } finally { readUnlock(operationName, getLockReportInfoSupplier(null)); @@ -5015,11 +5015,11 @@ boolean saveNamespace(final long timeWindow, final long txGap) 
logAuditEvent(true, operationName, null); return saved; } - + /** * Enables/Disables/Checks restoring failed storage replicas if the storage becomes available again. * Requires superuser privilege. - * + * * @throws AccessControlException if superuser privilege is violated. */ boolean restoreFailedStorage(String arg) throws IOException { @@ -5031,7 +5031,7 @@ boolean restoreFailedStorage(String arg) throws IOException { writeLock(); try { checkOperation(OperationCategory.UNCHECKED); - + // if it is disabled - enable it and vice versa. if(arg.equals("check")) { val = getNNStorage().getRestoreFailedStorage(); @@ -5048,9 +5048,9 @@ boolean restoreFailedStorage(String arg) throws IOException { } Date getStartTime() { - return new Date(startTime); + return new Date(startTime); } - + void finalizeUpgrade() throws IOException { String operationName = "finalizeUpgrade"; checkSuperuserPrivilege(operationName); @@ -5110,7 +5110,7 @@ boolean setSafeMode(SafeModeAction action) throws IOException { } /** - * Get the total number of blocks in the system. + * Get the total number of blocks in the system. */ @Override // FSNamesystemMBean @Metric @@ -5199,7 +5199,7 @@ void leaveSafeMode(boolean force) { writeLock(); try { if (!isInSafeMode()) { - NameNode.stateChangeLog.info("STATE* Safe mode is already OFF"); + NameNode.stateChangeLog.info("STATE* Safe mode is already OFF"); return; } if (blockManager.leaveSafeMode(force)) { @@ -5267,7 +5267,7 @@ NamenodeCommand startCheckpoint(NamenodeRegistration backupNode, try { checkOperation(OperationCategory.CHECKPOINT); checkNameNodeSafeMode("Checkpoint not started"); - + LOG.info("Start checkpoint for " + backupNode.getAddress()); NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode, activeNamenode, getEffectiveLayoutVersion()); @@ -5288,7 +5288,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID, writeUnlock("processIncrementalBlockReport"); } } - + void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException { checkOperation(OperationCategory.CHECKPOINT); @@ -5531,7 +5531,7 @@ public long getExcessBlocks() { public long getNumTimedOutPendingReconstructions() { return blockManager.getNumTimedOutPendingReconstructions(); } - + // HA-only metric @Metric public long getPostponedMisreplicatedBlocks() { @@ -5543,7 +5543,7 @@ public long getPostponedMisreplicatedBlocks() { public int getPendingDataNodeMessageCount() { return blockManager.getPendingDataNodeMessageCount(); } - + // HA-only metric @Metric public String getHAState() { @@ -5573,7 +5573,7 @@ public HAServiceState getState() { public String getFSState() { return isInSafeMode() ? 
"safeMode" : "Operational"; } - + private ObjectName namesystemMBeanName, replicatedBlocksMBeanName, ecBlockGroupsMBeanName, namenodeMXBeanName; @@ -5660,7 +5660,7 @@ public int getNumLiveDataNodes() { public int getNumDeadDataNodes() { return getBlockManager().getDatanodeManager().getNumDeadDataNodes(); } - + @Override // FSNamesystemMBean @Metric({"NumDecomLiveDataNodes", "Number of datanodes which have been decommissioned and are now live"}) @@ -5739,7 +5739,7 @@ public int getNumDecommissioningDataNodes() { } @Override // FSNamesystemMBean - @Metric({"StaleDataNodes", + @Metric({"StaleDataNodes", "Number of datanodes marked stale due to delayed heartbeat"}) public int getNumStaleDataNodes() { return getBlockManager().getDatanodeManager().getNumStaleNodes(); @@ -5852,7 +5852,7 @@ private INodeFile checkUCBlock(ExtendedBlock block, assert hasWriteLock(); checkNameNodeSafeMode("Cannot get a new generation stamp and an " + "access token for block " + block); - + // check stored block state BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block)); if (storedBlock == null) { @@ -5863,25 +5863,25 @@ private INodeFile checkUCBlock(ExtendedBlock block, + " is " + storedBlock.getBlockUCState() + " but not " + BlockUCState.UNDER_CONSTRUCTION); } - + // check file inode final INodeFile file = getBlockCollection(storedBlock); if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) { - throw new IOException("The file " + storedBlock + + throw new IOException("The file " + storedBlock + " belonged to does not exist or it is not under construction."); } - + // check lease if (clientName == null || !clientName.equals(file.getFileUnderConstructionFeature() .getClientName())) { - throw new LeaseExpiredException("Lease mismatch: " + block + - " is accessed by a non lease holder " + clientName); + throw new LeaseExpiredException("Lease mismatch: " + block + + " is accessed by a non lease holder " + clientName); } return file; } - + /** * Client is reporting some bad block locations. */ @@ -5908,12 +5908,12 @@ void reportBadBlocks(LocatedBlock[] blocks) throws IOException { } /** - * Get a new generation stamp together with an access token for + * Get a new generation stamp together with an access token for * a block under construction. - * + * * This method is called for recovering a failed write or setting up * a block for appended. - * + * * @param block a block * @param clientName the name of a client * @return a located block with a new generation stamp and an access token @@ -5929,7 +5929,7 @@ LocatedBlock bumpBlockGenerationStamp(ExtendedBlock block, // check vadility of parameters final INodeFile file = checkUCBlock(block, clientName); - + // get a new generation stamp and an access token block.setGenerationStamp(nextGenerationStamp( blockManager.isLegacyBlock(block.getLocalBlock()))); @@ -5945,10 +5945,10 @@ LocatedBlock bumpBlockGenerationStamp(ExtendedBlock block, getEditLog().logSync(); return locatedBlock; } - + /** * Update a pipeline for a block under construction. - * + * * @param clientName the name of the client * @param oldBlock and old block * @param newBlock a new block with a new generation stamp and length @@ -6027,7 +6027,7 @@ private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, * Register a Backup name-node, verifying that it belongs * to the correct namespace, and adding it to the set of * active journals if necessary. 
- * + * * @param bnReg registration of the new BackupNode * @param nnReg registration of this NameNode * @throws IOException if the namespace IDs do not match @@ -6080,12 +6080,12 @@ void releaseBackupNode(NamenodeRegistration registration) static class CorruptFileBlockInfo { final String path; final Block block; - + public CorruptFileBlockInfo(String p, Block b) { path = p; block = b; } - + @Override public String toString() { return block.getBlockName() + "\t" + path; @@ -6250,7 +6250,7 @@ Token getDelegationToken(Text renewer) } /** - * + * * @param token token to renew * @return new expiryTime of the token * @throws InvalidToken if {@code token} is invalid @@ -6294,7 +6294,7 @@ long renewDelegationToken(Token token) } /** - * + * * @param token token to cancel * @throws IOException on error */ @@ -6356,11 +6356,11 @@ void loadSecretManagerState(SecretManagerSection s, /** * Log the updateMasterKey operation to edit logs. - * + * * @param key new delegation key. */ public void logUpdateMasterKey(DelegationKey key) { - + assert !isInSafeMode() : "this should never be called while in safemode, since we stop " + "the DT manager before entering safemode!"; @@ -6370,10 +6370,10 @@ public void logUpdateMasterKey(DelegationKey key) { getEditLog().logUpdateMasterKey(key); getEditLog().logSync(); } - + /** * Log the cancellation of expired tokens to edit logs. - * + * * @param id token identifier to cancel */ public void logExpireDelegationToken(DelegationTokenIdentifier id) { @@ -6385,16 +6385,16 @@ public void logExpireDelegationToken(DelegationTokenIdentifier id) { assert hasReadLock(); // do not logSync so expiration edits are batched getEditLog().logCancelDelegationToken(id); - } - + } + private void logReassignLease(String leaseHolder, String src, String newHolder) { assert hasWriteLock(); getEditLog().logReassignLease(leaseHolder, src, newHolder); } - + /** - * + * * @return true if delegation token operation is allowed */ private boolean isAllowedDelegationTokenOp() throws IOException { @@ -6407,7 +6407,7 @@ private boolean isAllowedDelegationTokenOp() throws IOException { } return true; } - + /** * Returns authentication method used to establish the connection. * @return AuthenticationMethod used to establish connection @@ -6422,9 +6422,9 @@ private AuthenticationMethod getConnectionAuthenticationMethod() } return authMethod; } - + /** - * Client invoked methods are invoked over RPC and will be in + * Client invoked methods are invoked over RPC and will be in * RPC call context even if the client exits. 
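isAllowedDelegationTokenOp above reduces to one question: was the connection authenticated by something stronger than a delegation token? A standalone sketch of the same predicate using the public UserGroupInformation API; it inspects the current UGI rather than the RPC call context, so treat it as an approximation:

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

public class TokenOpCheckSketch {
  // Token operations are allowed when security is off, or when the caller
  // authenticated with Kerberos/SSL/certificate rather than a token.
  static boolean isAllowedDelegationTokenOp() throws IOException {
    AuthenticationMethod authMethod =
        UserGroupInformation.getCurrentUser().getRealAuthenticationMethod();
    if (UserGroupInformation.isSecurityEnabled()
        && authMethod != AuthenticationMethod.KERBEROS
        && authMethod != AuthenticationMethod.KERBEROS_SSL
        && authMethod != AuthenticationMethod.CERTIFICATE) {
      return false;
    }
    return true;
  }

  public static void main(String[] args) throws IOException {
    System.out.println("token ops allowed: " + isAllowedDelegationTokenOp());
  }
}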
*/ boolean isExternalInvocation() { @@ -6545,7 +6545,7 @@ public long getTotalBlocks() { public long getNumberOfMissingBlocks() { return getMissingBlocksCount(); } - + @Override // NameNodeMXBean public long getNumberOfMissingBlocksWithReplicationFactorOne() { return getMissingReplOneBlocksCount(); @@ -6562,7 +6562,7 @@ public int getThreads() { */ @Override // NameNodeMXBean public String getLiveNodes() { - final Map> info = + final Map> info = new HashMap>(); final List live = new ArrayList(); blockManager.getDatanodeManager().fetchDatanodes(live, null, false); @@ -6616,7 +6616,7 @@ public String getLiveNodes() { */ @Override // NameNodeMXBean public String getDeadNodes() { - final Map> info = + final Map> info = new HashMap>(); final List dead = new ArrayList(); blockManager.getDatanodeManager().fetchDatanodes(null, dead, false); @@ -6640,7 +6640,7 @@ public String getDeadNodes() { */ @Override // NameNodeMXBean public String getDecomNodes() { - final Map> info = + final Map> info = new HashMap>(); final List decomNodeList = blockManager.getDatanodeManager( ).getDecommissioningNodes(); @@ -6705,17 +6705,17 @@ private long getDfsUsed(DatanodeDescriptor alivenode) { public String getClusterId() { return getNNStorage().getClusterID(); } - + @Override // NameNodeMXBean public String getBlockPoolId() { return getBlockManager().getBlockPoolId(); } - + @Override // NameNodeMXBean public String getNameDirStatuses() { Map> statusMap = new HashMap>(); - + Map activeDirs = new HashMap(); for (Iterator it = getNNStorage().dirIterator(); it.hasNext();) { @@ -6723,7 +6723,7 @@ public String getNameDirStatuses() { activeDirs.put(st.getRoot(), st.getStorageDirType()); } statusMap.put("active", activeDirs); - + List removedStorageDirs = getNNStorage().getRemovedStorageDirs(); Map failedDirs = new HashMap(); @@ -6731,7 +6731,7 @@ public String getNameDirStatuses() { failedDirs.put(st.getRoot(), st.getStorageDirType()); } statusMap.put("failed", failedDirs); - + return JSON.toString(statusMap); } @@ -6826,7 +6826,7 @@ public String getJournalTransactionInfo() { Long.toString(this.getFSImage().getMostRecentCheckpointTxId())); return JSON.toString(txnIdMap); } - + @Override // NameNodeMXBean public long getNNStartedTimeInMillis() { return startTime; @@ -7000,27 +7000,27 @@ public synchronized void verifyToken(DelegationTokenIdentifier identifier, public EditLogTailer getEditLogTailer() { return editLogTailer; } - + @VisibleForTesting public void setEditLogTailerForTests(EditLogTailer tailer) { this.editLogTailer = tailer; } - + @VisibleForTesting void setFsLockForTests(ReentrantReadWriteLock lock) { this.fsLock.coarseLock = lock; } - + @VisibleForTesting public ReentrantReadWriteLock getFsLockForTests() { return fsLock.coarseLock; } - + @VisibleForTesting public ReentrantLock getCpLockForTests() { return cpLock; } - + @VisibleForTesting public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { this.nnResourceChecker = nnResourceChecker; @@ -7029,7 +7029,7 @@ public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { public SnapshotManager getSnapshotManager() { return snapshotManager; } - + /** Allow snapshot on a directory. */ void allowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); @@ -7046,7 +7046,7 @@ void allowSnapshot(String path) throws IOException { getEditLog().logSync(); logAuditEvent(true, operationName, path, null, null); } - + /** Disallow snapshot on a directory. 
*/ void disallowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); @@ -7063,7 +7063,7 @@ void disallowSnapshot(String path) throws IOException { getEditLog().logSync(); logAuditEvent(true, operationName, path, null, null); } - + /** * Create a snapshot * @param snapshotRoot The directory path where the snapshot is taken @@ -7095,14 +7095,14 @@ String createSnapshot(String snapshotRoot, String snapshotName, snapshotPath, null); return snapshotPath; } - + /** * Rename a snapshot * @param path The directory path where the snapshot was taken * @param snapshotOldName Old snapshot name * @param snapshotNewName New snapshot name * @throws SafeModeException - * @throws IOException + * @throws IOException */ void renameSnapshot( String path, String snapshotOldName, String snapshotNewName, @@ -7135,8 +7135,8 @@ void renameSnapshot( } /** - * Get the list of snapshottable directories that are owned - * by the current user. Return all the snapshottable directories if the + * Get the list of snapshottable directories that are owned + * by the current user. Return all the snapshottable directories if the * current user is a super user. * @return The list of all the current snapshottable directories. * @throws IOException If an I/O error occurred. @@ -7319,7 +7319,7 @@ SnapshotDiffReportListing getSnapshotDiffReportListing(String path, null); return diffs; } - + /** * Delete a snapshot of a snapshottable directory * @param snapshotRoot The snapshottable directory @@ -7462,7 +7462,7 @@ void startRollingUpgradeInternal(long startTime) /** * Update internal state to indicate that a rolling upgrade is in progress for * non-HA setup. This requires the namesystem is in SafeMode and after doing a - * checkpoint for rollback the namesystem will quit the safemode automatically + * checkpoint for rollback the namesystem will quit the safemode automatically */ private void startRollingUpgradeInternalForNonHA(long startTime) throws IOException { @@ -8882,7 +8882,7 @@ private static void enableAsyncAuditLog(Configuration conf) { logger.removeAppender(appender); asyncAppender.addAppender(appender); } - logger.addAppender(asyncAppender); + logger.addAppender(asyncAppender); } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index f2adbf29a4871..25e0f6e302c0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -239,12 +239,12 @@ @InterfaceAudience.Private @VisibleForTesting public class NameNodeRpcServer implements NamenodeProtocols { - + private static final Logger LOG = NameNode.LOG; private static final Logger stateChangeLog = NameNode.stateChangeLog; private static final Logger blockStateChangeLog = NameNode .blockStateChangeLog; - + // Dependencies from other parts of NN. 
protected final FSNamesystem namesystem; protected final NameNode nn; @@ -261,11 +261,11 @@ public class NameNodeRpcServer implements NamenodeProtocols { /** The RPC server that listens to lifeline requests */ private final RPC.Server lifelineRpcServer; private final InetSocketAddress lifelineRPCAddress; - + /** The RPC server that listens to requests from clients */ protected final RPC.Server clientRpcServer; protected final InetSocketAddress clientRpcAddress; - + private final String minimumDataNodeVersion; private final String defaultECPolicyName; @@ -280,23 +280,23 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) this.retryCache = namesystem.getRetryCache(); this.metrics = NameNode.getNameNodeMetrics(); - int handlerCount = - conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, + int handlerCount = + conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY, DFS_NAMENODE_HANDLER_COUNT_DEFAULT); ipProxyUsers = conf.getStrings(DFS_NAMENODE_IP_PROXY_USERS); RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine2.class); - ClientNamenodeProtocolServerSideTranslatorPB - clientProtocolServerTranslator = + ClientNamenodeProtocolServerSideTranslatorPB + clientProtocolServerTranslator = new ClientNamenodeProtocolServerSideTranslatorPB(this); BlockingService clientNNPbService = ClientNamenodeProtocol. newReflectiveBlockingService(clientProtocolServerTranslator); int maxDataLength = conf.getInt(IPC_MAXIMUM_DATA_LENGTH, IPC_MAXIMUM_DATA_LENGTH_DEFAULT); - DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator = + DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator = new DatanodeProtocolServerSideTranslatorPB(this, maxDataLength); BlockingService dnProtoPbService = DatanodeProtocolService .newReflectiveBlockingService(dnProtoPbTranslator); @@ -306,22 +306,22 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) BlockingService lifelineProtoPbService = DatanodeLifelineProtocolService .newReflectiveBlockingService(lifelineProtoPbTranslator); - NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator = + NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator = new NamenodeProtocolServerSideTranslatorPB(this); BlockingService NNPbService = NamenodeProtocolService .newReflectiveBlockingService(namenodeProtocolXlator); - - RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator = + + RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator = new RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(this); BlockingService refreshAuthService = RefreshAuthorizationPolicyProtocolService .newReflectiveBlockingService(refreshAuthPolicyXlator); - RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = + RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB(this); BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService .newReflectiveBlockingService(refreshUserMappingXlator); - RefreshCallQueueProtocolServerSideTranslatorPB refreshCallQueueXlator = + RefreshCallQueueProtocolServerSideTranslatorPB refreshCallQueueXlator = new RefreshCallQueueProtocolServerSideTranslatorPB(this); BlockingService refreshCallQueueService = RefreshCallQueueProtocolService .newReflectiveBlockingService(refreshCallQueueXlator); @@ -331,12 +331,12 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) BlockingService genericRefreshService = GenericRefreshProtocolService .newReflectiveBlockingService(genericRefreshXlator); 
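Every protocol registered in this constructor follows the same three-step pattern: select the protobuf RPC engine, wrap the server in a generated server-side translator, and expose it as a reflective BlockingService. A condensed sketch of that pattern for the client protocol; this is a hedged outline rather than a drop-in replacement, and the shaded BlockingService import assumes a recent Hadoop 3.x (older releases import com.google.protobuf.BlockingService):

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.thirdparty.protobuf.BlockingService;

public class RpcWiringSketch {
  // Condensed version of the wiring above for a single protocol.
  static RPC.Server buildClientRpcServer(Configuration conf,
      NamenodeProtocols server, InetSocketAddress addr, int handlerCount)
      throws IOException {
    // 1. Select the protobuf engine for this protocol.
    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
        ProtobufRpcEngine2.class);
    // 2. Wrap the implementation in the generated translator.
    ClientNamenodeProtocolServerSideTranslatorPB translator =
        new ClientNamenodeProtocolServerSideTranslatorPB(server);
    BlockingService service =
        ClientNamenodeProtocol.newReflectiveBlockingService(translator);
    // 3. Build the server; further protocols are attached afterwards
    //    (the real code uses DFSUtil.addPBProtocol for those).
    return new RPC.Builder(conf)
        .setProtocol(ClientNamenodeProtocolPB.class)
        .setInstance(service)
        .setBindAddress(addr.getHostName())
        .setPort(addr.getPort())
        .setNumHandlers(handlerCount)
        .build();
  }
}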
- GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = + GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = new GetUserMappingsProtocolServerSideTranslatorPB(this); BlockingService getUserMappingService = GetUserMappingsProtocolService .newReflectiveBlockingService(getUserMappingXlator); - - HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = + + HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator = new HAServiceProtocolServerSideTranslatorPB(this); BlockingService haPbService = HAServiceProtocolService .newReflectiveBlockingService(haServiceProtocolXlator); @@ -380,14 +380,14 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) serviceRpcServer); DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, refreshAuthService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, refreshUserMappingService, serviceRpcServer); // We support Refreshing call queue here in case the client RPC queue is full DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, refreshCallQueueService, serviceRpcServer); DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, genericRefreshService, serviceRpcServer); - DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, serviceRpcServer); // Update the address with the correct port @@ -481,15 +481,15 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) clientRpcServer); DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService, clientRpcServer); - DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, + DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class, refreshAuthService, clientRpcServer); - DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class, refreshUserMappingService, clientRpcServer); DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class, refreshCallQueueService, clientRpcServer); DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class, genericRefreshService, clientRpcServer); - DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, + DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, clientRpcServer); // set service-level authorization security policy @@ -510,7 +510,7 @@ public NameNodeRpcServer(Configuration conf, NameNode nn) clientRpcAddress = new InetSocketAddress( rpcAddr.getHostName(), listenAddr.getPort()); nn.setRpcServerAddress(conf, clientRpcAddress); - + minimumDataNodeVersion = conf.get( DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT); @@ -572,33 +572,33 @@ RPC.Server getLifelineRpcServer() { public RPC.Server getClientRpcServer() { return clientRpcServer; } - + /** Allow access to the service RPC server for testing */ @VisibleForTesting RPC.Server getServiceRpcServer() { return serviceRpcServer; } - + /** * Start client and service RPC servers. */ void start() { clientRpcServer.start(); if (serviceRpcServer != null) { - serviceRpcServer.start(); + serviceRpcServer.start(); } if (lifelineRpcServer != null) { lifelineRpcServer.start(); } } - + /** * Wait until the RPC servers have shutdown. 
*/ void join() throws InterruptedException { clientRpcServer.join(); if (serviceRpcServer != null) { - serviceRpcServer.join(); + serviceRpcServer.join(); } if (lifelineRpcServer != null) { lifelineRpcServer.join(); @@ -675,7 +675,7 @@ public ExportedBlockKeys getBlockKeys() throws IOException { @Override // NamenodeProtocol public void errorReport(NamenodeRegistration registration, - int errorCode, + int errorCode, String msg) throws IOException { String operationName = "errorReport"; checkNNStartup(); @@ -782,11 +782,11 @@ public void cancelDelegationToken(Token token) checkNNStartup(); namesystem.cancelDelegationToken(token); } - + @Override // ClientProtocol - public LocatedBlocks getBlockLocations(String src, - long offset, - long length) + public LocatedBlocks getBlockLocations(String src, + long offset, + long length) throws IOException { checkNNStartup(); metrics.incrGetBlockLocations(); @@ -794,7 +794,7 @@ public LocatedBlocks getBlockLocations(String src, namesystem.getBlockLocations(getClientMachine(), src, offset, length); return locatedBlocks; } - + @Override // ClientProtocol public FsServerDefaults getServerDefaults() throws IOException { checkNNStartup(); @@ -872,7 +872,7 @@ public boolean recoverLease(String src, String clientName) throws IOException { } @Override // ClientProtocol - public boolean setReplication(String src, short replication) + public boolean setReplication(String src, short replication) throws IOException { checkNNStartup(); return namesystem.setReplication(src, replication); @@ -921,7 +921,7 @@ public void setOwner(String src, String username, String groupname) checkNNStartup(); namesystem.setOwner(src, username, groupname); } - + @Override public LocatedBlock addBlock(String src, String clientName, ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId, @@ -983,10 +983,10 @@ public boolean complete(String src, String clientName, } /** - * The client has detected an error on the specified located blocks - * and is reporting them to the server. For now, the namenode will - * mark the block as corrupt. In the future we might - * check the blocks are actually corrupt. + * The client has detected an error on the specified located blocks + * and is reporting them to the server. For now, the namenode will + * mark the block as corrupt. In the future we might + * check the blocks are actually corrupt. 
*/ @Override // ClientProtocol, DatanodeProtocol public void reportBadBlocks(LocatedBlock[] blocks) throws IOException { @@ -1022,7 +1022,7 @@ public void updatePipeline(String clientName, ExtendedBlock oldBlock, RetryCache.setState(cacheEntry, success); } } - + @Override // DatanodeProtocol public void commitBlockSynchronization(ExtendedBlock block, long newgenerationstamp, long newlength, @@ -1033,14 +1033,14 @@ public void commitBlockSynchronization(ExtendedBlock block, namesystem.commitBlockSynchronization(block, newgenerationstamp, newlength, closeFile, deleteblock, newtargets, newtargetstorages); } - + @Override // ClientProtocol - public long getPreferredBlockSize(String filename) + public long getPreferredBlockSize(String filename) throws IOException { checkNNStartup(); return namesystem.getPreferredBlockSize(filename); } - + @Deprecated @Override // ClientProtocol public boolean rename(String src, String dst) throws IOException { @@ -1067,7 +1067,7 @@ public boolean rename(String src, String dst) throws IOException { } return ret; } - + @Override // ClientProtocol public void concat(String trg, String[] src) throws IOException { checkNNStartup(); @@ -1089,7 +1089,7 @@ public void concat(String trg, String[] src) throws IOException { RetryCache.setState(cacheEntry, success); } } - + @Override // ClientProtocol public void rename2(String src, String dst, Options.Rename... options) throws IOException { @@ -1144,14 +1144,14 @@ public boolean delete(String src, boolean recursive) throws IOException { } finally { RetryCache.setState(cacheEntry, ret); } - if (ret) + if (ret) metrics.incrDeleteFileOps(); return ret; } /** * Check path length does not exceed maximum. Returns true if - * length and depth are okay. Returns false if length is too long + * length and depth are okay. Returns false if length is too long * or depth is too great. */ private boolean checkPathLength(String src) { @@ -1159,14 +1159,14 @@ private boolean checkPathLength(String src) { return (src.length() <= MAX_PATH_LENGTH && srcPath.depth() <= MAX_PATH_DEPTH); } - + @Override // ClientProtocol public boolean mkdirs(String src, FsPermission masked, boolean createParent) throws IOException { checkNNStartup(); stateChangeLog.debug("*DIR* NameNode.mkdirs: {}.", src); if (!checkPathLength(src)) { - throw new IOException("mkdirs: Pathname too long. Limit " + throw new IOException("mkdirs: Pathname too long. 
Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels."); } return namesystem.mkdirs(src, @@ -1184,7 +1184,7 @@ public void renewLease(String clientName, List namespaces) + ") should be null or empty"); } checkNNStartup(); - namesystem.renewLease(clientName); + namesystem.renewLease(clientName); } @Override // ClientProtocol @@ -1246,14 +1246,14 @@ public boolean isFileClosed(String src) throws IOException{ checkNNStartup(); return namesystem.isFileClosed(src); } - + @Override // ClientProtocol public HdfsFileStatus getFileLinkInfo(String src) throws IOException { checkNNStartup(); metrics.incrFileInfoOps(); return namesystem.getFileInfo(src, false, false, false); } - + @Override // ClientProtocol public long[] getStats() throws IOException { checkNNStartup(); @@ -1308,7 +1308,7 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) } @Override // ClientProtocol - public boolean restoreFailedStorage(String arg) throws IOException { + public boolean restoreFailedStorage(String arg) throws IOException { checkNNStartup(); return namesystem.restoreFailedStorage(arg); } @@ -1329,7 +1329,7 @@ public boolean saveNamespace(long timeWindow, long txGap) throws IOException { } return true; } - + @Override // ClientProtocol public long rollEdits() throws AccessControlException, IOException { checkNNStartup(); @@ -1351,7 +1351,7 @@ public long getTransactionID() throws IOException { namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getCorrectLastAppliedOrWrittenTxId(); } - + @Override // NamenodeProtocol public long getMostRecentCheckpointTxId() throws IOException { String operationName = "getMostRecentCheckpointTxId"; @@ -1360,13 +1360,13 @@ public long getMostRecentCheckpointTxId() throws IOException { namesystem.checkSuperuserPrivilege(operationName); return namesystem.getFSImage().getMostRecentCheckpointTxId(); } - + @Override // NamenodeProtocol public CheckpointSignature rollEditLog() throws IOException { checkNNStartup(); return namesystem.rollEditLog(); } - + @Override // NamenodeProtocol public RemoteEditLogManifest getEditLogManifest(long sinceTxId) throws IOException { @@ -1392,7 +1392,7 @@ public boolean isRollingUpgrade() throws IOException { namesystem.checkSuperuserPrivilege(operationName); return namesystem.isRollingUpgrade(); } - + @Override // ClientProtocol public void finalizeUpgrade() throws IOException { checkNNStartup(); @@ -1487,7 +1487,7 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { checkNNStartup(); namesystem.setBalancerBandwidth(bandwidth); } - + @Override // ClientProtocol public ContentSummary getContentSummary(String path) throws IOException { checkNNStartup(); @@ -1530,7 +1530,7 @@ public void setQuota(String path, long namespaceQuota, long storagespaceQuota, checkNNStartup(); namesystem.setQuota(path, namespaceQuota, storagespaceQuota, type); } - + @Override // ClientProtocol public void fsync(String src, long fileId, String clientName, long lastBlockLength) @@ -1540,7 +1540,7 @@ public void fsync(String src, long fileId, String clientName, } @Override // ClientProtocol - public void setTimes(String src, long mtime, long atime) + public void setTimes(String src, long mtime, long atime) throws IOException { checkNNStartup(); namesystem.setTimes(src, mtime, atime); @@ -1557,12 +1557,12 @@ public void createSymlink(String target, String link, FsPermission dirPerms, } /* We enforce the MAX_PATH_LENGTH limit even though a symlink target - * URI may refer to a non-HDFS file system. 
+ * URI may refer to a non-HDFS file system. */ if (!checkPathLength(link)) { throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + " character limit"); - + } final UserGroupInformation ugi = getRemoteUser(); @@ -1635,7 +1635,7 @@ public DatanodeCommand blockReport(final DatanodeRegistration nodeReg, verifyRequest(nodeReg); blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: from {}, reports.length={}.", nodeReg, reports.length); - final BlockManager bm = namesystem.getBlockManager(); + final BlockManager bm = namesystem.getBlockManager(); boolean noStaleStorages = false; try { if (bm.checkBlockReportLease(context, nodeReg)) { @@ -1713,9 +1713,9 @@ public void run() { @Override // DatanodeProtocol public void errorReport(DatanodeRegistration nodeReg, - int errorCode, String msg) throws IOException { + int errorCode, String msg) throws IOException { checkNNStartup(); - String dnName = + String dnName = (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString(); if (errorCode == DatanodeProtocol.NOTIFY) { @@ -1728,12 +1728,12 @@ public void errorReport(DatanodeRegistration nodeReg, LOG.warn("Disk error on " + dnName + ": " + msg); } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) { LOG.warn("Fatal disk error on " + dnName + ": " + msg); - namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg); + namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg); } else { LOG.info("Error report from " + dnName + ": " + msg); } } - + @Override // DatanodeProtocol, NamenodeProtocol public NamespaceInfo versionRequest() throws IOException { checkNNStartup(); @@ -1751,9 +1751,9 @@ public void sendLifeline(DatanodeRegistration nodeReg, StorageReport[] report, xceiverCount, xmitsInProgress, failedVolumes, volumeFailureSummary); } - /** + /** * Verifies the given registration. 
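The registration checks in the hunks that follow compare DataNode and NameNode software versions. The comparison itself is just VersionUtil.compareVersions; a small sketch with illustrative version strings:

import org.apache.hadoop.util.VersionUtil;

public class VersionCheckSketch {
  // Mirrors the spirit of verifySoftwareVersion: reject DataNodes whose
  // version sorts below the configured minimum. Strings are illustrative.
  public static void main(String[] args) {
    String dnVersion = "3.3.1";
    String minimumDataNodeVersion = "3.3.0";
    if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) {
      throw new IllegalStateException("DataNode version " + dnVersion
          + " is below the minimum " + minimumDataNodeVersion);
    }
    System.out.println("version check passed");
  }
}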
- * + * * @param nodeReg node registration * @throws UnregisteredNodeException if the registration is invalid */ @@ -1816,7 +1816,7 @@ public Collection refresh(String identifier, String[] args) { // Let the registry handle as needed return RefreshRegistry.defaultRegistry().dispatch(identifier, args); } - + @Override // GetUserMappingsProtocol public String[] getGroupsForUser(String user) throws IOException { LOG.debug("Getting groups for user {}", user); @@ -1829,9 +1829,9 @@ public synchronized void monitorHealth() throws HealthCheckFailedException, checkNNStartup(); nn.monitorHealth(); } - + @Override // HAServiceProtocol - public synchronized void transitionToActive(StateChangeRequestInfo req) + public synchronized void transitionToActive(StateChangeRequestInfo req) throws ServiceFailedException, AccessControlException, IOException { checkNNStartup(); nn.checkHaStateChange(req); @@ -1866,7 +1866,7 @@ public synchronized void transitionToObserver(StateChangeRequestInfo req) } @Override // HAServiceProtocol - public synchronized HAServiceStatus getServiceStatus() + public synchronized HAServiceStatus getServiceStatus() throws AccessControlException, ServiceFailedException, IOException { checkNNStartup(); return nn.getServiceStatus(); @@ -1882,7 +1882,7 @@ void verifyLayoutVersion(int version) throws IOException { throw new IncorrectVersionException( HdfsServerConstants.NAMENODE_LAYOUT_VERSION, version, "data node"); } - + private void verifySoftwareVersion(DatanodeRegistration dnReg) throws IncorrectVersionException { String dnVersion = dnReg.getSoftwareVersion(); @@ -1951,7 +1951,7 @@ public String createSnapshot(String snapshotRoot, String snapshotName) } return ret; } - + @Override public void deleteSnapshot(String snapshotRoot, String snapshotName) throws IOException { @@ -2223,7 +2223,7 @@ public AclStatus getAclStatus(String src) throws IOException { checkNNStartup(); return namesystem.getAclStatus(src); } - + @Override // ClientProtocol public void createEncryptionZone(String src, String keyName) throws IOException { @@ -2320,9 +2320,9 @@ public void setXAttr(String src, XAttr xAttr, EnumSet flag) RetryCache.setState(cacheEntry, success); } } - + @Override // ClientProtocol - public List getXAttrs(String src, List xAttrs) + public List getXAttrs(String src, List xAttrs) throws IOException { checkNNStartup(); return namesystem.getXAttrs(src, xAttrs); @@ -2333,7 +2333,7 @@ public List listXAttrs(String src) throws IOException { checkNNStartup(); return namesystem.listXAttrs(src); } - + @Override // ClientProtocol public void removeXAttr(String src, XAttr xAttr) throws IOException { checkNNStartup(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java index 39d027aec2340..7eefd8add1df3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java @@ -81,7 +81,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest { private static FileSystem fHdfs2; private FileSystem fsTarget2; Path targetTestRoot2; - + @Override protected FileSystemTestHelper createFileSystemHelper() { return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs"); @@ -106,14 +106,14 @@ public static void clusterSetupAtBegining() throws IOException, SupportsBlocks = true; 
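The test setup just below brings up a two-NameNode federated MiniDFSCluster. For orientation, a minimal single-NameNode variant of the same builder pattern, with the configuration left at its defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .build();
    try {
      cluster.waitClusterUp();
      System.out.println("NN up at " + cluster.getFileSystem().getUri());
    } finally {
      cluster.shutdown();
    }
  }
}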
CONF.setBoolean( DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); - + cluster = new MiniDFSCluster.Builder(CONF).nnTopology( MiniDFSNNTopology.simpleFederatedTopology(2)) .numDataNodes(2) .build(); cluster.waitClusterUp(); - + fHdfs = cluster.getFileSystem(0); fHdfs2 = cluster.getFileSystem(1); fHdfs.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, @@ -121,16 +121,16 @@ public static void clusterSetupAtBegining() throws IOException, fHdfs2.getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_URI.toString()); - defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + + defaultWorkingDirectory = fHdfs.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); - defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + + defaultWorkingDirectory2 = fHdfs2.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); - + fHdfs.mkdirs(defaultWorkingDirectory); fHdfs2.mkdirs(defaultWorkingDirectory2); } - + @AfterClass public static void ClusterShutdownAtEnd() throws Exception { if (cluster != null) { @@ -167,7 +167,7 @@ void setupMountPoints() { int getExpectedDirPaths() { return 8; } - + @Override int getExpectedMountPoints() { return 9; @@ -175,7 +175,7 @@ int getExpectedMountPoints() { @Override int getExpectedDelegationTokenCount() { - return 2; // Mount points to 2 unique hdfs + return 2; // Mount points to 2 unique hdfs } @Override @@ -193,34 +193,34 @@ Path getTrashRootInFallBackFS() throws IOException { @Test public void testTrashRootsAfterEncryptionZoneDeletion() throws Exception { try { - final Path zone = new Path("/EZ"); - fsTarget.mkdirs(zone); - final Path zone1 = new Path("/EZ/zone1"); - fsTarget.mkdirs(zone1); - - DFSTestUtil.createKey("test_key", cluster, CONF); - HdfsAdmin hdfsAdmin = new HdfsAdmin(cluster.getURI(0), CONF); - final EnumSet provisionTrash = - EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH); - hdfsAdmin.createEncryptionZone(zone1, "test_key", provisionTrash); - - final Path encFile = new Path(zone1, "encFile"); - DFSTestUtil.createFile(fsTarget, encFile, 10240, (short) 1, 0xFEED); - - Configuration clientConf = new Configuration(CONF); - clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1); - clientConf.set("fs.default.name", fsTarget.getUri().toString()); - FsShell shell = new FsShell(clientConf); - - //Verify file deletion within EZ - DFSTestUtil.verifyDelete(shell, fsTarget, encFile, true); - assertTrue("ViewFileSystem trash roots should include EZ file trash", - (fsView.getTrashRoots(true).size() == 1)); - - //Verify deletion of EZ - DFSTestUtil.verifyDelete(shell, fsTarget, zone, true); - assertTrue("ViewFileSystem trash roots should include EZ zone trash", - (fsView.getTrashRoots(true).size() == 2)); + final Path zone = new Path("/EZ"); + fsTarget.mkdirs(zone); + final Path zone1 = new Path("/EZ/zone1"); + fsTarget.mkdirs(zone1); + + DFSTestUtil.createKey("test_key", cluster, CONF); + HdfsAdmin hdfsAdmin = new HdfsAdmin(cluster.getURI(0), CONF); + final EnumSet provisionTrash = + EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH); + hdfsAdmin.createEncryptionZone(zone1, "test_key", provisionTrash); + + final Path encFile = new Path(zone1, "encFile"); + DFSTestUtil.createFile(fsTarget, encFile, 10240, (short) 1, 0xFEED); + + Configuration clientConf = new Configuration(CONF); + clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1); + clientConf.set("fs.default.name", fsTarget.getUri().toString()); + FsShell shell = new 
FsShell(clientConf); + + //Verify file deletion within EZ + DFSTestUtil.verifyDelete(shell, fsTarget, encFile, true); + assertTrue("ViewFileSystem trash roots should include EZ file trash", + (fsView.getTrashRoots(true).size() == 1)); + + //Verify deletion of EZ + DFSTestUtil.verifyDelete(shell, fsTarget, zone, true); + assertTrue("ViewFileSystem trash roots should include EZ zone trash", + (fsView.getTrashRoots(true).size() == 2)); } finally { DFSTestUtil.deleteKey("test_key", cluster); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 2b88f6e71ac08..e816edd3110de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -201,18 +201,18 @@ public class DFSTestUtil { private static final Logger LOG = LoggerFactory.getLogger(DFSTestUtil.class); - + private static final Random gen = new Random(); private static final String[] dirNames = { "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine" }; - + private final int maxLevels; private final int maxSize; private final int minSize; private final int nFiles; private MyFile[] files; - + /** Creates a new instance of DFSTestUtil * * @param nFiles Number of files to be created @@ -242,7 +242,7 @@ public DFSTestUtil(String testName, int nFiles, int maxLevels, int maxSize, this.maxSize = maxSize; this.minSize = minSize; } - + /** * when formatting a namenode - we must provide clusterid. * @param conf @@ -328,11 +328,11 @@ public static ErasureCodingPolicyState getECPolicyState( * a single file. */ private class MyFile { - + private String name = ""; private final int size; private final long seed; - + MyFile() { int nLevels = gen.nextInt(maxLevels); if (nLevels != 0) { @@ -353,7 +353,7 @@ private class MyFile { size = minSize + gen.nextInt(maxSize - minSize); seed = gen.nextLong(); } - + String getName() { return name; } int getSize() { return size; } long getSeed() { return seed; } @@ -388,20 +388,20 @@ public static byte[] readFileAsBytes(FileSystem fs, Path fileName) throws IOExce return os.toByteArray(); } } - + /** create nFiles with random names and directory hierarchies * with random (but reproducible) data in them. 
*/ public void createFiles(FileSystem fs, String topdir, short replicationFactor) throws IOException { files = new MyFile[nFiles]; - + for (int idx = 0; idx < nFiles; idx++) { files[idx] = new MyFile(); } - + Path root = new Path(topdir); - + for (int idx = 0; idx < nFiles; idx++) { createFile(fs, new Path(root, files[idx].getName()), files[idx].getSize(), replicationFactor, files[idx].getSeed()); @@ -414,7 +414,7 @@ public static String readFile(FileSystem fs, Path fileName) return new String(buf, 0, buf.length); } - public static byte[] readFileBuffer(FileSystem fs, Path fileName) + public static byte[] readFileBuffer(FileSystem fs, Path fileName) throws IOException { try (ByteArrayOutputStream os = new ByteArrayOutputStream(); FSDataInputStream in = fs.open(fileName)) { @@ -422,13 +422,13 @@ public static byte[] readFileBuffer(FileSystem fs, Path fileName) return os.toByteArray(); } } - - public static void createFile(FileSystem fs, Path fileName, long fileLen, + + public static void createFile(FileSystem fs, Path fileName, long fileLen, short replFactor, long seed) throws IOException { createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName), replFactor, seed); } - + public static void createFile(FileSystem fs, Path fileName, int bufferLen, long fileLen, long blockSize, short replFactor, long seed) throws IOException { @@ -483,18 +483,18 @@ public static void createFile(FileSystem fs, Path fileName, } } } - + public static byte[] calculateFileContentsFromSeed(long seed, int length) { Random rb = new Random(seed); byte val[] = new byte[length]; rb.nextBytes(val); return val; } - + /** check if the files have been copied correctly. */ public boolean checkFiles(FileSystem fs, String topdir) throws IOException { Path root = new Path(topdir); - + for (int idx = 0; idx < nFiles; idx++) { Path fPath = new Path(root, files[idx].getName()); try (FSDataInputStream in = fs.open(fPath)) { @@ -510,11 +510,11 @@ public boolean checkFiles(FileSystem fs, String topdir) throws IOException { } } } - + return true; } - void setReplication(FileSystem fs, String topdir, short value) + void setReplication(FileSystem fs, String topdir, short value) throws IOException { Path root = new Path(topdir); for (int idx = 0; idx < nFiles; idx++) { @@ -527,7 +527,7 @@ void setReplication(FileSystem fs, String topdir, short value) * Waits for the replication factor of all files to reach the * specified target. */ - public void waitReplication(FileSystem fs, String topdir, short value) + public void waitReplication(FileSystem fs, String topdir, short value) throws IOException, InterruptedException, TimeoutException { Path root = new Path(topdir); @@ -658,7 +658,7 @@ public static void waitCorruptReplicas(FileSystem fs, FSNamesystem ns, /* * Wait up to 20s for the given DN (IP:port) to be decommissioned */ - public static void waitForDecommission(FileSystem fs, String name) + public static void waitForDecommission(FileSystem fs, String name) throws IOException, InterruptedException, TimeoutException { DatanodeInfo dn = null; int count = 0; @@ -722,10 +722,10 @@ public static long getDatanodeCapacity(DatanodeManager dm, int index) { } /* - * Wait for the given # live/dead DNs, total capacity, and # vol failures. + * Wait for the given # live/dead DNs, total capacity, and # vol failures. 
@@ -722,10 +722,10 @@ public static long getDatanodeCapacity(DatanodeManager dm, int index) {
   }
 
   /*
-   * Wait for the given # live/dead DNs, total capacity, and # vol failures. 
+   * Wait for the given # live/dead DNs, total capacity, and # vol failures.
    */
-  public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive, 
-      int expectedDead, long expectedVolFails, long expectedTotalCapacity, 
+  public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
+      int expectedDead, long expectedVolFails, long expectedTotalCapacity,
       long timeout) throws InterruptedException, TimeoutException {
     final List live = new ArrayList();
     final List dead = new ArrayList();
@@ -765,7 +765,7 @@ public static void waitForDatanodeStatus(DatanodeManager dm, int expectedLive,
   /*
    * Wait for the given DN to consider itself dead.
    */
-  public static void waitForDatanodeDeath(DataNode dn) 
+  public static void waitForDatanodeDeath(DataNode dn)
       throws InterruptedException, TimeoutException {
     final int ATTEMPTS = 10;
     int count = 0;
@@ -778,7 +778,7 @@ public static void waitForDatanodeDeath(DataNode dn)
       throw new TimeoutException("Timed out waiting for DN to die");
     }
   }
-  
+
   /** return list of filenames created as part of createFiles */
   public String[] getFileNames(String topDir) {
     if (nFiles == 0)
@@ -837,20 +837,20 @@ public static void waitReplication(FileSystem fs, Path fileName,
     LOG.info("All blocks of file {} verified to have replication factor {}",
         fileName, replFactor);
   }
-  
+
   /** delete directory and everything underneath it.*/
   public void cleanup(FileSystem fs, String topdir) throws IOException {
     Path root = new Path(topdir);
     fs.delete(root, true);
     files = null;
   }
-  
+
   public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
     try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path)) {
       in.readByte();
       return in.getCurrentBlock();
     }
-  } 
+  }
 
   public static List getAllBlocks(FSDataInputStream in)
       throws IOException {
@@ -920,7 +920,7 @@ public static void writeFile(FileSystem fs, Path p, String s)
   }
 
   /* Append the given string to the given file */
-  public static void appendFile(FileSystem fs, Path p, String s) 
+  public static void appendFile(FileSystem fs, Path p, String s)
       throws IOException {
     assert fs.exists(p);
     try (InputStream is = new ByteArrayInputStream(s.getBytes());
@@ -928,7 +928,7 @@ public static void appendFile(FileSystem fs, Path p, String s)
       IOUtils.copyBytes(is, os, s.length());
     }
   }
-  
+
   /**
    * Append specified length of bytes to a given file
    * @param fs The file system
@@ -987,23 +987,23 @@ public static void appendFileNewBlock(DistributedFileSystem fs,
   public static String urlGet(URL url) throws IOException {
     return new String(urlGetBytes(url), Charsets.UTF_8);
   }
-  
+
   /**
    * @return URL contents as a byte array
    */
   public static byte[] urlGetBytes(URL url) throws IOException {
     URLConnection conn = url.openConnection();
     HttpURLConnection hc = (HttpURLConnection)conn;
-    
+
     assertEquals(HttpURLConnection.HTTP_OK, hc.getResponseCode());
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
     return out.toByteArray();
   }
-  
+
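The writeFile, appendFile, and readFile helpers above compose naturally in tests. A hedged usage sketch, not compile-checked against this module: it assumes a running MiniDFSCluster named cluster, JUnit's assertEquals, and the usual org.apache.hadoop.fs imports; the path and strings are arbitrary:

    @Test
    public void appendRoundTrip() throws IOException {
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/tmp/append-demo");   // illustrative path
      DFSTestUtil.writeFile(fs, p, "hello ");  // creates the file first
      DFSTestUtil.appendFile(fs, p, "world");  // appendFile asserts fs.exists(p)
      assertEquals("hello world", DFSTestUtil.readFile(fs, p));
    }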
  /**
   * mock class to get group mapping for fake users
-   * 
+   *
   */
  static class MockUnixGroupsMapping extends ShellBasedUnixGroupsMapping {
    static Map fakeUser2GroupsMap;
@@ -1013,14 +1013,14 @@ static class MockUnixGroupsMapping extends ShellBasedUnixGroupsMapping {
      defaultGroups.add("supergroup");
      fakeUser2GroupsMap = new HashMap();
    }
-  
+
    @Override
    public List getGroups(String user) throws IOException {
      boolean found = false;
-      
+
      // check to see if this is one of fake users
      List l = new ArrayList();
-      for(String u : fakeUser2GroupsMap.keySet()) { 
+      for(String u : fakeUser2GroupsMap.keySet()) {
        if(user.equals(u)) {
          found = true;
          for(String gr : fakeUser2GroupsMap.get(u)) {
@@ -1028,12 +1028,12 @@ public List getGroups(String user) throws IOException {
        }
      }
    }
-      
+
      // default
      if(!found) {
        l = super.getGroups(user);
        if(l.size() == 0) {
-          System.out.println("failed to get real group for " + user + 
+          System.out.println("failed to get real group for " + user +
              "; using default");
          return defaultGroups;
        }
@@ -1041,7 +1041,7 @@ public List getGroups(String user) throws IOException {
      return l;
    }
  }
-  
+
  /**
   * update the configuration with fake class for mapping user to groups
   * @param conf
@@ -1052,18 +1052,18 @@ public List getGroups(String user) throws IOException {
    if(map!=null) {
      MockUnixGroupsMapping.fakeUser2GroupsMap = map;
    }
-    
+
    // fake mapping user to groups
    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
        DFSTestUtil.MockUnixGroupsMapping.class,
        ShellBasedUnixGroupsMapping.class);
-    
+
  }
-  
+
  /**
   * Get a FileSystem instance as specified user in a doAs block.
   */
-  static public FileSystem getFileSystemAs(UserGroupInformation ugi, 
+  static public FileSystem getFileSystemAs(UserGroupInformation ugi,
      final Configuration conf) throws IOException {
    try {
      return ugi.doAs(new PrivilegedExceptionAction() {
@@ -1086,7 +1086,7 @@ public static byte[] generateSequentialBytes(int start, int length) {
 
    return result;
  }
-  
+
  public static Statistics getStatistics(FileSystem fs) {
    return FileSystem.getStatistics(fs.getUri().getScheme(), fs.getClass());
  }
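MockUnixGroupsMapping, the configuration updater above it, and getFileSystemAs combine to let a test act as an arbitrary user with fabricated group membership. A sketch of how the pieces fit together; it assumes the updater shown above is named updateConfWithFakeGroupMapping (the name is not visible in this hunk), and the user and group names are illustrative:

    // Install a fake user->groups mapping, then open a FileSystem as that user.
    Map<String, String[]> u2g = new HashMap<>();
    u2g.put("alice", new String[] {"testgroup"});
    DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g);

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        "alice", new String[] {"testgroup"});
    FileSystem aliceFs = DFSTestUtil.getFileSystemAs(ugi, conf); // doAs under the hood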
@@ -1104,7 +1104,7 @@ public static byte[] loadFile(String filename) throws IOException {
  }
 
  /** For {@link TestTransferRbw} */
-  public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
+  public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
      final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
    assertEquals(2, datanodes.length);
    final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
@@ -1124,7 +1124,7 @@ public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
      return BlockOpResponseProto.parseDelimitedFrom(in);
    }
  }
-  
+
  public static void setFederatedConfiguration(MiniDFSCluster cluster,
      Configuration conf) {
    Set nameservices = new HashSet();
@@ -1172,7 +1172,7 @@ public static void setFederatedHAConfiguration(MiniDFSCluster cluster,
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, Joiner.on(",")
        .join(nameservices.keySet()));
  }
-  
+
  private static DatanodeID getDatanodeID(String ipAddr) {
    return new DatanodeID(ipAddr, "localhost",
        UUID.randomUUID().toString(),
@@ -1205,13 +1205,13 @@ public static DatanodeInfo getDatanodeInfo(String ipAddr) {
    return new DatanodeInfoBuilder().setNodeID(getDatanodeID(ipAddr))
        .build();
  }
-  
+
  public static DatanodeInfo getLocalDatanodeInfo(int port) {
    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID(port))
        .build();
  }
 
-  public static DatanodeInfo getDatanodeInfo(String ipAddr, 
+  public static DatanodeInfo getDatanodeInfo(String ipAddr,
      String host, int port) {
    return new DatanodeInfoBuilder().setNodeID(
        new DatanodeID(ipAddr, host, UUID.randomUUID().toString(), port,
@@ -1240,10 +1240,10 @@ public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
    return getDatanodeDescriptor(ipAddr,
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation);
  }
-  
+
  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
      String rackLocation, String hostname) {
-    return getDatanodeDescriptor(ipAddr, 
+    return getDatanodeDescriptor(ipAddr,
        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation, hostname);
  }
 
@@ -1251,15 +1251,15 @@ public static DatanodeStorageInfo createDatanodeStorageInfo(
      String storageID, String ip) {
    return createDatanodeStorageInfo(storageID, ip, "defaultRack", "host");
  }
-  
+
  public static DatanodeStorageInfo[] createDatanodeStorageInfos(String[] racks) {
    return createDatanodeStorageInfos(racks, null);
  }
-  
+
  public static DatanodeStorageInfo[] createDatanodeStorageInfos(String[] racks, String[] hostnames) {
    return createDatanodeStorageInfos(racks.length, racks, hostnames);
  }
-  
+
  public static DatanodeStorageInfo[] createDatanodeStorageInfos(int n) {
    return createDatanodeStorageInfos(n, null, null);
  }
@@ -1323,17 +1323,17 @@ public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
    return new DatanodeDescriptor(dnId, rackLocation);
  }
-  
+
  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
      int port, String rackLocation) {
    return getDatanodeDescriptor(ipAddr, port, rackLocation, "host");
  }
-  
+
  public static DatanodeRegistration getLocalDatanodeRegistration() {
    return new DatanodeRegistration(getLocalDatanodeID(), new StorageInfo(
        NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
  }
-  
+
  /** Copy one file's contents into the other **/
  public static void copyFile(File src, File dest) throws IOException {
    FileUtils.copyFile(src, dest);
@@ -1344,10 +1344,10 @@ public static class Builder {
    private int maxSize = 8*1024;
    private int minSize = 1;
    private int nFiles = 1;
-    
+
    public Builder() {
    }
-    
+
    public Builder setName(String string) {
      return this;
    }
@@ -1356,7 +1356,7 @@ public Builder setNumFiles(int nFiles) {
      this.nFiles = nFiles;
      return this;
    }
-    
+
    public Builder setMaxLevels(int maxLevels) {
      this.maxLevels = maxLevels;
      return this;
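The Builder above (its build() method appears in the hunk that follows) gives tests a fluent way to configure the random-file generator instead of a long positional constructor. A hedged usage sketch: the values are arbitrary, setMaxSize is assumed to exist alongside the setters visible here since the maxSize field is declared above, and fs stands for any test FileSystem:

    DFSTestUtil util = new DFSTestUtil.Builder()
        .setNumFiles(10)       // ten random files
        .setMaxLevels(3)       // at most three directory levels deep
        .setMaxSize(8 * 1024)  // assumed setter, mirroring the maxSize field
        .setMinSize(1)
        .build();
    util.createFiles(fs, "/testdir", (short) 3); // signature shown earlier in this diff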
@@ -1371,21 +1371,21 @@ public Builder setMinSize(int minSize) {
      this.minSize = minSize;
      return this;
    }
-    
+
    public DFSTestUtil build() {
      return new DFSTestUtil(nFiles, maxLevels, maxSize, minSize);
    }
  }
-  
+
  /**
   * Run a set of operations and generate all edit logs
   */
  public static void runOperations(MiniDFSCluster cluster,
-      DistributedFileSystem filesystem, Configuration conf, long blockSize, 
+      DistributedFileSystem filesystem, Configuration conf, long blockSize,
      int nnIndex) throws IOException {
    // create FileContext for rename2
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
-    
+
    // OP_ADD 0
    final Path pathFileCreate = new Path("/file_create");
    FSDataOutputStream s = filesystem.create(pathFileCreate);
@@ -1448,7 +1448,7 @@ public static void runOperations(MiniDFSCluster cluster,
    long atime = mtime;
    filesystem.setTimes(pathFileCreate, mtime, atime);
    // OP_SET_QUOTA 14
-    filesystem.setQuota(pathDirectoryMkdir, 1000L, 
+    filesystem.setQuota(pathDirectoryMkdir, 1000L,
        HdfsConstants.QUOTA_DONT_SET);
    // OP_SET_QUOTA_BY_STORAGETYPE
    filesystem.setQuotaByStorageType(pathDirectoryMkdir, StorageType.SSD, 888L);
@@ -1480,7 +1480,7 @@ public static void runOperations(MiniDFSCluster cluster,
    // OP_SYMLINK 17
    Path pathSymlink = new Path("/file_symlink");
    fc.createSymlink(pathConcatTarget, pathSymlink, false);
-    
+
    // OP_REASSIGN_LEASE 22
    String filePath = "/hard-lease-recovery-test";
    byte[] bytes = "foo-bar-baz".getBytes();
@@ -1549,9 +1549,9 @@ public static void runOperations(MiniDFSCluster cluster,
        .build());
    filesystem.setAcl(pathConcatTarget, aclEntryList);
    // OP_SET_XATTR
-    filesystem.setXAttr(pathConcatTarget, "user.a1", 
+    filesystem.setXAttr(pathConcatTarget, "user.a1",
        new byte[]{0x31, 0x32, 0x33});
-    filesystem.setXAttr(pathConcatTarget, "user.a2", 
+    filesystem.setXAttr(pathConcatTarget, "user.a2",
        new byte[]{0x37, 0x38, 0x39});
    // OP_REMOVE_XATTR
    filesystem.removeXAttr(pathConcatTarget, "user.a2");
@@ -1630,7 +1630,7 @@ public static long verifyExpectedCacheUsage(final long expectedCacheUsed,
      final long expectedBlocks, final FsDatasetSpi fsd) throws Exception {
    GenericTestUtils.waitFor(new Supplier() {
      private int tries = 0;
-      
+
      @Override
      public Boolean get() {
        long curCacheUsed = fsd.getCacheUsed();
@@ -1693,7 +1693,7 @@ public static class ShortCircuitTestContext implements Closeable {
    private final TemporarySocketDirectory sockDir;
    private boolean closed = false;
    private final boolean formerTcpReadsDisabled;
-    
+
    public ShortCircuitTestContext(String testName) {
      this.testName = testName;
      this.sockDir = new TemporarySocketDirectory();
@@ -1701,7 +1701,7 @@ public ShortCircuitTestContext(String testName) {
      formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
      Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
    }
-    
+
    public Configuration newConfiguration() {
      Configuration conf = new Configuration();
      conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
@@ -1969,7 +1969,7 @@ public static void FsShellRun(String cmd, int retcode, String contain,
      Configuration conf) throws Exception {
    FsShell shell = new FsShell(new Configuration(conf));
    toolRun(shell, cmd, retcode, contain);
-  } 
+  }
 
  public static void DFSAdminRun(String cmd, int retcode, String contain,
      Configuration conf) throws Exception {
@@ -2079,7 +2079,7 @@ public static void resetLastUpdatesWithOffset(DatanodeInfo dn, long offset) {
    dn.setLastUpdate(Time.now() + offset);
    dn.setLastUpdateMonotonic(Time.monotonicNow() + offset);
  }
-  
+
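ShortCircuitTestContext.newConfiguration, whose first lines appear above, turns on short-circuit (domain-socket) reads for a test. Roughly the configuration involved, as a sketch: the socket path below is illustrative, and the real helper derives it from its TemporarySocketDirectory rather than a fixed string; DFS_DOMAIN_SOCKET_PATH_KEY is the standard dfs.domain.socket.path key:

    Configuration conf = new Configuration();
    // Enable client-side short-circuit reads over a UNIX domain socket.
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    // Illustrative path; the helper above builds one under a temp directory.
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "/tmp/scr_demo.sock");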
  /**
   * This method takes a set of block locations and fills the provided buffer
   * with expected bytes based on simulated content from
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index d6fa7765d1a07..cad144ad25040 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -141,9 +141,9 @@ public class TestDistributedFileSystem {
  }
 
  private boolean dualPortTesting = false;
-  
+
  private boolean noXmlDefaults = false;
-  
+
  HdfsConfiguration getTestConfiguration() {
    HdfsConfiguration conf;
    if (noXmlDefaults) {
@@ -199,9 +199,9 @@ public void testFileSystemCloseAll() throws Exception {
      if (cluster != null) {cluster.shutdown();}
    }
  }
-  
+
  /**
-   * Tests DFSClient.close throws no ConcurrentModificationException if 
+   * Tests DFSClient.close throws no ConcurrentModificationException if
   * multiple files are open.
   * Also tests that any cached sockets are closed. (HDFS-3359)
   * Also tests deprecated listOpenFiles(EnumSet<>). (HDFS-14595)
@@ -402,7 +402,7 @@ public void testDFSCloseOrdering() throws Exception {
    inOrder.verify(fs.dfs).delete(eq(path.toString()), eq(true));
    inOrder.verify(fs.dfs).close();
  }
-  
+
  private static class MyDistributedFileSystem extends DistributedFileSystem {
    MyDistributedFileSystem() {
      dfs = mock(DFSClient.class);
    }
@@ -481,7 +481,7 @@ public void testDFSClient() throws Exception {
        .getDeclaredMethod("isRunning");
    checkMethod.setAccessible(true);
    assertFalse((boolean) checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
-    
+
    {
      //create a file
      final FSDataOutputStream out = dfs.create(filepaths[0]);
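The hunk that follows exercises access to the NameNode through a raw IP address; the underlying pattern is simply resolving a fully qualified hdfs:// URI, whose scheme and authority select the file system. A condensed sketch of that pattern (assumes the surrounding test's cluster and conf; the path is illustrative):

    String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/demo/file";
    Path path = new Path(uri);
    FileSystem fs = FileSystem.get(path.toUri(), conf); // matched by scheme+authority
    try (FSDataOutputStream out = fs.create(path)) {
      out.write(new byte[1024]); // any content works for the demonstration
    }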
@@ -593,11 +593,11 @@ public void testDFSClient() throws Exception {
      assertFalse((boolean)checkMethod.invoke(dfs.dfs.getLeaseRenewer()));
      dfs.close();
    }
-    
+
    {
      // test accessing DFS with ip address. should work with any hostname
      // alias or ip address that points to the interface that NameNode
      // is listening on. In this case, it is localhost.
-      String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + 
+      String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() +
          "/test/ipAddress/file";
      Path path = new Path(uri);
      FileSystem fs = FileSystem.get(path.toUri(), conf);
@@ -605,7 +605,7 @@ public void testDFSClient() throws Exception {
      byte[] buf = new byte[1024];
      out.write(buf);
      out.close();
-      
+
      FSDataInputStream in = fs.open(path);
      in.readFully(buf);
      in.close();
@@ -699,7 +699,7 @@ public void testStatistics() throws IOException {
      fs.mkdirs(dir);
      checkStatistics(fs, readOps, ++writeOps, largeReadOps);
      checkOpStatistics(OpType.MKDIRS, opCount + 1);
-      
+
      opCount = getOpStatistics(OpType.CREATE);
      FSDataOutputStream out = fs.create(file, (short)1);
      out.close();
@@ -710,7 +710,7 @@ public void testStatistics() throws IOException {
      FileStatus status = fs.getFileStatus(file);
      checkStatistics(fs, ++readOps, writeOps, largeReadOps);
      checkOpStatistics(OpType.GET_FILE_STATUS, opCount + 1);
-      
+
      opCount = getOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS);
      fs.getFileBlockLocations(file, 0, 0);
      checkStatistics(fs, ++readOps, writeOps, largeReadOps);
@@ -718,30 +718,30 @@
      fs.getFileBlockLocations(status, 0, 0);
      checkStatistics(fs, ++readOps, writeOps, largeReadOps);
      checkOpStatistics(OpType.GET_FILE_BLOCK_LOCATIONS, opCount + 2);
-      
+
      opCount = getOpStatistics(OpType.OPEN);
      FSDataInputStream in = fs.open(file);
      in.close();
      checkStatistics(fs, ++readOps, writeOps, largeReadOps);
      checkOpStatistics(OpType.OPEN, opCount + 1);
-      
+
      opCount = getOpStatistics(OpType.SET_REPLICATION);
      fs.setReplication(file, (short)2);
      checkStatistics(fs, readOps, ++writeOps, largeReadOps);
      checkOpStatistics(OpType.SET_REPLICATION, opCount + 1);
-      
+
      opCount = getOpStatistics(OpType.RENAME);
      Path file1 = new Path(dir, "file1");
      fs.rename(file, file1);
      checkStatistics(fs, readOps, ++writeOps, largeReadOps);
      checkOpStatistics(OpType.RENAME, opCount + 1);
-      
+
      opCount = getOpStatistics(OpType.GET_CONTENT_SUMMARY);
      fs.getContentSummary(file1);
      checkStatistics(fs, ++readOps, writeOps, largeReadOps);
      checkOpStatistics(OpType.GET_CONTENT_SUMMARY, opCount + 1);
-      
-      
+
+
      // Iterative ls test
      long mkdirOp = getOpStatistics(OpType.MKDIRS);
      long listStatusOp = getOpStatistics(OpType.LIST_STATUS);
@@ -752,7 +752,7 @@
        mkdirOp++;
        FileStatus[] list = fs.listStatus(dir);
        if (list.length > lsLimit) {
-          // if large directory, then count readOps and largeReadOps by 
+          // if large directory, then count readOps and largeReadOps by
          // number times listStatus iterates
          int iterations = (int)Math.ceil((double)list.length/lsLimit);
          largeReadOps += iterations;
@@ -763,7 +763,7 @@
          readOps++;
          listStatusOp++;
        }
-        
+
        // writeOps incremented by 1 for mkdirs
        // readOps and largeReadOps incremented by 1 or more
        checkStatistics(fs, readOps, ++writeOps, largeReadOps);
@@ -776,7 +776,7 @@
      checkStatistics(fs, readOps, writeOps, largeReadOps);
      checkOpStatistics(OpType.LIST_LOCATED_STATUS, locatedListStatusOP);
    }
-    
+
    opCount = getOpStatistics(OpType.GET_STATUS);
    fs.getStatus(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
@@ -786,12 +786,12 @@
    fs.getFileChecksum(file1);
    checkStatistics(fs, ++readOps, writeOps, largeReadOps);
    checkOpStatistics(OpType.GET_FILE_CHECKSUM, opCount + 1);
-    
+
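Every step in testStatistics follows the same snapshot/act/assert rhythm using this test class's getOpStatistics and checkOpStatistics helpers and its local readOps/writeOps/largeReadOps counters. The skeleton of one such step, for reference (dir is any test path):

    long opCount = getOpStatistics(OpType.MKDIRS); // snapshot the per-op counter
    fs.mkdirs(dir);                                // perform exactly one such op
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.MKDIRS, opCount + 1); // counter moved by exactly one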
    opCount = getOpStatistics(OpType.SET_PERMISSION);
    fs.setPermission(file1, new FsPermission((short)0777));
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.SET_PERMISSION, opCount + 1);
-    
+
    opCount = getOpStatistics(OpType.SET_TIMES);
    fs.setTimes(file1, 0L, 0L);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
@@ -807,7 +807,7 @@
    fs.delete(dir, true);
    checkStatistics(fs, readOps, ++writeOps, largeReadOps);
    checkOpStatistics(OpType.DELETE, opCount + 1);
-    
+
    } finally {
      if (cluster != null) cluster.shutdown();
    }
@@ -1216,7 +1216,7 @@ public void testFileChecksum() throws Exception {
    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
        current.getShortUserName() + "x", new String[]{"user"});
-    
+
    try {
      hdfs.getFileChecksum(new Path(
          "/test/TestNonExistingFile"));
@@ -1259,7 +1259,7 @@ public FileSystem run() throws Exception {
    final byte[] data = new byte[RAN.nextInt(block_size/2-1)+n*block_size+1];
    RAN.nextBytes(data);
    System.out.println("data.length=" + data.length);
-    
+
    //write data to a file
    final Path foo = new Path(dir, "foo" + n);
    {
@@ -1268,7 +1268,7 @@ public FileSystem run() throws Exception {
      out.write(data);
      out.close();
    }
-    
+
    //compute checksum
    final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
    System.out.println("hdfsfoocs=" + hdfsfoocs);
@@ -1326,7 +1326,7 @@ public FileSystem run() throws Exception {
    hdfs.setPermission(dir, new FsPermission((short)0));
 
-    { //test permission error on webhdfs 
+    { //test permission error on webhdfs
      try {
        webhdfs.getFileChecksum(webhdfsqualified);
        fail();
@@ -1338,7 +1338,7 @@ public FileSystem run() throws Exception {
    }
    cluster.shutdown();
  }
-  
+
  @Test
  public void testAllWithDualPort() throws Exception {
    dualPortTesting = true;
@@ -1352,7 +1352,7 @@ public void testAllWithDualPort() throws Exception {
      dualPortTesting = false;
    }
  }
-  
+
  @Test
  public void testAllWithNoXmlDefaults() throws Exception {
    // Do all the tests with a configuration that ignores the defaults in
@@ -1365,7 +1365,7 @@ public void testAllWithNoXmlDefaults() throws Exception {
      testDFSClient();
      testFileChecksum();
    } finally {
-      noXmlDefaults = false; 
+      noXmlDefaults = false;
    }
  }
 
@@ -1429,7 +1429,7 @@ public void testCreateWithCustomChecksum() throws Exception {
    Configuration conf = getTestConfiguration();
    MiniDFSCluster cluster = null;
    Path testBasePath = new Path("/test/csum");
-    // create args 
+    // create args
    Path path1 = new Path(testBasePath, "file_wtih_crc1");
    Path path2 = new Path(testBasePath, "file_with_crc2");
    ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
@@ -1543,13 +1543,13 @@ public void testCreateWithStoragePolicy() throws Throwable {
  public void testListFiles() throws IOException {
    Configuration conf = getTestConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    
+
    try {
      DistributedFileSystem fs = cluster.getFileSystem();
-      
+
      final Path relative = new Path("relative");
      fs.create(new Path(relative, "foo")).close();
-      
+
      final List retVal = new ArrayList<>();
      final RemoteIterator iter = fs.listFiles(relative, true);
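testFileChecksum, in the hunks above, leans on the fact that HDFS file checksums are content-derived: two files holding identical bytes yield equal, comparable FileChecksum objects. A hedged fragment of that core assertion (the paths are illustrative and assumed to hold the same data):

    FileChecksum c1 = hdfs.getFileChecksum(new Path("/demo/a"));
    FileChecksum c2 = hdfs.getFileChecksum(new Path("/demo/b"));
    assertEquals("same contents should give the same checksum", c1, c2);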
@@ -1590,7 +1590,7 @@ public void testDFSClientPeerReadTimeout() throws IOException {
    // only need cluster to create a dfs client to get a peer
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
-      cluster.waitActive(); 
+      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      // use a dummy socket to ensure the read times out
      ServerSocket socket = new ServerSocket(0);
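testDFSClientPeerReadTimeout, where this section ends, asserts that a read from a peer that never responds fails with SocketTimeoutException. The same behaviour can be demonstrated with plain JDK sockets; a self-contained sketch (names and the 1s timeout are illustrative):

    import java.net.ServerSocket;
    import java.net.Socket;
    import java.net.SocketTimeoutException;

    public class ReadTimeoutDemo {
      public static void main(String[] args) throws Exception {
        // The server accepts connections but never writes, so a read with a
        // 1s SO_TIMEOUT should throw SocketTimeoutException, which is the
        // condition the DFS peer test above asserts.
        try (ServerSocket server = new ServerSocket(0);
             Socket client = new Socket("127.0.0.1", server.getLocalPort())) {
          client.setSoTimeout(1000);
          try {
            client.getInputStream().read();
            System.out.println("unexpected: read returned");
          } catch (SocketTimeoutException expected) {
            System.out.println("read timed out as expected");
          }
        }
      }
    }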