diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2b55e4c14d..309f5d244e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -411,6 +411,9 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8180. Remove hsqldb since its not needed from pom.xml
     (Ravi Prakash via tgraves)
 
+    HADOOP-8014. ViewFileSystem does not correctly implement getDefaultBlockSize,
+    getDefaultReplication, getContentSummary (John George via bobby)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 61db1ed5e1..288a3033ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -579,7 +579,8 @@ public BlockLocation[] getFileBlockLocations(FileStatus file,
    *
    * The FileSystem will simply return an elt containing 'localhost'.
    *
-   * @param p path of file to get locations for
+   * @param p path is used to identify an FS since an FS could have
+   *          another FS that it could be delegating the call to
    * @param start offset into the given file
    * @param len length for which to get locations for
    */
@@ -602,10 +603,21 @@ public FsServerDefaults getServerDefaults() throws IOException {
     return new FsServerDefaults(getDefaultBlockSize(),
         conf.getInt("io.bytes.per.checksum", 512),
         64 * 1024,
-        getDefaultReplication(),
+        getDefaultReplication(),
         conf.getInt("io.file.buffer.size", 4096));
   }
-
+
+  /**
+   * Return a set of server default configuration values
+   * @param p path is used to identify an FS since an FS could have
+   *          another FS that it could be delegating the call to
+   * @return server default configuration values
+   * @throws IOException
+   */
+  public FsServerDefaults getServerDefaults(Path p) throws IOException {
+    return getServerDefaults();
+  }
+
   /**
    * Return the fully-qualified path of path f resolving the path
    * through any symlinks or mount point
@@ -653,8 +665,8 @@ public FSDataOutputStream create(Path f, boolean overwrite)
       throws IOException {
     return create(f, overwrite,
                   getConf().getInt("io.file.buffer.size", 4096),
-                  getDefaultReplication(),
-                  getDefaultBlockSize());
+                  getDefaultReplication(f),
+                  getDefaultBlockSize(f));
   }
 
   /**
@@ -668,8 +680,8 @@ public FSDataOutputStream create(Path f, Progressable progress)
       throws IOException {
     return create(f, true,
                   getConf().getInt("io.file.buffer.size", 4096),
-                  getDefaultReplication(),
-                  getDefaultBlockSize(), progress);
+                  getDefaultReplication(f),
+                  getDefaultBlockSize(f), progress);
   }
 
   /**
@@ -683,7 +695,7 @@ public FSDataOutputStream create(Path f, short replication)
     return create(f, true,
                   getConf().getInt("io.file.buffer.size", 4096),
                   replication,
-                  getDefaultBlockSize());
+                  getDefaultBlockSize(f));
   }
 
   /**
@@ -699,7 +711,7 @@ public FSDataOutputStream create(Path f, short replication,
     return create(f, true,
                   getConf().getInt("io.file.buffer.size", 4096),
                   replication,
-                  getDefaultBlockSize(), progress);
+                  getDefaultBlockSize(f), progress);
   }
 
@@ -715,8 +727,8 @@ public FSDataOutputStream create(Path f,
                                    int bufferSize
                                    ) throws IOException {
     return create(f, overwrite, bufferSize,
-                  getDefaultReplication(),
-                  getDefaultBlockSize());
+                  getDefaultReplication(f),
+                  getDefaultBlockSize(f));
   }
 
   /**
@@ -733,8 +745,8 @@ public FSDataOutputStream create(Path f,
                                    Progressable progress
                                    ) throws IOException {
     return create(f, overwrite, bufferSize,
-                  getDefaultReplication(),
-                  getDefaultBlockSize(), progress);
+                  getDefaultReplication(f),
+                  getDefaultBlockSize(f), progress);
   }
 
@@ -1916,11 +1928,31 @@ public long getDefaultBlockSize() {
     return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
   }
 
+  /** Return the number of bytes that large input files should be optimally
+   * be split into to minimize i/o time.  The given path will be used to
+   * locate the actual filesystem.  The full path does not have to exist.
+   * @param f path of file
+   * @return the default block size for the path's filesystem
+   */
+  public long getDefaultBlockSize(Path f) {
+    return getDefaultBlockSize();
+  }
+
   /**
    * Get the default replication.
    */
   public short getDefaultReplication() { return 1; }
 
+  /**
+   * Get the default replication for a path.  The given path will be used to
+   * locate the actual filesystem.  The full path does not have to exist.
+   * @param path of the file
+   * @return default replication for the path's filesystem
+   */
+  public short getDefaultReplication(Path path) {
+    return getDefaultReplication();
+  }
+
   /**
    * Return a file status object that represents the path.
    * @param f The path we want information from
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 91ee2ae710..1794c3d032 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
@@ -267,6 +268,7 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     return fs.mkdirs(f, permission);
   }
 
+
   /**
    * The src file is on the local disk. Add it to FS at
    * the given dst name.
@@ -336,19 +338,42 @@ public long getUsed() throws IOException{
     return fs.getUsed();
   }
 
-  /** Return the number of bytes that large input files should be optimally
-   * be split into to minimize i/o time. */
+  @Override
   public long getDefaultBlockSize() {
     return fs.getDefaultBlockSize();
   }
 
-  /**
-   * Get the default replication.
-   */
+  @Override
   public short getDefaultReplication() {
     return fs.getDefaultReplication();
   }
 
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    return fs.getServerDefaults();
+  }
+
+  // path variants delegate to underlying filesystem
+  @Override
+  public ContentSummary getContentSummary(Path f) throws IOException {
+    return fs.getContentSummary(f);
+  }
+
+  @Override
+  public long getDefaultBlockSize(Path f) {
+    return fs.getDefaultBlockSize(f);
+  }
+
+  @Override
+  public short getDefaultReplication(Path f) {
+    return fs.getDefaultReplication(f);
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults(Path f) throws IOException {
+    return fs.getServerDefaults(f);
+  }
+
   /**
    * Get file status.
    */
@@ -441,4 +466,4 @@ public List<Token<?>> getDelegationTokens(String renewer,
       Credentials credentials) throws IOException {
     return fs.getDelegationTokens(renewer, credentials);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 18ec724b7a..209fd216d1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -208,11 +209,6 @@ public FsStatus getStatus(Path p) throws IOException {
     return super.getStatus(fullPath(p));
   }
 
-  @Override
-  public FsServerDefaults getServerDefaults() throws IOException {
-    return super.getServerDefaults();
-  }
-
   @Override
   public FileStatus[] listStatus(final Path f) throws IOException {
@@ -273,4 +269,42 @@ public void setTimes(final Path f, final long mtime, final long atime)
   public Path resolvePath(final Path p) throws IOException {
     return super.resolvePath(fullPath(p));
   }
+
+  @Override
+  public ContentSummary getContentSummary(Path f) throws IOException {
+    return super.getContentSummary(fullPath(f));
+  }
+
+
+  private static Path rootPath = new Path(Path.SEPARATOR);
+
+  @Override
+  public long getDefaultBlockSize() {
+    return getDefaultBlockSize(fullPath(rootPath));
+  }
+
+  @Override
+  public long getDefaultBlockSize(Path f) {
+    return super.getDefaultBlockSize(fullPath(f));
+  }
+
+  @Override
+  public short getDefaultReplication() {
+    return getDefaultReplication(fullPath(rootPath));
+  }
+
+  @Override
+  public short getDefaultReplication(Path f) {
+    return super.getDefaultReplication(fullPath(f));
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    return getServerDefaults(fullPath(rootPath));
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults(Path f) throws IOException {
+    return super.getServerDefaults(fullPath(f));
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index b3d19f7734..c2bdaaad9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -41,6 +42,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -470,6 +472,57 @@ public void setVerifyChecksum(final boolean verifyChecksum) {
     }
   }
 
+  @Override
+  public long getDefaultBlockSize() {
+    throw new NotInMountpointException("getDefaultBlockSize");
+  }
+
+  @Override
+  public short getDefaultReplication() {
+    throw new NotInMountpointException("getDefaultReplication");
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    throw new NotInMountpointException("getServerDefaults");
+  }
+
+  @Override
+  public long getDefaultBlockSize(Path f) {
+    try {
+      InodeTree.ResolveResult<FileSystem> res =
+        fsState.resolve(getUriPath(f), true);
+      return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
+    } catch (FileNotFoundException e) {
+      throw new NotInMountpointException(f, "getDefaultBlockSize");
+    }
+  }
+
+  @Override
+  public short getDefaultReplication(Path f) {
+    try {
+      InodeTree.ResolveResult<FileSystem> res =
+        fsState.resolve(getUriPath(f), true);
+      return res.targetFileSystem.getDefaultReplication(res.remainingPath);
+    } catch (FileNotFoundException e) {
+      throw new NotInMountpointException(f, "getDefaultReplication");
+    }
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults(Path f) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(f), true);
+    return res.targetFileSystem.getServerDefaults(res.remainingPath);
+  }
+
+  @Override
+  public ContentSummary getContentSummary(Path f) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(f), true);
+    return res.targetFileSystem.getContentSummary(res.remainingPath);
+  }
+
   @Override
   public void setWriteChecksum(final boolean writeChecksum) {
     List<InodeTree.MountPoint<FileSystem>> mountPoints =
@@ -742,5 +795,20 @@ public void setTimes(Path f, long mtime, long atime)
     public void setVerifyChecksum(boolean verifyChecksum) {
       // Noop for viewfs
     }
+
+    @Override
+    public FsServerDefaults getServerDefaults(Path f) throws IOException {
+      throw new NotInMountpointException(f, "getServerDefaults");
+    }
+
+    @Override
+    public long getDefaultBlockSize(Path f) {
+      throw new NotInMountpointException(f, "getDefaultBlockSize");
+    }
+
+    @Override
+    public short getDefaultReplication(Path f) {
+      throw new NotInMountpointException(f, "getDefaultReplication");
+    }
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
index 05fec95631..035a0165fb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
@@ -36,6 +36,7 @@ public final class FileSystemTestHelper {
     System.getProperty("test.build.data", "target/test/data") + "/test";
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private static final int DEFAULT_NUM_BLOCKS = 2;
+  private static final short DEFAULT_NUM_REPL = 1;
   private static String absTestRootDir = null;
 
   /** Hidden constructor */
@@ -99,9 +100,9 @@ public static Path getDefaultWorkingDirectory(FileSystem fSys)
    * Create files with numBlocks blocks each with block size blockSize.
    */
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
-      int blockSize, boolean createParent) throws IOException {
+      int blockSize, short numRepl, boolean createParent) throws IOException {
     FSDataOutputStream out =
-      fSys.create(path, false, 4096, fSys.getDefaultReplication(), blockSize );
+      fSys.create(path, false, 4096, numRepl, blockSize );
 
     byte[] data = getFileData(numBlocks, blockSize);
     out.write(data, 0, data.length);
@@ -109,13 +110,19 @@ public static long createFile(FileSystem fSys, Path path, int numBlocks,
     return data.length;
   }
 
+
+  public static long createFile(FileSystem fSys, Path path, int numBlocks,
+      int blockSize, boolean createParent) throws IOException {
+    return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(), true);
+  }
+
   public static long createFile(FileSystem fSys, Path path, int numBlocks,
       int blockSize) throws IOException {
     return createFile(fSys, path, numBlocks, blockSize, true);
-  }
+  }
 
   public static long createFile(FileSystem fSys, Path path) throws IOException {
-    return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, true);
+    return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, DEFAULT_NUM_REPL, true);
   }
 
   public static long createFile(FileSystem fSys, String name) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
index c46ab96f37..127866be1b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
@@ -170,7 +171,15 @@ public void testRename() throws IOException {
     Assert.assertTrue(fSys.isDirectory(FileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
     Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
   }
-  
+
+  @Test
+  public void testGetContentSummary() throws IOException {
+    // GetContentSummary of a dir
+    fSys.mkdirs(new Path("/newDir/dirFoo"));
+    ContentSummary cs = fSys.getContentSummary(new Path("/newDir/dirFoo"));
+    Assert.assertEquals(-1L, cs.getQuota());
+    Assert.assertEquals(-1L, cs.getSpaceQuota());
+  }
 
   /**
    * We would have liked renames across file system to fail but
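
For context, here is the client-visible effect of the patch, sketched against a hypothetical viewfs mount table (the mount-table links, namenode authorities, and class name below are invented for illustration; only the FileSystem calls are real API). The path argument added by this patch selects the backing filesystem, while the no-arg variants now fail fast on viewfs because no single answer exists across mount points:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.viewfs.NotInMountpointException;

    public class ViewFsDefaultsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Two mount points backed by different clusters, each of which may
        // advertise different default block sizes and replication factors.
        conf.set("fs.viewfs.mounttable.default.link./one", "hdfs://nnOne/one");
        conf.set("fs.viewfs.mounttable.default.link./two", "hdfs://nnTwo/two");
        FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);

        // The path selects the target filesystem; the file need not exist.
        long blockSize = viewFs.getDefaultBlockSize(new Path("/one/somefile"));
        short repl = viewFs.getDefaultReplication(new Path("/two/somefile"));
        System.out.println(blockSize + " / " + repl);

        // No path means no mount point, so viewfs now throws rather than
        // silently returning FileSystem's hardcoded defaults.
        try {
          viewFs.getDefaultBlockSize();
        } catch (NotInMountpointException e) {
          System.out.println("expected: " + e.getMessage());
        }
      }
    }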
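The FilterFileSystem overrides are easy to overlook but necessary: the base implementations of the new Path overloads simply drop the path and call the no-arg variant on this, so a wrapper that did not forward them would lose the path before it ever reached the wrapped filesystem, defeating the point for a path-sensitive target like viewfs. A minimal sketch (the wrapper class is hypothetical) of a filter that now inherits correct per-path behavior with no code of its own:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FilterFileSystem;

    // Hypothetical no-op wrapper. Because FilterFileSystem now forwards
    // getDefaultBlockSize(Path), getDefaultReplication(Path),
    // getServerDefaults(Path) and getContentSummary(Path) to the wrapped
    // filesystem, the path survives the wrapping and per-mount defaults
    // still come back correctly.
    class PassthroughFileSystem extends FilterFileSystem {
      public PassthroughFileSystem(FileSystem fs) {
        super(fs);
      }
    }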
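In ChRootedFileSystem every call funnels through fullPath(), which prefixes the chroot onto the client-visible path before delegating; after this patch even the no-arg variants route through fullPath(rootPath), so they are answered for the chroot directory itself rather than for whatever the underlying filesystem would say about its own root. A simplified, standalone rendering of the prefixing rule (the real logic lives in ChRootedFileSystem; chRoot here stands in for its chroot prefix, and only absolute paths are shown):

    import org.apache.hadoop.fs.Path;

    public class FullPathSketch {
      // A client path /bar under a chroot of /foo becomes /foo/bar on the
      // underlying filesystem before any call is delegated.
      static Path fullPath(String chRoot, Path p) {
        return new Path(chRoot + p.toUri().getPath());
      }

      public static void main(String[] args) {
        System.out.println(fullPath("/foo", new Path("/bar"))); // /foo/bar
        System.out.println(fullPath("/foo", new Path("/")));    // /foo
      }
    }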
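The assertions in testGetContentSummary rely on a ContentSummary convention: getQuota() and getSpaceQuota() return -1 when no quota is set, which is what FileSystem's generic getContentSummary() reports for a directory on a filesystem without quota support. The same check against a plain local filesystem, for comparison (paths are arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ContentSummarySketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dir = new Path("target/test/data/csSketch");
        fs.mkdirs(dir);
        ContentSummary cs = fs.getContentSummary(dir);
        // -1 is the "no quota set" sentinel for both namespace and space.
        System.out.println(cs.getQuota() + " " + cs.getSpaceQuota());
      }
    }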
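On the test side, FileSystemTestHelper.createFile now threads an explicit replication factor down to FileSystem.create, so callers are no longer tied to the no-arg getDefaultReplication() that viewfs rejects. Two details of the new delegating overload worth flagging: it still calls fSys.getDefaultReplication() without a path, and it passes a literal true where its own createParent parameter was presumably meant. Illustrative use of the new six-argument variant (local filesystem and paths are arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileSystemTestHelper;
    import org.apache.hadoop.fs.Path;

    public class CreateFileSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        // An explicit numRepl avoids any call to the no-arg
        // getDefaultReplication(), which a viewfs instance would reject.
        long len = FileSystemTestHelper.createFile(
            fs, new Path("target/test/data/createSketch"),
            2 /* numBlocks */, 1024 /* blockSize */,
            (short) 1 /* numRepl */, true /* createParent */);
        System.out.println("wrote " + len + " bytes");
      }
    }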