diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e533f427bb..39e0192cab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -122,6 +122,8 @@ Trunk (Unreleased)
 
     HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth)
 
+    HDFS-4772. Add number of children in HdfsFileStatus. (brandonli)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 7d1bd32ba8..2d8cd24ec5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -42,6 +42,9 @@ public class HdfsFileStatus {
   private String group;
   private long fileId;
 
+  // Number of children of a directory, excluding "." and "..". Always zero for a regular file.
+  private int childrenNum;
+
   public static final byte[] EMPTY_NAME = new byte[0];
 
   /**
@@ -61,7 +64,7 @@ public class HdfsFileStatus {
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId) {
+      byte[] symlink, byte[] path, long fileId, int childrenNum) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -78,6 +81,7 @@ public HdfsFileStatus(long length, boolean isdir, int block_replication,
     this.symlink = symlink;
     this.path = path;
     this.fileId = fileId;
+    this.childrenNum = childrenNum;
   }
 
   /**
@@ -230,4 +234,8 @@ final public byte[] getSymlinkInBytes() {
   final public long getFileId() {
     return fileId;
   }
+
+  final public int getChildrenNum() {
+    return childrenNum;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 987ba42091..0949cece7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -50,9 +50,11 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
 
   public HdfsLocatedFileStatus(long length, boolean isdir,
       int block_replication, long blocksize, long modification_time,
       long access_time, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations) {
+      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
+      int childrenNum) {
     super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path, fileId);
+        access_time, permission, owner, group, symlink, path, fileId,
+        childrenNum);
     this.locations = locations;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 2300fc31d9..ef0f894baf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -57,10 +57,11 @@ public int compare(SnapshottableDirectoryStatus left,
 
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] localName,
-      long inodeId,
+      long inodeId, int childrenNum,
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
-        access_time, permission, owner, group, null, localName, inodeId);
+        access_time, permission, owner, group, null, localName, inodeId,
+        childrenNum);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 3927106048..88b0a97ead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1055,7 +1055,8 @@ public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
         fs.getSymlink().toByteArray() : null,
         fs.getPath().toByteArray(),
         fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
-        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
+        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
+        fs.hasChildrenNum() ? fs.getChildrenNum() : 0);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1072,6 +1073,7 @@ public static SnapshottableDirectoryStatus convert(
         status.getGroup(),
         status.getPath().toByteArray(),
         status.getFileId(),
+        status.getChildrenNum(),
         sdirStatusProto.getSnapshotNumber(),
         sdirStatusProto.getSnapshotQuota(),
         sdirStatusProto.getParentFullpath().toByteArray());
@@ -1099,6 +1101,7 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
       setFileId(fs.getFileId()).
+      setChildrenNum(fs.getChildrenNum()).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
     if (fs.isSymlink()) {
       builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 63b603c268..917baa8902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1582,6 +1582,13 @@ HdfsFileStatus getFileInfo(String src, boolean resolveLink)
     }
   }
 
+  /**
+   * Currently we only support "ls /xxx/.snapshot", which returns all the
+   * snapshots of a directory. The FsCommand Ls first calls getFileInfo to
+   * make sure the file/directory exists (before the actual getListing call).
+   * Since there is no real INode for ".snapshot", we return a non-null but
+   * empty HdfsFileStatus here.
+   */
   private HdfsFileStatus getFileInfo4DotSnapshot(String src)
       throws UnresolvedLinkException {
     Preconditions.checkArgument(
@@ -1596,7 +1603,7 @@ private HdfsFileStatus getFileInfo4DotSnapshot(String src)
         && node.isDirectory()
         && node.asDirectory() instanceof INodeDirectorySnapshottable) {
       return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
-          HdfsFileStatus.EMPTY_NAME, -1L);
+          HdfsFileStatus.EMPTY_NAME, -1L, 0);
     }
     return null;
   }
@@ -2521,6 +2528,9 @@ private HdfsFileStatus createFileStatus(byte[] path, INode node,
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();
     }
+    int childrenNum = node.isDirectory() ?
+        node.asDirectory().getChildrenNum(snapshot) : 0;
+
     return new HdfsFileStatus(
         size,
         node.isDirectory(),
@@ -2533,7 +2543,8 @@ private HdfsFileStatus createFileStatus(byte[] path, INode node,
         node.getGroupName(snapshot),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
         path,
-        node.getId());
+        node.getId(),
+        childrenNum);
   }
 
   /**
@@ -2563,12 +2574,15 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
           loc = new LocatedBlocks();
         }
       }
+    int childrenNum = node.isDirectory() ?
+        node.asDirectory().getChildrenNum(snapshot) : 0;
+
     return new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
         blocksize, node.getModificationTime(snapshot),
         node.getAccessTime(snapshot), node.getFsPermission(snapshot),
         node.getUserName(snapshot), node.getGroupName(snapshot),
         node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-        node.getId(), loc);
+        node.getId(), loc, childrenNum);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index f2e5a1207d..c1feb6ab50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -655,4 +655,8 @@ public SnapshotAndINode(Snapshot snapshot) {
       this(snapshot, snapshot.getRoot());
     }
   }
+
+  public final int getChildrenNum(final Snapshot snapshot) {
+    return getChildrenList(snapshot).size();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index 2ea5f354ff..e1759d1adc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -130,6 +130,7 @@ public LinkedElement getNext() {
   }
 
   /** Get inode id */
+  @Override
   public final long getId() {
     return this.id;
   }
@@ -226,6 +227,7 @@ final long getModificationTime(Snapshot snapshot) {
 
   /** Update modification time if it is larger than the current value. */
+  @Override
   public final INode updateModificationTime(long mtime, Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
     Preconditions.checkState(isDirectory());
@@ -256,6 +258,7 @@ final long getAccessTime(Snapshot snapshot) {
 
   /**
    * Set last access time of inode.
   */
+  @Override
   public final void setAccessTime(long accessTime) {
     this.accessTime = accessTime;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index fc34dee191..a765db31f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -325,7 +325,8 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing(
       SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
           dir.getModificationTime(), dir.getAccessTime(),
           dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
-          dir.getLocalNameBytes(), dir.getId(), dir.getNumSnapshots(),
+          dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(null),
+          dir.getNumSnapshots(),
           dir.getSnapshotQuota(), dir.getParent() == null ?
              DFSUtil.EMPTY_BYTES :
              DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 5d1d33f910..623ac89222 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -221,6 +221,7 @@ public static String toJsonString(final HdfsFileStatus status,
     m.put("blockSize", status.getBlockSize());
     m.put("replication", status.getReplication());
     m.put("fileId", status.getFileId());
+    m.put("childrenNum", status.getChildrenNum());
     return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
   }
 
@@ -247,9 +248,10 @@ public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean include
     final short replication = (short) (long) (Long) m.get("replication");
     final long fileId = m.containsKey("fileId") ? (Long) m.get("fileId")
         : INodeId.GRANDFATHER_INODE_ID;
+    final int childrenNum = (int) (long) (Long) m.get("childrenNum");
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId);
+        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum);
   }
 
   /** Convert an ExtendedBlock to a Json map.
   */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 742c090308..88c6be2379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -173,6 +173,7 @@ message HdfsFileStatusProto {
 
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
+  optional uint32 childrenNum = 14 [default = 0];
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 89c08966b1..f74bf04501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -252,12 +252,12 @@ public Object answer(InvocationOnMock invocation)
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010)).when(mockNN).getFileInfo(anyString());
+            1010, 0)).when(mockNN).getFileInfo(anyString());
 
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010))
+            1010, 0))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index 0e0d33cda3..147cf0bed1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -24,7 +24,6 @@
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +36,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -105,6 +103,16 @@ public void testGetFileInfo() throws IOException {
     // Make sure getFileInfo returns null for files which do not exist
     HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
     assertEquals("Non-existant file should result in null", null, fileInfo);
+
+    Path path1 = new Path("/name1");
+    Path path2 = new Path("/name1/name2");
+    assertTrue(fs.mkdirs(path1));
+    FSDataOutputStream out = fs.create(path2, false);
+    out.close();
+    fileInfo = dfsClient.getFileInfo(path1.toString());
+    assertEquals(1, fileInfo.getChildrenNum());
+    fileInfo = dfsClient.getFileInfo(path2.toString());
+    assertEquals(0, fileInfo.getChildrenNum());
 
     // Test getFileInfo throws the right exception given a non-absolute path.
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 8cb9d42cd0..276343985a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -294,12 +294,12 @@ public void testFactory() throws Exception {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010)).when(mcp).getFileInfo(anyString());
+            1010, 0)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010))
+                1010, 0))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 4e77c07fed..5460047a2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -46,7 +46,7 @@ public void testHdfsFileStatus() {
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        INodeId.GRANDFATHER_INODE_ID);
+        INodeId.GRANDFATHER_INODE_ID, 0);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);
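
Reviewer note (not part of the patch): a minimal client-side sketch of how the new field can be read once this change is applied, mirroring the new TestFileStatus case. The class name and the "/name1" path are illustrative, and fs.defaultFS is assumed to point at a running NameNode.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class ChildrenNumExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Connect to the NameNode named by fs.defaultFS.
    DFSClient client = new DFSClient(FileSystem.getDefaultUri(conf), conf);
    try {
      // For a directory, getChildrenNum() is the number of direct children
      // (excluding "." and ".."); for a regular file it is always zero.
      HdfsFileStatus status = client.getFileInfo("/name1");
      if (status != null) {
        System.out.println("childrenNum = " + status.getChildrenNum());
      }
    } finally {
      client.close();
    }
  }
}

Over WebHDFS the same value shows up as a "childrenNum" entry in the FileStatus JSON, since JsonUtil now writes and reads it alongside "fileId".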