From 2624b20291629b4565ea45590b66f2c38f96df67 Mon Sep 17 00:00:00 2001
From: Jing Zhao <jing9@apache.org>
Date: Thu, 13 Feb 2014 00:00:42 +0000
Subject: [PATCH] HDFS-5847. Consolidate INodeReference into a separate
 section. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1567812 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 .../server/namenode/FSImageFormatPBINode.java |  59 ++------
 .../namenode/FSImageFormatProtobuf.java       |  20 ++-
 .../snapshot/FSImageFormatPBSnapshot.java     | 135 +++++++++++++-----
 .../tools/offlineImageViewer/LsrPBImage.java  |  27 +++-
 .../offlineImageViewer/PBImageXmlWriter.java  |  38 +++--
 .../hadoop-hdfs/src/main/proto/fsimage.proto  |  36 ++---
 7 files changed, 198 insertions(+), 119 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 847c98d862..e9f915e04c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -338,6 +338,8 @@ Trunk (Unreleased)
     HDFS-5915. Refactor FSImageFormatProtobuf to simplify cross section reads.
     (Haohui Mai via cnauroth)
 
+    HDFS-5847. Consolidate INodeReference into a separate section. (jing9)
+
 Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 43bbfdbc7f..bbb73c5c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -43,9 +43,6 @@
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
-import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
-import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
-import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
@@ -69,25 +66,6 @@ public static PermissionStatus loadPermission(long id,
         new FsPermission(perm));
   }
 
-  public static INodeReference loadINodeReference(
-      INodeSection.INodeReference r, FSDirectory dir) throws IOException {
-    long referredId = r.getReferredId();
-    INode referred = dir.getInode(referredId);
-    WithCount withCount = (WithCount) referred.getParentReference();
-    if (withCount == null) {
-      withCount = new INodeReference.WithCount(null, referred);
-    }
-    final INodeReference ref;
-    if (r.hasDstSnapshotId()) { // DstReference
-      ref = new INodeReference.DstReference(null, withCount,
-          r.getDstSnapshotId());
-    } else {
-      ref = new INodeReference.WithName(null, withCount, r.getName()
-          .toByteArray(), r.getLastSnapshotId());
-    }
-    return ref;
-  }
-
   public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
       final String[] stringTable) {
     assert n.getType() == INodeSection.INode.Type.DIRECTORY;
@@ -126,6 +104,8 @@ public static void updateBlocksMap(INodeFile file, BlockManager bm) {
     }
 
     void loadINodeDirectorySection(InputStream in) throws IOException {
+      final List<INodeReference> refList = parent.getLoaderContext()
+          .getRefList();
       while (true) {
         INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
             .parseDelimitedFrom(in);
@@ -138,20 +118,13 @@ void loadINodeDirectorySection(InputStream in) throws IOException {
           INode child = dir.getInode(id);
          addToParent(p, child);
         }
-        for (int i = 0; i < e.getNumOfRef(); i++) {
-          INodeReference ref = loadINodeReference(in);
+        for (int refId : e.getRefChildrenList()) {
+          INodeReference ref = refList.get(refId);
           addToParent(p, ref);
         }
       }
     }
 
-    private INodeReference loadINodeReference(InputStream in)
-        throws IOException {
-      INodeSection.INodeReference ref = INodeSection.INodeReference
-          .parseDelimitedFrom(in);
-      return loadINodeReference(ref, dir);
-    }
-
     void loadINodeSection(InputStream in) throws IOException {
       INodeSection s = INodeSection.parseDelimitedFrom(in);
       fsn.resetLastInodeId(s.getLastInodeId());
@@ -306,19 +279,6 @@ public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
       return b;
     }
 
-    public static INodeSection.INodeReference.Builder buildINodeReference(
-        INodeReference ref) throws IOException {
-      INodeSection.INodeReference.Builder rb = INodeSection.INodeReference
-          .newBuilder().setReferredId(ref.getId());
-      if (ref instanceof WithName) {
-        rb.setLastSnapshotId(((WithName) ref).getLastSnapshotId()).setName(
-            ByteString.copyFrom(ref.getLocalNameBytes()));
-      } else if (ref instanceof DstReference) {
-        rb.setDstSnapshotId(((DstReference) ref).getDstSnapshotId());
-      }
-      return rb;
-    }
-
     private final FSNamesystem fsn;
     private final FileSummary.Builder summary;
     private final SaveNamespaceContext context;
@@ -334,6 +294,8 @@ public static INodeSection.INodeReference.Builder buildINodeReference(
     void serializeINodeDirectorySection(OutputStream out) throws IOException {
       Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
           .getINodeMap().getMapIterator();
+      final ArrayList<INodeReference> refList = parent.getSaverContext()
+          .getRefList();
       int i = 0;
       while (iter.hasNext()) {
         INodeWithAdditionalFields n = iter.next();
@@ -346,21 +308,16 @@ void serializeINodeDirectorySection(OutputStream out) throws IOException {
         if (children.size() > 0) {
           INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
               DirEntry.newBuilder().setParent(n.getId());
-          List<INodeReference> refs = new ArrayList<INodeReference>();
           for (INode inode : children) {
             if (!inode.isReference()) {
               b.addChildren(inode.getId());
             } else {
-              refs.add(inode.asReference());
+              refList.add(inode.asReference());
+              b.addRefChildren(refList.size() - 1);
             }
           }
-          b.setNumOfRef(refs.size());
           INodeDirectorySection.DirEntry e = b.build();
           e.writeDelimitedTo(out);
-          for (INodeReference ref : refs) {
-            INodeSection.INodeReference.Builder rb = buildINodeReference(ref);
-            rb.build().writeDelimitedTo(out);
-          }
         }
 
         ++i;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index c03ba60641..92245434cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -75,10 +75,15 @@ public final class FSImageFormatProtobuf {
 
   public static final class LoaderContext {
     private String[] stringTable;
+    private final ArrayList<INodeReference> refList = Lists.newArrayList();
 
     public String[] getStringTable() {
       return stringTable;
     }
+
+    public ArrayList<INodeReference> getRefList() {
+      return refList;
+    }
   }
 
   public static final class SaverContext {
@@ -112,10 +117,15 @@ Set<Entry<E, Integer>> entrySet() {
       }
     }
     private final DeduplicationMap<String> stringMap = DeduplicationMap.newMap();
+    private final ArrayList<INodeReference> refList = Lists.newArrayList();
 
     public DeduplicationMap<String> getStringMap() {
       return stringMap;
     }
+
+    public ArrayList<INodeReference> getRefList() {
+      return refList;
+    }
   }
 
   public static final class Loader implements FSImageFormat.AbstractLoader {
@@ -123,7 +133,6 @@ public static final class Loader implements FSImageFormat.AbstractLoader {
     private final Configuration conf;
     private final FSNamesystem fsn;
     private final LoaderContext ctx;
-
     /** The MD5 sum of the loaded file */
     private MD5Hash imgDigest;
     /** The transaction ID of the last edit represented by the loaded file */
@@ -226,6 +235,9 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
             inodeLoader.loadINodeSection(in);
           }
           break;
+        case INODE_REFRENCE:
+          snapshotLoader.loadINodeReferenceSection(in);
+          break;
         case INODE_DIR:
           inodeLoader.loadINodeDirectorySection(in);
           break;
@@ -313,9 +325,10 @@ private void loadCacheManagerSection(InputStream in) throws IOException {
   }
 
   public static final class Saver {
+    public static final int CHECK_CANCEL_INTERVAL = 4096;
+
     private final SaveNamespaceContext context;
     private final SaverContext saverContext;
-
     private long currentOffset = FSImageUtil.MAGIC_HEADER.length;
     private MD5Hash savedDigest;
 
@@ -324,7 +337,6 @@ public static final class Saver {
     private OutputStream sectionOutputStream;
     private CompressionCodec codec;
     private OutputStream underlyingOutputStream;
-    public static final int CHECK_CANCEL_INTERVAL = 4096;
 
     Saver(SaveNamespaceContext context) {
       this.context = context;
@@ -400,6 +412,7 @@ private void saveSnapshots(FileSummary.Builder summary) throws IOException {
 
       snapshotSaver.serializeSnapshotSection(sectionOutputStream);
       snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
+      snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
     }
 
     private void saveInternal(FileOutputStream fout,
@@ -535,6 +548,7 @@ public enum SectionName {
     NS_INFO("NS_INFO"),
     STRING_TABLE("STRING_TABLE"),
     INODE("INODE"),
+    INODE_REFRENCE("INODE_REFRENCE"),
     SNAPSHOT("SNAPSHOT"),
     INODE_DIR("INODE_DIR"),
     FILES_UNDERCONSTRUCTION("FILES_UNDERCONSTRUCTION"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index b64a3db932..660b0dc274 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -18,12 +18,10 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadINodeDirectory;
-import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadINodeReference;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.loadPermission;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Loader.updateBlocksMap;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeDirectory;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeFile;
-import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.Saver.buildINodeReference;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -40,8 +38,10 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.CreatedListEntry;
@@ -54,6 +54,9 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeFileAttributes;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields;
 import org.apache.hadoop.hdfs.server.namenode.SaveNamespaceContext;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
@@ -75,7 +78,6 @@ public final static class Loader {
     private final FSImageFormatProtobuf.Loader parent;
     private final Map<Integer, Snapshot> snapshotMap;
 
-
     public Loader(FSNamesystem fsn, FSImageFormatProtobuf.Loader parent) {
       this.fsn = fsn;
       this.fsDir = fsn.getFSDirectory();
@@ -83,6 +85,43 @@ public Loader(FSNamesystem fsn, FSImageFormatProtobuf.Loader parent) {
       this.parent = parent;
     }
 
+    /**
+     * The sequence of the ref nodes in refList must be strictly the same as
+     * the sequence in fsimage.
+     */
+    public void loadINodeReferenceSection(InputStream in) throws IOException {
+      final List<INodeReference> refList = parent.getLoaderContext()
+          .getRefList();
+      while (true) {
+        INodeReferenceSection.INodeReference e = INodeReferenceSection
+            .INodeReference.parseDelimitedFrom(in);
+        if (e == null) {
+          break;
+        }
+        INodeReference ref = loadINodeReference(e);
+        refList.add(ref);
+      }
+    }
+
+    private INodeReference loadINodeReference(
+        INodeReferenceSection.INodeReference r) throws IOException {
+      long referredId = r.getReferredId();
+      INode referred = fsDir.getInode(referredId);
+      WithCount withCount = (WithCount) referred.getParentReference();
+      if (withCount == null) {
+        withCount = new INodeReference.WithCount(null, referred);
+      }
+      final INodeReference ref;
+      if (r.hasDstSnapshotId()) { // DstReference
+        ref = new INodeReference.DstReference(null, withCount,
+            r.getDstSnapshotId());
+      } else {
+        ref = new INodeReference.WithName(null, withCount, r.getName()
+            .toByteArray(), r.getLastSnapshotId());
+      }
+      return ref;
+    }
+
     /**
      * Load the snapshots section from fsimage. Also convert snapshottable
      * directories into {@link INodeDirectorySnapshottable}.
@@ -131,6 +170,8 @@ private void loadSnapshots(InputStream in, int size) throws IOException {
      * Load the snapshot diff section from fsimage.
      */
     public void loadSnapshotDiffSection(InputStream in) throws IOException {
+      final List<INodeReference> refList = parent.getLoaderContext()
+          .getRefList();
       while (true) {
         SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
             .parseDelimitedFrom(in);
@@ -145,7 +186,8 @@ public void loadSnapshotDiffSection(InputStream in) throws IOException {
           loadFileDiffList(in, inode.asFile(), entry.getNumOfDiff());
           break;
         case DIRECTORYDIFF:
-          loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff());
+          loadDirectoryDiffList(in, inode.asDirectory(), entry.getNumOfDiff(),
+              refList);
           break;
         }
       }
@@ -199,13 +241,13 @@ private void addToDeletedList(INode dnode, INodeDirectory parent) {
 
     /**
      * Load the deleted list in a DirectoryDiff
-     * @param totalSize the total size of the deleted list
-     * @param deletedNodes non-reference inodes in the deleted list. These
-     *        inodes' ids are directly recorded in protobuf
      */
-    private List<INode> loadDeletedList(InputStream in, INodeDirectory dir,
-        int refNum, List<Long> deletedNodes) throws IOException {
-      List<INode> dlist = new ArrayList<INode>(refNum + deletedNodes.size());
+    private List<INode> loadDeletedList(final List<INodeReference> refList,
+        InputStream in, INodeDirectory dir, List<Long> deletedNodes,
+        List<Integer> deletedRefNodes)
+        throws IOException {
+      List<INode> dlist = new ArrayList<INode>(deletedRefNodes.size()
+          + deletedNodes.size());
       // load non-reference inodes
       for (long deletedId : deletedNodes) {
         INode deleted = fsDir.getInode(deletedId);
@@ -213,13 +255,12 @@ private List<INode> loadDeletedList(InputStream in, INodeDirectory dir,
         addToDeletedList(deleted, dir);
       }
       // load reference nodes in the deleted list
-      for (int r = 0; r < refNum; r++) {
-        INodeSection.INodeReference ref = INodeSection.INodeReference
-            .parseDelimitedFrom(in);
-        INodeReference refNode = loadINodeReference(ref, fsDir);
-        dlist.add(refNode);
-        addToDeletedList(refNode, dir);
+      for (int refId : deletedRefNodes) {
+        INodeReference deletedRef = refList.get(refId);
+        dlist.add(deletedRef);
+        addToDeletedList(deletedRef, dir);
       }
+
       Collections.sort(dlist, new Comparator<INode>() {
         @Override
         public int compare(INode n1, INode n2) {
@@ -231,7 +272,7 @@ public int compare(INode n1, INode n2) {
 
     /** Load DirectoryDiff list for a directory with snapshot feature */
     private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
-        int size) throws IOException {
+        int size, final List<INodeReference> refList) throws IOException {
       if (!dir.isWithSnapshot()) {
         dir.addSnapshotFeature(null);
       }
@@ -247,7 +288,7 @@ private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
         INodeDirectoryAttributes copy = null;
         if (useRoot) {
           copy = snapshot.getRoot();
-        }else if (diffInPb.hasSnapshotCopy()) {
+        } else if (diffInPb.hasSnapshotCopy()) {
           INodeSection.INodeDirectory dirCopyInPb = diffInPb.getSnapshotCopy();
           final byte[] name = diffInPb.getName().toByteArray();
           PermissionStatus permission = loadPermission(
@@ -265,8 +306,8 @@ private void loadDirectoryDiffList(InputStream in, INodeDirectory dir,
         List<INode> clist = loadCreatedList(in, dir,
             diffInPb.getCreatedListSize());
         // load deleted list
-        List<INode> dlist = loadDeletedList(in, dir,
-            diffInPb.getNumOfDeletedRef(), diffInPb.getDeletedINodeList());
+        List<INode> dlist = loadDeletedList(refList, in, dir,
+            diffInPb.getDeletedINodeList(), diffInPb.getDeletedINodeRefList());
         // create the directory diff
         DirectoryDiff diff = new DirectoryDiff(snapshotId, copy, null,
             childrenSize, clist, dlist, useRoot);
@@ -285,7 +326,8 @@ public final static class Saver {
     private final SaveNamespaceContext context;
 
     public Saver(FSImageFormatProtobuf.Saver parent,
-        FileSummary.Builder headers, SaveNamespaceContext context, FSNamesystem fsn) {
+        FileSummary.Builder headers, SaveNamespaceContext context,
+        FSNamesystem fsn) {
       this.parent = parent;
       this.headers = headers;
       this.context = context;
@@ -330,12 +372,42 @@ public void serializeSnapshotSection(OutputStream out) throws IOException {
       parent.commitSection(headers, FSImageFormatProtobuf.SectionName.SNAPSHOT);
     }
 
+    /**
+     * This can only be called after serializing both INode_Dir and
+     * SnapshotDiff.
+     */
+    public void serializeINodeReferenceSection(OutputStream out)
+        throws IOException {
+      final List<INodeReference> refList = parent.getSaverContext()
+          .getRefList();
+      for (INodeReference ref : refList) {
+        INodeReferenceSection.INodeReference.Builder rb = buildINodeReference(ref);
+        rb.build().writeDelimitedTo(out);
+      }
+      parent.commitSection(headers,
+          SectionName.INODE_REFRENCE);
+    }
+
+    private INodeReferenceSection.INodeReference.Builder buildINodeReference(
+        INodeReference ref) throws IOException {
+      INodeReferenceSection.INodeReference.Builder rb =
+          INodeReferenceSection.INodeReference.newBuilder().
+          setReferredId(ref.getId());
+      if (ref instanceof WithName) {
+        rb.setLastSnapshotId(((WithName) ref).getLastSnapshotId()).setName(
+            ByteString.copyFrom(ref.getLocalNameBytes()));
+      } else if (ref instanceof DstReference) {
+        rb.setDstSnapshotId(((DstReference) ref).getDstSnapshotId());
+      }
+      return rb;
+    }
+
     /**
      * save all the snapshot diff to fsimage
      */
     public void serializeSnapshotDiffSection(OutputStream out)
         throws IOException {
       INodeMap inodesMap = fsn.getFSDirectory().getINodeMap();
+      final List<INodeReference> refList = parent.getSaverContext()
+          .getRefList();
       int i = 0;
       Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
       while (iter.hasNext()) {
@@ -343,7 +415,7 @@ public void serializeSnapshotDiffSection(OutputStream out)
         if (inode.isFile()) {
           serializeFileDiffList(inode.asFile(), out);
         } else if (inode.isDirectory()) {
-          serializeDirDiffList(inode.asDirectory(), out);
+          serializeDirDiffList(inode.asDirectory(), refList, out);
         }
         ++i;
         if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
@@ -378,22 +450,18 @@ private void serializeFileDiffList(INodeFile file, OutputStream out)
       }
     }
 
-    private void saveCreatedDeletedList(List<INode> created,
-        List<INodeReference> deletedRefs, OutputStream out) throws IOException {
+    private void saveCreatedList(List<INode> created, OutputStream out)
+        throws IOException {
       // local names of the created list member
       for (INode c : created) {
         SnapshotDiffSection.CreatedListEntry.newBuilder()
             .setName(ByteString.copyFrom(c.getLocalNameBytes())).build()
            .writeDelimitedTo(out);
       }
-      // reference nodes in deleted list
-      for (INodeReference ref : deletedRefs) {
-        INodeSection.INodeReference.Builder rb = buildINodeReference(ref);
-        rb.build().writeDelimitedTo(out);
-      }
     }
 
-    private void serializeDirDiffList(INodeDirectory dir, OutputStream out)
+    private void serializeDirDiffList(INodeDirectory dir,
+        final List<INodeReference> refList, OutputStream out)
         throws IOException {
       DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
       if (sf != null) {
@@ -419,17 +487,16 @@ private void serializeDirDiffList(INodeDirectory dir, OutputStream out)
               .getList(ListType.CREATED);
           db.setCreatedListSize(created.size());
           List<INode> deleted = diff.getChildrenDiff().getList(ListType.DELETED);
-          List<INodeReference> refs = new ArrayList<INodeReference>();
           for (INode d : deleted) {
             if (d.isReference()) {
-              refs.add(d.asReference());
+              refList.add(d.asReference());
+              db.addDeletedINodeRef(refList.size() - 1);
             } else {
               db.addDeletedINode(d.getId());
             }
           }
-          db.setNumOfDeletedRef(refs.size());
           db.build().writeDelimitedTo(out);
-          saveCreatedDeletedList(created, refs, out);
+          saveCreatedList(created, out);
         }
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
index e467725646..61c4d5e22c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
@@ -79,6 +80,7 @@ final class LsrPBImage {
   private String[] stringTable;
   private HashMap<Long, INodeSection.INode> inodes = Maps.newHashMap();
   private HashMap<Long, long[]> dirmap = Maps.newHashMap();
+  private ArrayList<INodeReferenceSection.INodeReference> refList = Lists.newArrayList();
 
   public LsrPBImage(Configuration conf, PrintWriter out) {
     this.conf = conf;
@@ -125,6 +127,9 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
       case INODE:
         loadINodeSection(is);
         break;
+      case INODE_REFRENCE:
+        loadINodeReferenceSection(is);
+        break;
       case INODE_DIR:
         loadINodeDirectorySection(is);
         break;
@@ -202,14 +207,26 @@ private void loadINodeDirectorySection(InputStream in) throws IOException {
       if (e == null) {
         break;
       }
-      long[] l = new long[e.getChildrenCount()];
-      for (int i = 0; i < l.length; ++i) {
+      long[] l = new long[e.getChildrenCount() + e.getRefChildrenCount()];
+      for (int i = 0; i < e.getChildrenCount(); ++i) {
         l[i] = e.getChildren(i);
       }
-      dirmap.put(e.getParent(), l);
-      for (int i = 0; i < e.getNumOfRef(); i++) {
-        INodeSection.INodeReference.parseDelimitedFrom(in);
+      for (int i = e.getChildrenCount(); i < l.length; i++) {
+        int refId = e.getRefChildren(i - e.getChildrenCount());
+        l[i] = refList.get(refId).getReferredId();
       }
+      dirmap.put(e.getParent(), l);
+    }
+  }
+
+  private void loadINodeReferenceSection(InputStream in) throws IOException {
+    while (true) {
+      INodeReferenceSection.INodeReference e = INodeReferenceSection
+          .INodeReference.parseDelimitedFrom(in);
+      if (e == null) {
+        break;
+      }
+      refList.add(e);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 7ebf1196c4..d70f63710b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
@@ -132,6 +133,9 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
       case INODE:
         dumpINodeSection(is);
         break;
+      case INODE_REFRENCE:
+        dumpINodeReferenceSection(is);
+        break;
       case INODE_DIR:
         dumpINodeDirectorySection(is);
         break;
@@ -227,18 +231,27 @@ private void dumpINodeDirectorySection(InputStream in) throws IOException {
       for (long id : e.getChildrenList()) {
         o("inode", id);
       }
-      for (int i = 0; i < e.getNumOfRef(); i++) {
-        INodeSection.INodeReference r = INodeSection.INodeReference
-            .parseDelimitedFrom(in);
-        dumpINodeReference(r);
-
+      for (int refId : e.getRefChildrenList()) {
+        o("inodereference-index", refId);
       }
       out.print("</directory>\n");
     }
     out.print("</INodeDirectorySection>\n");
   }
 
-  private void dumpINodeReference(INodeSection.INodeReference r) {
+  private void dumpINodeReferenceSection(InputStream in) throws IOException {
+    out.print("<INodeReferenceSection>");
+    while (true) {
+      INodeReferenceSection.INodeReference e = INodeReferenceSection
+          .INodeReference.parseDelimitedFrom(in);
+      if (e == null) {
+        break;
+      }
+      dumpINodeReference(e);
+    }
+  }
+
+  private void dumpINodeReference(INodeReferenceSection.INodeReference r) {
     out.print("<ref>");
     o("referredId", r.getReferredId()).o("name", r.getName().toStringUtf8())
         .o("dstSnapshotId", r.getDstSnapshotId())
@@ -362,10 +375,15 @@ private void dumpSnapshotDiffSection(InputStream in) throws IOException {
           o("name", ce.getName().toStringUtf8());
           out.print("</created>\n");
         }
-        for (int j = 0; j < d.getNumOfDeletedRef(); ++j) {
-          INodeSection.INodeReference r = INodeSection.INodeReference
-              .parseDelimitedFrom(in);
-          dumpINodeReference(r);
+        for (long did : d.getDeletedINodeList()) {
+          out.print("<deleted>");
+          o("inode", did);
+          out.print("</deleted>\n");
+        }
+        for (int dRefid : d.getDeletedINodeRefList()) {
+          out.print("<deletedref>");
+          o("inodereference-index", dRefid);
+          out.print("</deletedref>\n");
         }
         out.print("</dirdiff>\n");
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index af7ba874d2..79d6fd0dbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -112,17 +112,6 @@ message INodeSection {
     optional bytes target = 2;
   }
 
-  message INodeReference {
-    // id of the referred inode
-    optional uint64 referredId = 1;
-    // local name recorded in WithName
-    optional bytes name = 2;
-    // recorded in DstReference
-    optional uint32 dstSnapshotId = 3;
-    // recorded in WithName
-    optional uint32 lastSnapshotId = 4;
-  }
-
   message INode {
     enum Type {
       FILE = 1;
@@ -163,13 +152,28 @@ message FilesUnderConstructionSection {
 message INodeDirectorySection {
   message DirEntry {
     optional uint64 parent = 1;
+    // children that are not reference nodes
    repeated uint64 children = 2 [packed = true];
-    optional uint64 numOfRef = 3;
-    // repeated INodeReference...
+    // children that are reference nodes, each element is a reference node id
+    repeated uint32 refChildren = 3 [packed = true];
   }
   // repeated DirEntry, ended at the boundary of the section.
}

+message INodeReferenceSection {
+  message INodeReference {
+    // id of the referred inode
+    optional uint64 referredId = 1;
+    // local name recorded in WithName
+    optional bytes name = 2;
+    // recorded in DstReference
+    optional uint32 dstSnapshotId = 3;
+    // recorded in WithName
+    optional uint32 lastSnapshotId = 4;
+  }
+  // repeated INodeReference...
+}
+
 /**
  * This section records the information about snapshot
  * NAME: SNAPSHOT
@@ -204,10 +208,10 @@ message SnapshotDiffSection {
     optional bytes name = 4;
     optional INodeSection.INodeDirectory snapshotCopy = 5;
     optional uint32 createdListSize = 6;
-    optional uint32 numOfDeletedRef = 7; // number of reference nodes in deleted list
-    repeated uint64 deletedINode = 8 [packed = true]; // id of deleted inode
+    repeated uint64 deletedINode = 7 [packed = true]; // id of deleted inodes
+    // id of reference nodes in the deleted list
+    repeated uint32 deletedINodeRef = 8 [packed = true];
     // repeated CreatedListEntry (size is specified by createdListSize)
-    // repeated INodeReference (reference inodes in deleted list)
  }

   message FileDiff {
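
The essence of the change, independent of the HDFS internals: INodeReference messages are no longer interleaved inline with INodeDirectorySection entries and SnapshotDiffSection records. The saver appends every reference it meets to one shared, ordered list and stores only the list index (the refChildren and deletedINodeRef fields above), then writes the whole list once as the INODE_REFRENCE section; the loader rebuilds the list in fsimage order and resolves each index against it. A minimal, self-contained sketch of that intern/resolve pattern follows; ReferenceTable, intern, and resolve are illustrative stand-ins, not the actual HDFS types.

import java.util.ArrayList;
import java.util.List;

class ReferenceTable {
  private final List<String> refs = new ArrayList<>();

  // Saver side: append the reference and hand back only its index, mirroring
  // refList.add(inode.asReference()); b.addRefChildren(refList.size() - 1)
  // in the patch.
  int intern(String ref) {
    refs.add(ref);
    return refs.size() - 1;
  }

  // Loader side: an index is only meaningful if the list is rebuilt in
  // exactly the order it was written, which is the invariant called out in
  // the loadINodeReferenceSection javadoc.
  String resolve(int refId) {
    return refs.get(refId);
  }
}

public class ReferenceTableDemo {
  public static void main(String[] args) {
    ReferenceTable table = new ReferenceTable();
    int first = table.intern("WithName:foo");
    int second = table.intern("DstReference:bar");
    System.out.println(table.resolve(first));  // prints WithName:foo
    System.out.println(table.resolve(second)); // prints DstReference:bar
  }
}

The save order matters for the same reason: serializeINodeReferenceSection runs last, after both the directory section and the snapshot diff section have finished appending to the shared list, which is what its 'can only be called after' javadoc records. The design echoes the existing STRING_TABLE section: values that used to be interleaved with other records become a flat, numbered pool, so directory entries and diffs stay compact and can be decoded without consuming nested messages from the same stream.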
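A second detail worth tracing is the WithCount handling in loadINodeReference: every reference to the same inode must share a single WithCount wrapper, so the loader reuses the wrapper already registered as the referred inode's parent reference and creates a fresh one only for the first reference it sees. The model below is a simplification under the assumption that the wrapper registers itself with its target on construction; Target, WithCount, and refer are hypothetical stand-ins for the real INodeReference hierarchy.

import java.util.ArrayList;
import java.util.List;

class Target {
  WithCount parentRef; // null until the first reference is loaded
}

class WithCount {
  final Target referred;
  final List<Object> referrers = new ArrayList<>();

  WithCount(Target referred) {
    this.referred = referred;
    referred.parentRef = this; // register so later lookups find this wrapper
  }
}

public class WithCountDemo {
  static WithCount refer(Target target) {
    WithCount wc = target.parentRef;
    if (wc == null) {               // first reference: create the shared wrapper
      wc = new WithCount(target);
    }
    wc.referrers.add(new Object()); // each reference is counted once
    return wc;
  }

  public static void main(String[] args) {
    Target t = new Target();
    WithCount a = refer(t);
    WithCount b = refer(t);
    System.out.println(a == b);             // true: one shared wrapper
    System.out.println(a.referrers.size()); // 2 references recorded
  }
}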
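On the tooling side, LsrPBImage now sizes one child array for both kinds of children and resolves each ref child through the loaded reference list (the l[i] = refList.get(refId).getReferredId() line above). A hedged sketch of that merge; mergeChildren and its parameters are hypothetical substitutes for the DirEntry protobuf accessors.

import java.util.List;

public class ChildMergeDemo {
  // children carry inode ids directly; refChildren carry indices that are
  // resolved through referredIds, which stands in for the loaded refList.
  static long[] mergeChildren(List<Long> children, List<Integer> refChildren,
      long[] referredIds) {
    long[] all = new long[children.size() + refChildren.size()];
    for (int i = 0; i < children.size(); i++) {
      all[i] = children.get(i);                 // direct inode id
    }
    for (int i = 0; i < refChildren.size(); i++) {
      all[children.size() + i] = referredIds[refChildren.get(i)];
    }
    return all;
  }

  public static void main(String[] args) {
    long[] merged = mergeChildren(List.of(1001L, 1002L), List.of(0, 1),
        new long[] { 2001L, 2002L });
    for (long id : merged) {
      System.out.println(id); // 1001, 1002, 2001, 2002
    }
  }
}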