From 042b33f20b01aadb5cd03da731ae7a3d94026aac Mon Sep 17 00:00:00 2001 From: arp Date: Wed, 27 Aug 2014 15:12:19 -0700 Subject: [PATCH] HDFS-6922. Add LazyPersist flag to INodeFile, save it in FsImage and edit logs. (Arpit Agarwal) --- .../hadoop-hdfs/CHANGES-HDFS-6581.txt | 3 ++- .../blockmanagement/BlockCollection.java | 6 +++++ .../hdfs/server/namenode/FSDirectory.java | 17 +++++++++----- .../hdfs/server/namenode/FSEditLog.java | 2 ++ .../hdfs/server/namenode/FSEditLogLoader.java | 3 ++- .../hdfs/server/namenode/FSEditLogOp.java | 23 +++++++++++++++++++ .../hdfs/server/namenode/FSImageFormat.java | 9 ++++++-- .../server/namenode/FSImageFormatPBINode.java | 6 +++-- .../server/namenode/FSImageSerialization.java | 6 +++-- .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../hdfs/server/namenode/INodeFile.java | 22 ++++++++++++++---- .../server/namenode/INodeFileAttributes.java | 11 ++++++--- .../namenode/NameNodeLayoutVersion.java | 4 +++- .../snapshot/FSImageFormatPBSnapshot.java | 4 +++- .../offlineImageViewer/FSImageLoader.java | 1 + .../offlineImageViewer/PBImageXmlWriter.java | 4 ++++ .../hadoop-hdfs/src/main/proto/fsimage.proto | 1 + .../hdfs/server/namenode/CreateEditsLog.java | 4 ++-- .../hdfs/server/namenode/TestEditLog.java | 2 +- .../namenode/TestFSPermissionChecker.java | 2 +- .../hdfs/server/namenode/TestINodeFile.java | 8 +++---- 21 files changed, 107 insertions(+), 33 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt index fc6e0e0af9..1f2bf649c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt @@ -4,5 +4,6 @@ HDFS-6924. Add new RAM_DISK storage type. (Arpit Agarwal) - + HDFS-6922. Add LazyPersist flag to INodeFile, save it in FsImage and + edit logs. 
(Arpit Agarwal) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java index c1e0682dd5..bd3a7802a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java @@ -54,6 +54,12 @@ public interface BlockCollection { */ public long getPreferredBlockSize(); + /** + * Return true if the file was created with {@link CreateFlag#LAZY_PERSIST}. + * @return true if the LAZY_PERSIST create flag was set for this file. + */ + public boolean getLazyPersistFlag(); + /** * Get block replication for the collection * @return block replication value diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index d03a4e5827..afe9d971b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -278,6 +278,7 @@ void disableQuotaChecks() { */ INodeFile addFile(String path, PermissionStatus permissions, short replication, long preferredBlockSize, + boolean isLazyPersist, String clientName, String clientMachine) throws FileAlreadyExistsException, QuotaExceededException, UnresolvedLinkException, SnapshotAccessControlException, AclException { @@ -285,7 +286,7 @@ INodeFile addFile(String path, PermissionStatus permissions, long modTime = now(); INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null, permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication, - preferredBlockSize); + preferredBlockSize, isLazyPersist); 
newNode.toUnderConstruction(clientName, clientMachine); boolean added = false; @@ -315,6 +316,7 @@ INodeFile unprotectedAddFile( long id, long modificationTime, long atime, long preferredBlockSize, + boolean isLazyPersist, boolean underConstruction, String clientName, String clientMachine) { @@ -323,12 +325,12 @@ INodeFile unprotectedAddFile( long id, if (underConstruction) { newNode = new INodeFile(id, null, permissions, modificationTime, modificationTime, BlockInfo.EMPTY_ARRAY, replication, - preferredBlockSize); + preferredBlockSize, isLazyPersist); newNode.toUnderConstruction(clientName, clientMachine); } else { newNode = new INodeFile(id, null, permissions, modificationTime, atime, - BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize); + BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize, isLazyPersist); } try { @@ -2283,11 +2285,13 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, long size = 0; // length is zero for directories short replication = 0; long blocksize = 0; + boolean isLazyPersist = false; if (node.isFile()) { final INodeFile fileNode = node.asFile(); size = fileNode.computeFileSize(snapshot); replication = fileNode.getFileReplication(snapshot); blocksize = fileNode.getPreferredBlockSize(); + isLazyPersist = fileNode.getLazyPersistFlag(); } int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0; @@ -2300,7 +2304,7 @@ HdfsFileStatus createFileStatus(byte[] path, INode node, node.isDirectory(), replication, blocksize, - false, + isLazyPersist, node.getModificationTime(snapshot), node.getAccessTime(snapshot), getPermissionForFileStatus(node, snapshot), @@ -2322,6 +2326,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, long size = 0; // length is zero for directories short replication = 0; long blocksize = 0; + boolean isLazyPersist = false; LocatedBlocks loc = null; final FileEncryptionInfo feInfo = isRawPath ? 
null : getFileEncryptionInfo(node, snapshot); @@ -2329,7 +2334,8 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, final INodeFile fileNode = node.asFile(); size = fileNode.computeFileSize(snapshot); replication = fileNode.getFileReplication(snapshot); blocksize = fileNode.getPreferredBlockSize(); + isLazyPersist = fileNode.getLazyPersistFlag(); final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID; final boolean isUc = !inSnapshot && fileNode.isUnderConstruction(); @@ -2348,7 +2353,7 @@ private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, HdfsLocatedFileStatus status = new HdfsLocatedFileStatus(size, node.isDirectory(), replication, - blocksize, false, node.getModificationTime(snapshot), + blocksize, isLazyPersist, node.getModificationTime(snapshot), node.getAccessTime(snapshot), getPermissionForFileStatus(node, snapshot), node.getUserName(snapshot), node.getGroupName(snapshot), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index b2adcd455f..bf3767beeb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -697,6 +697,7 @@ public void logOpenFile(String path, INodeFile newNode, boolean toLogRpcIds) { .setModificationTime(newNode.getModificationTime()) .setAccessTime(newNode.getAccessTime()) .setBlockSize(newNode.getPreferredBlockSize()) + .setLazyPersistFlag(newNode.getLazyPersistFlag()) .setBlocks(newNode.getBlocks()) .setPermissionStatus(permissions) .setClientName(newNode.getFileUnderConstructionFeature().getClientName()) @@ -727,6 +728,7 @@ public void logCloseFile(String path, INodeFile newNode) { .setModificationTime(newNode.getModificationTime()) 
.setAccessTime(newNode.getAccessTime()) .setBlockSize(newNode.getPreferredBlockSize()) + .setLazyPersistFlag(newNode.getLazyPersistFlag()) .setBlocks(newNode.getBlocks()) .setPermissionStatus(newNode.getPermissionStatus()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index d522e51bc2..951f3e92aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -357,7 +357,8 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, path, addCloseOp.permissions, addCloseOp.aclEntries, addCloseOp.xAttrs, replication, addCloseOp.mtime, addCloseOp.atime, - addCloseOp.blockSize, true, addCloseOp.clientName, + addCloseOp.blockSize, addCloseOp.isLazyPersist, + true, addCloseOp.clientName, addCloseOp.clientMachine); fsNamesys.leaseManager.addLease(addCloseOp.clientName, path); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 5543e0cb86..94c287c2fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -401,6 +401,7 @@ static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatin long mtime; long atime; long blockSize; + boolean isLazyPersist; Block[] blocks; PermissionStatus permissions; List aclEntries; @@ -448,6 +449,11 @@ T setBlockSize(long blockSize) { return (T)this; } + T setLazyPersistFlag(boolean isLazyPersist) { + this.isLazyPersist = isLazyPersist; + 
return (T)this; + } + T setBlocks(Block[] blocks) { if (blocks.length > MAX_BLOCKS) { throw new RuntimeException("Can't have more than " + MAX_BLOCKS + @@ -495,6 +501,7 @@ public void writeFields(DataOutputStream out) throws IOException { FSImageSerialization.writeLong(mtime, out); FSImageSerialization.writeLong(atime, out); FSImageSerialization.writeLong(blockSize, out); + FSImageSerialization.writeInt((isLazyPersist ? 1 : 0), out); new ArrayWritable(Block.class, blocks).write(out); permissions.write(out); @@ -562,6 +569,13 @@ void readFields(DataInputStream in, int logVersion) this.blockSize = readLong(in); } + if (NameNodeLayoutVersion.supports( + NameNodeLayoutVersion.Feature.LAZY_PERSIST_FILES, logVersion)) { + this.isLazyPersist = (FSImageSerialization.readInt(in) != 0); + } else { + this.isLazyPersist = false; + } + this.blocks = readBlocks(in, logVersion); this.permissions = PermissionStatus.read(in); @@ -615,6 +629,8 @@ public String stringifyMembers() { builder.append(atime); builder.append(", blockSize="); builder.append(blockSize); + builder.append(", lazyPersist="); + builder.append(isLazyPersist); builder.append(", blocks="); builder.append(Arrays.toString(blocks)); builder.append(", permissions="); @@ -651,6 +667,8 @@ protected void toXml(ContentHandler contentHandler) throws SAXException { Long.toString(atime)); XMLUtils.addSaxString(contentHandler, "BLOCKSIZE", Long.toString(blockSize)); + XMLUtils.addSaxString(contentHandler, "LAZY_PERSIST", + Boolean.toString(isLazyPersist)); XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName); XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine); for (Block b : blocks) { @@ -674,6 +692,11 @@ void fromXml(Stanza st) throws InvalidXmlException { this.mtime = Long.parseLong(st.getValue("MTIME")); this.atime = Long.parseLong(st.getValue("ATIME")); this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE")); + + String lazyPersistString = st.getValueOrNull("LAZY_PERSIST"); + 
this.isLazyPersist = + lazyPersistString != null && Boolean.parseBoolean(lazyPersistString); + this.clientName = st.getValue("CLIENT_NAME"); this.clientMachine = st.getValue("CLIENT_MACHINE"); if (st.hasChildren("BLOCK")) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 5b6d269546..76b6c80e4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -783,8 +783,11 @@ INode loadINode(final byte[] localName, boolean isSnapshotINode, if (counter != null) { counter.increment(); } + + // Images in the old format will not have the lazyPersist flag so it is + // safe to pass false always. final INodeFile file = new INodeFile(inodeId, localName, permissions, - modificationTime, atime, blocks, replication, blockSize); + modificationTime, atime, blocks, replication, blockSize, false); if (underConstruction) { file.toUnderConstruction(clientName, clientMachine); } @@ -884,8 +887,10 @@ public INodeFileAttributes loadINodeFileAttributes(DataInput in) in.readShort()); final long preferredBlockSize = in.readLong(); + // LazyPersist flag will not be present in old image formats and hence + // can be safely set to false always. 
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime, - accessTime, replication, preferredBlockSize, null); + accessTime, replication, preferredBlockSize, false, null); } public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index feff70465f..51e297d3c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -283,7 +283,8 @@ private INodeFile loadINodeFile(INodeSection.INode n) { final INodeFile file = new INodeFile(n.getId(), n.getName().toByteArray(), permissions, f.getModificationTime(), - f.getAccessTime(), blocks, replication, f.getPreferredBlockSize()); + f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(), + f.hasIsLazyPersist() ? 
f.getIsLazyPersist() : false); if (f.hasAcl()) { file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(), @@ -391,7 +392,8 @@ public static INodeSection.INodeFile.Builder buildINodeFile( .setModificationTime(file.getModificationTime()) .setPermission(buildPermissionStatus(file, state.getStringMap())) .setPreferredBlockSize(file.getPreferredBlockSize()) - .setReplication(file.getFileReplication()); + .setReplication(file.getFileReplication()) + .setIsLazyPersist(file.getLazyPersistFlag()); AclFeature f = file.getAclFeature(); if (f != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index eb8354d7aa..e369575c84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -146,14 +146,16 @@ static INodeFile readINodeUnderConstruction( int numLocs = in.readInt(); assert numLocs == 0 : "Unexpected block locations"; + // Images in the pre-protobuf format will not have the lazyPersist flag, + // so it is safe to pass false always. 
INodeFile file = new INodeFile(inodeId, name, perm, modificationTime, - modificationTime, blocks, blockReplication, preferredBlockSize); + modificationTime, blocks, blockReplication, preferredBlockSize, false); file.toUnderConstruction(clientName, clientMachine); return file; } // Helper function that writes an INodeUnderConstruction - // into the input stream + // into the output stream // static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons, String path) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 6f1f969894..8054b79e9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2616,7 +2616,7 @@ private void startFileInternal(FSPermissionChecker pc, String src, if (parent != null && mkdirsRecursively(parent.toString(), permissions, true, now())) { newNode = dir.addFile(src, permissions, replication, blockSize, - holder, clientMachine); + isLazyPersist, holder, clientMachine); } if (newNode == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 94fa686709..a254f3e987 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -74,7 +74,9 @@ public static INodeFile valueOf(INode inode, String path, boolean acceptNull) /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */ static enum HeaderFormat { PREFERRED_BLOCK_SIZE(null, 
48, 1), - REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1); + REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 12, 1), + LAZY_PERSIST(REPLICATION.BITS, 4, 0); + private final LongBitFormat BITS; @@ -90,12 +92,18 @@ static long getPreferredBlockSize(long header) { return PREFERRED_BLOCK_SIZE.BITS.retrieve(header); } - static long toLong(long preferredBlockSize, short replication) { + static boolean getLazyPersistFlag(long header) { + return LAZY_PERSIST.BITS.retrieve(header) == 0 ? false : true; + } + + static long toLong(long preferredBlockSize, short replication, boolean isLazyPersist) { long h = 0; h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h); h = REPLICATION.BITS.combine(replication, h); + h = LAZY_PERSIST.BITS.combine(isLazyPersist ? 1 : 0, h); return h; } + } private long header = 0L; @@ -104,9 +112,9 @@ static long toLong(long preferredBlockSize, short replication) { INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime, long atime, BlockInfo[] blklist, short replication, - long preferredBlockSize) { + long preferredBlockSize, boolean isLazyPersist) { super(id, name, permissions, mtime, atime); - header = HeaderFormat.toLong(preferredBlockSize, replication); + header = HeaderFormat.toLong(preferredBlockSize, replication, isLazyPersist); this.blocks = blklist; } @@ -160,7 +168,6 @@ public boolean isUnderConstruction() { return getFileUnderConstructionFeature() != null; } - /** Convert this file to an {@link INodeFileUnderConstruction}. 
*/ INodeFile toUnderConstruction(String clientName, String clientMachine) { Preconditions.checkState(!isUnderConstruction(), "file is already under construction"); @@ -355,6 +362,11 @@ public long getPreferredBlockSize() { return HeaderFormat.getPreferredBlockSize(header); } + @Override + public boolean getLazyPersistFlag() { + return HeaderFormat.getLazyPersistFlag(header); + } + @Override public long getHeaderLong() { return header; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java index 47b76b74ab..64ee1fcb0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java @@ -21,7 +21,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; - /** * The attributes of a file. */ @@ -32,6 +31,8 @@ public interface INodeFileAttributes extends INodeAttributes { /** @return preferred block size in bytes */ public long getPreferredBlockSize(); + + public boolean getLazyPersistFlag(); /** @return the header as a long. 
*/ public long getHeaderLong(); @@ -45,10 +46,11 @@ public static class SnapshotCopy extends INodeAttributes.SnapshotCopy public SnapshotCopy(byte[] name, PermissionStatus permissions, AclFeature aclFeature, long modificationTime, long accessTime, - short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) { + short replication, long preferredBlockSize, + boolean isTransient, XAttrFeature xAttrsFeature) { super(name, permissions, aclFeature, modificationTime, accessTime, xAttrsFeature); - header = HeaderFormat.toLong(preferredBlockSize, replication); + header = HeaderFormat.toLong(preferredBlockSize, replication, isTransient); } public SnapshotCopy(INodeFile file) { @@ -66,6 +68,9 @@ public long getPreferredBlockSize() { return HeaderFormat.getPreferredBlockSize(header); } + @Override + public boolean getLazyPersistFlag() { return HeaderFormat.getLazyPersistFlag(header); } + @Override public long getHeaderLong() { return header; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java index 6ae2806d8f..16d55fd045 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java @@ -65,7 +65,9 @@ public static boolean supports(final LayoutFeature f, final int lv) { public static enum Feature implements LayoutFeature { ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false), EDITLOG_LENGTH(-56, "Add length field to every edit log op"), - XATTRS(-57, "Extended attributes"); + XATTRS(-57, "Extended attributes"), + LAZY_PERSIST_FILES(-58, "Support for optional lazy persistence of " + + " files with reduced durability guarantees"); private final FeatureInfo info; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 3f4cda534a..6e00c17139 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -220,7 +220,9 @@ private void loadFileDiffList(InputStream in, INodeFile file, int size) copy = new INodeFileAttributes.SnapshotCopy(pbf.getName() .toByteArray(), permission, acl, fileInPb.getModificationTime(), fileInPb.getAccessTime(), (short) fileInPb.getReplication(), - fileInPb.getPreferredBlockSize(), xAttrs); + fileInPb.getPreferredBlockSize(), + fileInPb.hasIsLazyPersist() ? fileInPb.getIsLazyPersist() : false, + xAttrs); } FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java index bab83a132f..7ad1c59702 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java @@ -391,6 +391,7 @@ private long getINodeId(String strPath) { f.getPermission(), stringTable); map.put("accessTime", f.getAccessTime()); map.put("blockSize", f.getPreferredBlockSize()); + map.put("lazyPersist", f.getIsLazyPersist()); map.put("group", p.getGroupName()); map.put("length", getFileSize(f)); map.put("modificationTime", f.getModificationTime()); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java index 99617b805e..744fc754b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java @@ -247,6 +247,10 @@ private void dumpINodeFile(INodeSection.INodeFile f) { .o("perferredBlockSize", f.getPreferredBlockSize()) .o("permission", dumpPermission(f.getPermission())); + if (f.hasIsLazyPersist()) { + o("lazyPersist", f.getIsLazyPersist()); + } + if (f.getBlocksCount() > 0) { out.print(""); for (BlockProto b : f.getBlocksList()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto index 1c8edfa0c1..63674dec6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto @@ -134,6 +134,7 @@ message INodeSection { optional FileUnderConstructionFeature fileUC = 7; optional AclFeatureProto acl = 8; optional XAttrFeatureProto xAttrs = 9; + optional bool isLazyPersist = 10 [default = false]; } message INodeDirectory { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java index a5e2edf87c..7f193f5b6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java @@ -82,7 +82,7 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication, } final 
INodeFile inode = new INodeFile(inodeId.nextValue(), null, - p, 0L, 0L, blocks, replication, blockSize); + p, 0L, 0L, blocks, replication, blockSize, false); inode.toUnderConstruction("", ""); // Append path to filename with information about blockIDs @@ -97,7 +97,7 @@ static void addFiles(FSEditLog editLog, int numFiles, short replication, editLog.logMkDir(currentDir, dirInode); } INodeFile fileUc = new INodeFile(inodeId.nextValue(), null, - p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize); + p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, false); fileUc.toUnderConstruction("", ""); editLog.logOpenFile(filePath, fileUc, false); editLog.logCloseFile(filePath, inode); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index 8074a68e3a..762969ea5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -194,7 +194,7 @@ public void run() { for (int i = 0; i < numTransactions; i++) { INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null, - p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize); + p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, false); inode.toUnderConstruction("", ""); editLog.logOpenFile("/filename" + (startIndex + i), inode, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java index b1c5ca7c55..ad51445377 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java @@ -423,7 +423,7 @@ private static INodeFile createINodeFile(INodeDirectory parent, String name, FsPermission.createImmutable(perm)); INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION, - PREFERRED_BLOCK_SIZE); + PREFERRED_BLOCK_SIZE, false); parent.addChild(inodeFile); return inodeFile; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index a739b7aa6e..6d669bd769 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -82,7 +82,7 @@ public class TestINodeFile { INodeFile createINodeFile(short replication, long preferredBlockSize) { return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, - null, replication, preferredBlockSize); + null, replication, preferredBlockSize, false); } /** * Test for the Replication value. 
Sets a value and checks if it was set @@ -259,7 +259,7 @@ private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) { INodeFile[] iNodes = new INodeFile[nCount]; for (int i = 0; i < nCount; i++) { iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication, - preferredBlockSize); + preferredBlockSize, false); iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i)); BlockInfo newblock = new BlockInfo(replication); iNodes[i].addBlock(newblock); @@ -316,7 +316,7 @@ public void testValueOf () throws IOException { {//cast from INodeFileUnderConstruction final INode from = new INodeFile( - INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L); + INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L, false); from.asFile().toUnderConstruction("client", "machine"); //cast to INodeFile, should success @@ -1079,7 +1079,7 @@ public void testFilesInGetListingOps() throws Exception { public void testFileUnderConstruction() { replication = 3; final INodeFile file = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, - perm, 0L, 0L, null, replication, 1024L); + perm, 0L, 0L, null, replication, 1024L, false); assertFalse(file.isUnderConstruction()); final String clientName = "client";