diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt index 4bcd907fa7..e53b8fdb29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt @@ -28,3 +28,8 @@ Branch-2802 Snapshot (Unreleased) (szetszwo) HDFS-4097. Provide CLI support for createSnapshot. (Brandon Li via suresh) + + HDFS-4092. Update file deletion logic for snapshot so that the current inode + is removed from the circular linked list; and if some blocks at the end of the + block list no longer belong to any other inode, collect them and update the + block list. (szetszwo) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 58e6c72188..d7935e0d58 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -307,7 +307,7 @@ INodeFileSnapshot addFileSnapshot(String srcPath, String dstPath ) throws IOException, QuotaExceededException { waitForReady(); - final INodeFile src = rootDir.getINodeFile(srcPath); + final INodeFile src = INodeFile.valueOf(rootDir.getNode(srcPath, false), srcPath); INodeFileSnapshot snapshot = new INodeFileSnapshot(src, src.computeFileSize(true)); writeLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 26af442f18..64ce99f575 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -170,6 +170,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; +import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; @@ -1398,6 +1399,10 @@ private void concatInternal(String target, String [] srcs) throw new HadoopIllegalArgumentException("concat: target file " + target + " is empty"); } + if (trgInode instanceof INodeFileWithLink) { + throw new HadoopIllegalArgumentException("concat: target file " + + target + " is in a snapshot"); + } long blockSize = trgInode.getPreferredBlockSize(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 855e3dc54a..f0483a8cc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -148,21 +148,6 @@ INode getNode(String path, boolean resolveLink) return getNode(getPathComponents(path), resolveLink); } - /** @return the INodeFile corresponding to the path. 
*/ - INodeFile getINodeFile(String path) throws FileNotFoundException, - UnresolvedLinkException { - final INode inode = getNode(path, false); - if (inode == null) { - throw new FileNotFoundException("File \"" + path - + "\" not found"); - } - if (!(inode instanceof INodeFile)) { - throw new FileNotFoundException("Path \"" + path - + "\" is not a file"); - } - return (INodeFile)inode; - } - /** * Retrieve existing INodes from a path. If existing is big enough to store * all path components (existing and non-existing), then existing INodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index c84763f54c..f88abaa693 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -55,7 +55,7 @@ public static INodeFile valueOf(INode inode, String path) throws IOException { private long header; - BlockInfo blocks[] = null; + protected BlockInfo[] blocks = null; INodeFile(PermissionStatus permissions, BlockInfo[] blklist, short replication, long modificationTime, @@ -162,7 +162,7 @@ public void setBlock(int idx, BlockInfo blk) { } @Override - int collectSubtreeBlocksAndClear(List<Block> v) { + protected int collectSubtreeBlocksAndClear(List<Block> v) { parent = null; if(blocks != null && v != null) { for (BlockInfo blk : blocks) { @@ -192,7 +192,7 @@ long[] computeContentSummary(long[] summary) { /** Compute file size. * May or may not include BlockInfoUnderConstruction. 
*/ - long computeFileSize(boolean includesBlockInfoUnderConstruction) { + protected long computeFileSize(boolean includesBlockInfoUnderConstruction) { if (blocks == null || blocks.length == 0) { return 0; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java index 1667cb4f7f..e3e9a8e53d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileSnapshot.java @@ -32,4 +32,11 @@ public INodeFileSnapshot(INodeFile f, long size) { super(f); this.size = size; } + + @Override + protected long computeFileSize(boolean includesBlockInfoUnderConstruction) { + //ignore includesBlockInfoUnderConstruction + //since files in a snapshot are considered as closed. 
+ return size; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java index 011984f426..b46b170981 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithLink.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.namenode.INodeFile; /** @@ -65,4 +69,73 @@ public short getBlockReplication() { } return max; } + + /** + * {@inheritDoc} + * + * Remove the current inode from the circular linked list. + * If some blocks at the end of the block list no longer belong to + * any other inode, collect them and update the block list. + */ + @Override + protected int collectSubtreeBlocksAndClear(List<Block> v) { + if (next == this) { + //this is the only remaining inode. + super.collectSubtreeBlocksAndClear(v); + } else { + //There are other inode(s) using the blocks. + //Compute max file size excluding this and find the last inode. + long max = next.computeFileSize(true); + INodeFileWithLink last = next; + for(INodeFileWithLink i = next.getNext(); i != this; i = i.getNext()) { + final long size = i.computeFileSize(true); + if (size > max) { + max = size; + } + last = i; + } + + collectBlocksBeyondMaxAndClear(max, v); + + //remove this from the circular linked list. 
+ last.next = this.next; + this.next = null; + //clear parent + parent = null; + } + return 1; + } + + private void collectBlocksBeyondMaxAndClear(final long max, final List<Block> v) { + if (blocks != null) { + //find the minimum n such that the size of the first n blocks > max + int n = 0; + for(long size = 0; n < blocks.length && max > size; n++) { + size += blocks[n].getNumBytes(); + } + + //starting from block[n], the data is beyond max. + if (n < blocks.length) { + //resize the array. + final BlockInfo[] newBlocks; + if (n == 0) { + newBlocks = null; + } else { + newBlocks = new BlockInfo[n]; + System.arraycopy(blocks, 0, newBlocks, 0, n); + } + for(INodeFileWithLink i = next; i != this; i = i.getNext()) { + i.blocks = newBlocks; + } + + //collect the blocks beyond max. + if (v != null) { + for(; n < blocks.length; n++) { + v.add(blocks[n]); + } + } + } + blocks = null; + } + } }