HDFS-4092. Update file deletion logic for snapshot so that the current inode is removed from the circular linked list; and if some blocks at the end of the block list no longer belong to any other inode, collect them and update the block list.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1402287 13f79535-47bb-0310-9956-ffa450edef68
parent 40d2b6f308
commit 719279ea8a
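Before the diffs, a minimal self-contained Java sketch of the deletion logic summarized above. Every name in it (FileNode, addCopy, delete) is a hypothetical stand-in, not an HDFS class; the real implementation is INodeFileWithLink.collectSubtreeBlocksAndClear in the last hunk below. The key invariant is that all nodes in the circle share one block array, so truncation happens once and the resized array is re-pointed for every survivor.

import java.util.ArrayList;
import java.util.List;

class FileNode {
  long[] blocks;   // block sizes, shared by every node in the circle
  long fileSize;   // this node's (frozen) view of the file length
  FileNode next;   // circular linked list of nodes sharing the blocks

  FileNode(long[] blocks, long fileSize) {
    this.blocks = blocks;
    this.fileSize = fileSize;
    this.next = this;          // a new node starts alone in its own circle
  }

  /** Insert a copy sharing this node's blocks (e.g. a snapshot copy). */
  FileNode addCopy(long copySize) {
    FileNode copy = new FileNode(blocks, copySize);
    copy.next = this.next;
    this.next = copy;
    return copy;
  }

  /** Delete this node; return the block sizes nobody references anymore. */
  List<Long> delete() {
    List<Long> collected = new ArrayList<>();
    if (next == this) {        // last node standing: all blocks are garbage
      for (long b : blocks) {
        collected.add(b);
      }
    } else {
      // Max file size among the survivors decides how many blocks to keep.
      long max = 0;
      FileNode last = this;
      for (FileNode i = next; i != this; i = i.next) {
        max = Math.max(max, i.fileSize);
        last = i;
      }
      // Minimum n such that the first n blocks cover max bytes.
      int n = 0;
      for (long size = 0; n < blocks.length && size < max; n++) {
        size += blocks[n];
      }
      if (n < blocks.length) { // blocks[n..] are beyond every survivor
        long[] kept = new long[n];
        System.arraycopy(blocks, 0, kept, 0, n);
        for (FileNode i = next; i != this; i = i.next) {
          i.blocks = kept;     // survivors share the truncated array
        }
        for (int k = n; k < blocks.length; k++) {
          collected.add(blocks[k]);
        }
      }
      last.next = this.next;   // unlink this node from the circle
      this.next = null;
    }
    blocks = null;
    return collected;
  }

  public static void main(String[] args) {
    // The current file has three 50-byte blocks; a snapshot saw only two.
    FileNode current = new FileNode(new long[] {50, 50, 50}, 150);
    FileNode snapshot = current.addCopy(100);
    System.out.println(current.delete());       // [50]  - last block freed
    System.out.println(snapshot.blocks.length); // 2     - truncated, shared
    System.out.println(snapshot.delete());      // [50, 50]
  }
}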
@@ -28,3 +28,8 @@ Branch-2802 Snapshot (Unreleased)
   (szetszwo)
 
   HDFS-4097. Provide CLI support for createSnapshot. (Brandon Li via suresh)
+
+  HDFS-4092. Update file deletion logic for snapshot so that the current inode
+  is removed from the circular linked list; and if some blocks at the end of the
+  block list no longer belong to any other inode, collect them and update the
+  block list. (szetszwo)
@@ -307,7 +307,7 @@ INodeFileSnapshot addFileSnapshot(String srcPath, String dstPath
       ) throws IOException, QuotaExceededException {
     waitForReady();
 
-    final INodeFile src = rootDir.getINodeFile(srcPath);
+    final INodeFile src = INodeFile.valueOf(rootDir.getNode(srcPath, false), srcPath);
     INodeFileSnapshot snapshot = new INodeFileSnapshot(src, src.computeFileSize(true));
 
     writeLock();
@@ -170,6 +170,7 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -1398,6 +1399,10 @@ private void concatInternal(String target, String [] srcs)
       throw new HadoopIllegalArgumentException("concat: target file "
           + target + " is empty");
     }
+    if (trgInode instanceof INodeFileWithLink) {
+      throw new HadoopIllegalArgumentException("concat: target file "
+          + target + " is in a snapshot");
+    }
 
     long blockSize = trgInode.getPreferredBlockSize();
 
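The new instanceof guard exists because a snapshotted target is an INodeFileWithLink whose blocks are shared with its snapshot copies through the circular list; appending the sources' blocks to it would silently change what the snapshots see. A hypothetical client-side illustration (the paths and surrounding setup are assumed, not part of this commit):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class ConcatSnapshotExample {
  /** Try to concat onto a target that has a snapshot copy. */
  static void tryConcat(FileSystem fs) {
    Path target = new Path("/dir/file");      // assume it is in a snapshot
    Path[] srcs = { new Path("/dir/extra") };
    try {
      fs.concat(target, srcs);
      System.out.println("concat succeeded: target not in a snapshot");
    } catch (IllegalArgumentException | IOException e) {
      // With this commit the namenode rejects the call with
      // "concat: target file /dir/file is in a snapshot"; over RPC the
      // error may arrive wrapped in an IOException (RemoteException).
      System.out.println("concat rejected: " + e.getMessage());
    }
  }
}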
@@ -148,21 +148,6 @@ INode getNode(String path, boolean resolveLink)
     return getNode(getPathComponents(path), resolveLink);
   }
 
-  /** @return the INodeFile corresponding to the path. */
-  INodeFile getINodeFile(String path) throws FileNotFoundException,
-      UnresolvedLinkException {
-    final INode inode = getNode(path, false);
-    if (inode == null) {
-      throw new FileNotFoundException("File \"" + path
-          + "\" not found");
-    }
-    if (!(inode instanceof INodeFile)) {
-      throw new FileNotFoundException("Path \"" + path
-          + "\" is not a file");
-    }
-    return (INodeFile)inode;
-  }
-
   /**
    * Retrieve existing INodes from a path. If existing is big enough to store
    * all path components (existing and non-existing), then existing INodes
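The helper removed here is subsumed by the static INodeFile.valueOf(INode, String) used in the addFileSnapshot hunk above (its signature appears in the next hunk header). A reconstructed sketch of what that method must do, based on the checks the deleted helper performed; the exact messages may differ:

/** Reconstructed sketch of INodeFile.valueOf; exact wording may differ. */
public static INodeFile valueOf(INode inode, String path) throws IOException {
  if (inode == null) {
    throw new FileNotFoundException("File does not exist: " + path);
  }
  if (!(inode instanceof INodeFile)) {
    throw new FileNotFoundException("Path is not a file: " + path);
  }
  return (INodeFile) inode;
}

Centralizing the null and type checks in valueOf gives every caller consistent FileNotFoundException behavior instead of each directory method re-implementing it.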
@@ -55,7 +55,7 @@ public static INodeFile valueOf(INode inode, String path) throws IOException {
 
   private long header;
 
-  BlockInfo blocks[] = null;
+  protected BlockInfo[] blocks = null;
 
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
                       short replication, long modificationTime,
@@ -162,7 +162,7 @@ public void setBlock(int idx, BlockInfo blk) {
   }
 
   @Override
-  int collectSubtreeBlocksAndClear(List<Block> v) {
+  protected int collectSubtreeBlocksAndClear(List<Block> v) {
     parent = null;
     if(blocks != null && v != null) {
       for (BlockInfo blk : blocks) {
@@ -192,7 +192,7 @@ long[] computeContentSummary(long[] summary) {
   /** Compute file size.
    * May or may not include BlockInfoUnderConstruction.
    */
-  long computeFileSize(boolean includesBlockInfoUnderConstruction) {
+  protected long computeFileSize(boolean includesBlockInfoUnderConstruction) {
    if (blocks == null || blocks.length == 0) {
      return 0;
    }
@@ -32,4 +32,11 @@ public INodeFileSnapshot(INodeFile f, long size) {
     super(f);
     this.size = size;
   }
+
+  @Override
+  protected long computeFileSize(boolean includesBlockInfoUnderConstruction) {
+    //ignore includesBlockInfoUnderConstruction
+    //since files in a snapshot are considered as closed.
+    return size;
+  }
 }
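Why the override ignores its flag: a snapshot copy records the file length once, at snapshot-creation time, and must keep returning it even while the live file is appended to. A standalone toy model (plain Java, not HDFS code) of that behavior:

// Toy model: the snapshot freezes the length it saw at creation time.
class LiveFile {
  long length;                       // mutable: the file may still grow
  LiveFile(long length) { this.length = length; }
  long computeFileSize(boolean includeUnderConstruction) { return length; }
}

class SnapshotFile extends LiveFile {
  private final long frozen;
  SnapshotFile(LiveFile f) {
    super(f.length);
    frozen = f.computeFileSize(true); // record the size once
  }
  @Override
  long computeFileSize(boolean includeUnderConstruction) {
    return frozen;                    // flag ignored: snapshots are closed
  }

  public static void main(String[] args) {
    LiveFile live = new LiveFile(100);
    SnapshotFile snap = new SnapshotFile(live);
    live.length = 150;                                // live file grows
    System.out.println(live.computeFileSize(true));   // 150
    System.out.println(snap.computeFileSize(true));   // 100, frozen
  }
}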
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 
 /**
@@ -65,4 +69,73 @@ public short getBlockReplication() {
     }
     return max;
   }
+
+  /**
+   * {@inheritDoc}
+   *
+   * Remove the current inode from the circular linked list.
+   * If some blocks at the end of the block list no longer belong to
+   * any other inode, collect them and update the block list.
+   */
+  @Override
+  protected int collectSubtreeBlocksAndClear(List<Block> v) {
+    if (next == this) {
+      //this is the only remaining inode.
+      super.collectSubtreeBlocksAndClear(v);
+    } else {
+      //There are other inode(s) using the blocks.
+      //Compute max file size excluding this and find the last inode.
+      long max = next.computeFileSize(true);
+      INodeFileWithLink last = next;
+      for(INodeFileWithLink i = next.getNext(); i != this; i = i.getNext()) {
+        final long size = i.computeFileSize(true);
+        if (size > max) {
+          max = size;
+        }
+        last = i;
+      }
+
+      collectBlocksBeyondMaxAndClear(max, v);
+
+      //remove this from the circular linked list.
+      last.next = this.next;
+      this.next = null;
+      //clear parent
+      parent = null;
+    }
+    return 1;
+  }
+
+  private void collectBlocksBeyondMaxAndClear(final long max, final List<Block> v) {
+    if (blocks != null) {
+      //find the minimum n such that the size of the first n blocks > max
+      int n = 0;
+      for(long size = 0; n < blocks.length && max > size; n++) {
+        size += blocks[n].getNumBytes();
+      }
+
+      //starting from block[n], the data is beyond max.
+      if (n < blocks.length) {
+        //resize the array.
+        final BlockInfo[] newBlocks;
+        if (n == 0) {
+          newBlocks = null;
+        } else {
+          newBlocks = new BlockInfo[n];
+          System.arraycopy(blocks, 0, newBlocks, 0, n);
+        }
+        for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
+          i.blocks = newBlocks;
+        }
+
+        //collect the blocks beyond max.
+        if (v != null) {
+          for(; n < blocks.length; n++) {
+            v.add(blocks[n]);
+          }
+        }
+      }
+      blocks = null;
+    }
+  }
 }
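A worked example of collectBlocksBeyondMaxAndClear (numbers assumed): suppose the deleted inode's shared block list has sizes [10, 10, 10] and the largest surviving copy reports computeFileSize(true) == 15. The loop stops at n == 2, since 10 < 15 but 10 + 10 >= 15, so the survivors are re-pointed at a resized two-block array and the third block is added to v for removal from the block map. Note that survivors keep whole blocks: the retained prefix covers max at block granularity rather than truncating to the exact byte.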