HDFS-4111. Support snapshot of subtrees. Contributed by Tsz Wo (Nicholas), Sze.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1402684 13f79535-47bb-0310-9956-ffa450edef68
parent b5355e5050
commit 9e26fdcda7
@@ -10,7 +10,7 @@ Branch-2802 Snapshot (Unreleased)

   HDFS-4083. Protocol changes for snapshots. (suresh)

-  HDFS-4077. Add support for Snapshottable Directory. (Nicholas via suresh)
+  HDFS-4077. Add support for Snapshottable Directory. (szetszwo via suresh)

   HDFS-4087. Protocol changes for listSnapshots functionality.
   (Brandon Li via suresh)
@@ -33,3 +33,5 @@ Branch-2802 Snapshot (Unreleased)
   is removed from the circular linked list; and if some blocks at the end of the
   block list no longer belong to any other inode, collect them and update the
   block list. (szetszwo)
+
+  HDFS-4111. Support snapshot of subtrees. (szetszwo via suresh)
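The context lines above describe the per-file circular linked list that ties a file to its snapshot copies; it is the same list that srcWithLink.insert(snapshot) appends to in the SnapshotManager change further down this diff. Below is a minimal, self-contained sketch of inserting into such a circular singly linked list; the FileWithLink class is a hypothetical stand-in, not the real INodeFileWithLink.

/** Toy circular singly linked list linking a file with its snapshot copies. */
public class CircularLinkSketch {
  static class FileWithLink {             // hypothetical stand-in for INodeFileWithLink
    final String name;
    FileWithLink next;
    FileWithLink(String name) {
      this.name = name;
      this.next = this;                   // a single node points to itself
    }
    /** Insert another node right after this one, keeping the cycle closed. */
    void insert(FileWithLink that) {
      that.next = this.next;
      this.next = that;
    }
  }

  public static void main(String[] args) {
    FileWithLink file = new FileWithLink("file");
    file.insert(new FileWithLink("snapshot-s0"));
    file.insert(new FileWithLink("snapshot-s1"));

    // Walk the cycle once: file -> snapshot-s1 -> snapshot-s0 -> back to file.
    FileWithLink p = file;
    do {
      System.out.println(p.name);
      p = p.next;
    } while (p != file);
  }
}

Because the list is circular, a file and all of its snapshot copies can reach one another from any node, which is what makes the delete-time block cleanup described in the CHANGES entry possible.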
@@ -2949,6 +2949,12 @@ public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
     return blocksMap.addBlockCollection(block, bc);
   }

+  public void addBlockCollection(BlockCollection bc) {
+    for(BlockInfo block : bc.getBlocks()) {
+      addBlockCollection(block, bc);
+    }
+  }
+
   public BlockCollection getBlockCollection(Block b) {
     return blocksMap.getBlockCollection(b);
   }
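The new addBlockCollection(BlockCollection bc) overload simply re-registers every block of a collection in the blocks map, so that block-to-inode lookups resolve to the new owner after an inode is swapped out; SnapshotManager relies on this further down when it replaces an INodeFile with an INodeFileWithLink. A toy sketch of that re-pointing idea, using hypothetical Block and BlockCollection stand-ins rather than the real HDFS types:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Toy model of the blocksMap re-registration done by addBlockCollection(bc). */
public class BlocksMapSketch {
  static class Block {                 // hypothetical stand-in for the HDFS Block
    final long id;
    Block(long id) { this.id = id; }
  }
  static class BlockCollection {       // hypothetical stand-in for BlockCollection
    final String name;
    final List<Block> blocks = new ArrayList<Block>();
    BlockCollection(String name) { this.name = name; }
  }

  // block -> owning collection, analogous to the BlocksMap
  private final Map<Block, BlockCollection> blocksMap =
      new HashMap<Block, BlockCollection>();

  /** Point a single block at its owning collection. */
  void addBlockCollection(Block block, BlockCollection bc) {
    blocksMap.put(block, bc);
  }

  /** Counterpart of the new overload: register every block of the collection. */
  void addBlockCollection(BlockCollection bc) {
    for (Block b : bc.blocks) {
      addBlockCollection(b, bc);
    }
  }

  public static void main(String[] args) {
    BlocksMapSketch map = new BlocksMapSketch();
    Block b = new Block(1);

    BlockCollection file = new BlockCollection("INodeFile");
    file.blocks.add(b);
    map.addBlockCollection(file);

    // Swap the owner (INodeFile -> INodeFileWithLink) and re-register its blocks.
    BlockCollection fileWithLink = new BlockCollection("INodeFileWithLink");
    fileWithLink.blocks.add(b);
    map.addBlockCollection(fileWithLink);

    System.out.println(map.blocksMap.get(b).name);  // prints INodeFileWithLink
  }
}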
@@ -1173,7 +1173,7 @@ public void replaceINodeDirectory(String path, INodeDirectory oldnode,
       replaceINodeUnsynced(path, oldnode, newnode);

       //update children's parent directory
-      for(INode i : newnode.getChildren()) {
+      for(INode i : newnode.getChildrenList()) {
         i.parent = newnode;
       }
     } finally {
@@ -140,7 +140,6 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
-import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
@@ -467,7 +466,6 @@ public static FSNamesystem loadFromDisk(Configuration conf,
           DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);

       this.blockManager = new BlockManager(this, this, conf);
-      this.snapshotManager = new SnapshotManager(this);
       this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();

       this.fsOwner = UserGroupInformation.getCurrentUser();
@@ -538,6 +536,7 @@ public static FSNamesystem loadFromDisk(Configuration conf,

       this.dtSecretManager = createDelegationTokenSecretManager(conf);
       this.dir = new FSDirectory(fsImage, this, conf);
+      this.snapshotManager = new SnapshotManager(this, dir);
       this.safeMode = new SafeModeInfo(conf);

     } catch(IOException e) {
@@ -5528,10 +5527,29 @@ public void disallowSnapshot(String snapshotRoot)
   /**
    * Create a snapshot
    * @param snapshotName The name of the snapshot
-   * @param snapshotRoot The directory where the snapshot will be taken
+   * @param path The directory path where the snapshot is taken
    */
-  public void createSnapshot(String snapshotName, String snapshotRoot)
+  public void createSnapshot(String snapshotName, String path)
       throws SafeModeException, IOException {
-    // TODO: implement
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      if (isInSafeMode()) {
+        throw new SafeModeException("Cannot create snapshot for " + path, safeMode);
+      }
+      checkOwner(path);
+
+      dir.writeLock();
+      try {
+        snapshotManager.createSnapshot(snapshotName, path);
+      } finally {
+        dir.writeUnlock();
+      }
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+
+    //TODO: audit log
   }
 }
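The new createSnapshot body follows the usual FSNamesystem pattern: take the namesystem write lock, check safe mode and ownership, take the FSDirectory write lock for the actual tree mutation, release both locks in reverse order, and only then sync the edit log. A minimal sketch of that nesting, using plain ReentrantReadWriteLock fields and a placeholder mutation rather than the real FSNamesystem API:

import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Toy illustration of the namesystem/directory lock nesting used above. */
public class LockNestingSketch {
  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();   // ~ FSNamesystem lock
  private final ReentrantReadWriteLock dirLock = new ReentrantReadWriteLock();  // ~ FSDirectory lock
  private boolean inSafeMode = false;        // placeholder for SafeModeInfo

  public void createSnapshot(String snapshotName, String path) {
    fsLock.writeLock().lock();               // outer: namesystem write lock
    try {
      if (inSafeMode) {
        throw new IllegalStateException("Cannot create snapshot for " + path);
      }
      dirLock.writeLock().lock();            // inner: directory write lock
      try {
        // placeholder for snapshotManager.createSnapshot(snapshotName, path)
        System.out.println("snapshot " + snapshotName + " of " + path);
      } finally {
        dirLock.writeLock().unlock();
      }
    } finally {
      fsLock.writeLock().unlock();
    }
    // edit-log sync happens outside the locks, as in the patch
    System.out.println("logSync()");
  }

  public static void main(String[] args) {
    new LockNestingSketch().createSnapshot("s0", "/user/data");
  }
}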
@@ -386,7 +386,7 @@ static String constructPath(byte[][] components, int start, int end) {
     return buf.toString();
   }

-  boolean removeNode() {
+  public boolean removeNode() {
     if (parent == null) {
       return false;
     } else {
@@ -48,18 +48,6 @@ public static INodeDirectory valueOf(INode inode, String path
   protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
   final static String ROOT_NAME = "";

-  /** Cast INode to INodeDirectory. */
-  public static INodeDirectory valueOf(INode inode, String src
-      ) throws IOException {
-    if (inode == null) {
-      throw new FileNotFoundException(src + " does not exist.");
-    }
-    if (!inode.isDirectory()) {
-      throw new IOException(src + " is not a directory.");
-    }
-    return (INodeDirectory)inode;
-  }
-
   private List<INode> children;

   protected INodeDirectory(String name, PermissionStatus permissions) {
@@ -82,7 +70,7 @@ public INodeDirectory(PermissionStatus permissions, long mTime) {
    *
    * @param other
    */
-  INodeDirectory(INodeDirectory other) {
+  public INodeDirectory(INodeDirectory other) {
     super(other);
     this.children = other.getChildren();
   }
@@ -297,7 +285,7 @@ int nextChild(byte[] name) {
    * @return null if the child with this name already exists;
    *         node, otherwise
    */
-  <T extends INode> T addChild(final T node, boolean setModTime) {
+  public <T extends INode> T addChild(final T node, boolean setModTime) {
     if (children == null) {
       children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
     }
@@ -446,6 +434,10 @@ public List<INode> getChildrenList() {
   public List<INode> getChildren() {
     return children;
   }
+  /** Set the children list. */
+  public void setChildren(List<INode> children) {
+    this.children = children;
+  }

   @Override
   int collectSubtreeBlocksAndClear(List<Block> v) {
@@ -192,7 +192,7 @@ long[] computeContentSummary(long[] summary) {
   /** Compute file size.
    * May or may not include BlockInfoUnderConstruction.
    */
-  protected long computeFileSize(boolean includesBlockInfoUnderConstruction) {
+  public long computeFileSize(boolean includesBlockInfoUnderConstruction) {
     if (blocks == null || blocks.length == 0) {
       return 0;
     }
@@ -34,7 +34,7 @@
  * I-node for file being written.
  */
 @InterfaceAudience.Private
-class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
+public class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
   /** Cast INode to INodeFileUnderConstruction. */
   public static INodeFileUnderConstruction valueOf(INode inode, String path
       ) throws IOException {
@@ -39,6 +39,14 @@ public class INodeSymlink extends INode {
     setModificationTimeForce(modTime);
     setAccessTime(atime);
   }
+
+  public INodeSymlink(INodeSymlink that) {
+    super(that);
+
+    //copy symlink
+    this.symlink = new byte[that.symlink.length];
+    System.arraycopy(that.symlink, 0, this.symlink, 0, that.symlink.length);
+  }

   @Override
   public boolean isLink() {
@@ -34,7 +34,7 @@ public INodeFileSnapshot(INodeFile f, long size) {
   }

   @Override
-  protected long computeFileSize(boolean includesBlockInfoUnderConstruction) {
+  public long computeFileSize(boolean includesBlockInfoUnderConstruction) {
     //ignore includesBlockInfoUnderConstruction
     //since files in a snapshot are considered as closed.
     return size;
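computeFileSize is widened to public here so that snapshot code can call it; INodeFileSnapshot overrides it to return the size captured at snapshot time, since a file in a snapshot is treated as closed even if the live file keeps growing. A simplified sketch of that override, with hypothetical classes in place of the real INodeFile hierarchy:

/** Toy illustration of a snapshot freezing a file's length. */
public class SnapshotSizeSketch {
  static class File {                       // hypothetical stand-in for INodeFile
    long[] blockSizes = new long[0];
    /** Live size: sum of the current block lengths. */
    public long computeFileSize(boolean includeUnderConstruction) {
      long size = 0;
      for (long b : blockSizes) {
        size += b;
      }
      return size;
    }
  }

  static class FileSnapshot extends File {  // hypothetical stand-in for INodeFileSnapshot
    private final long size;
    FileSnapshot(File f) {
      this.size = f.computeFileSize(true);  // capture the size at snapshot time
    }
    @Override
    public long computeFileSize(boolean includeUnderConstruction) {
      return size;                          // snapshot files are considered closed
    }
  }

  public static void main(String[] args) {
    File live = new File();
    live.blockSizes = new long[] {64, 64};
    FileSnapshot snap = new FileSnapshot(live);

    live.blockSizes = new long[] {64, 64, 32};       // the live file keeps growing
    System.out.println(live.computeFileSize(true));  // 160
    System.out.println(snap.computeFileSize(true));  // 128, frozen
  }
}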
@@ -22,19 +22,26 @@
 import java.util.List;

 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
+import org.apache.hadoop.hdfs.server.namenode.INodeSymlink;

 /** Manage snapshottable directories and their snapshots. */
 public class SnapshotManager {
-  private final Namesystem namesystem;
+  private final FSNamesystem namesystem;
+  private final FSDirectory fsdir;

   /** All snapshottable directories in the namesystem. */
   private final List<INodeDirectorySnapshottable> snapshottables
       = new ArrayList<INodeDirectorySnapshottable>();

-  public SnapshotManager(final Namesystem namesystem) {
+  public SnapshotManager(final FSNamesystem namesystem,
+      final FSDirectory fsdir) {
     this.namesystem = namesystem;
+    this.fsdir = fsdir;
   }

   /**
@@ -43,8 +50,8 @@ public SnapshotManager(final Namesystem namesystem) {
    * Otherwise, the {@link INodeDirectory} of the path is replaced by an
    * {@link INodeDirectorySnapshottable}.
    */
-  public void setSnapshottable(final String path, final int snapshotQuota,
-      final FSDirectory fsdir) throws IOException {
+  public void setSnapshottable(final String path, final int snapshotQuota
+      ) throws IOException {
     namesystem.writeLock();
     try {
       final INodeDirectory d = INodeDirectory.valueOf(fsdir.getINode(path), path);
@@ -62,16 +69,108 @@ public void setSnapshottable(final String path, final int snapshotQuota,
     }
   }

-  /** Create a snapshot of given path. */
-  public void createSnapshot(final String snapshotName, final String path,
-      final FSDirectory fsdir) throws IOException {
-    final INodeDirectorySnapshottable d = INodeDirectorySnapshottable.valueOf(
-        fsdir.getINode(path), path);
+  /**
+   * Create a snapshot of the given path.
+   *
+   * @param snapshotName The name of the snapshot.
+   * @param path The directory path where the snapshot will be taken.
+   */
+  public void createSnapshot(final String snapshotName, final String path
+      ) throws IOException {
+    new SnapshotCreation(path).run(snapshotName);
+  }
+
+  /**
+   * Create a snapshot of subtrees by recursively copying the directory
+   * structure from the source directory to the snapshot destination directory.
+   * This creation algorithm requires O(N) running time and O(N) memory,
+   * where N = # files + # directories + # symlinks.
+   */
+  class SnapshotCreation {
+    /** The source root directory path where the snapshot is taken. */
+    final INodeDirectorySnapshottable srcRoot;
+
+    /**
+     * Constructor.
+     * @param path The path must be a snapshottable directory.
+     */
+    private SnapshotCreation(final String path) throws IOException {
+      srcRoot = INodeDirectorySnapshottable.valueOf(fsdir.getINode(path), path);
+    }
+
+    void run(final String name) throws IOException {
+      final INodeDirectorySnapshotRoot root = srcRoot.addSnapshotRoot(name);
+      processRecursively(srcRoot, root);
+    }
+
+    //TODO: check ns quota
+    /** Process snapshot creation recursively. */
+    private void processRecursively(final INodeDirectory srcDir,
+        final INodeDirectory dstDir) throws IOException {
+      final List<INode> children = srcDir.getChildren();
+      if (children != null) {
+        final List<INode> inodes = new ArrayList<INode>(children.size());
+        for(final INode c : children) {
+          final INode i;
+          if (c == null) {
+            i = null;
+          } else if (c instanceof INodeDirectory) {
+            //also handle INodeDirectoryWithQuota
+            i = processINodeDirectory((INodeDirectory)c);
+          } else if (c instanceof INodeFileUnderConstruction) {
+            //TODO: support INodeFileUnderConstruction
+            throw new IOException("Not yet supported.");
+          } else if (c instanceof INodeFile) {
+            i = processINodeFile(srcDir, (INodeFile)c);
+          } else if (c instanceof INodeSymlink) {
+            i = new INodeSymlink((INodeSymlink)c);
+          } else {
+            throw new AssertionError("Unknown INode type: " + c.getClass()
+                + ", inode = " + c);
+          }
+          inodes.add(i);
+        }
+        dstDir.setChildren(inodes);
+      }
+    }

-    final INodeDirectorySnapshotRoot root = d.addSnapshotRoot(snapshotName);
-
-    //TODO: create the remaining subtree
+    /**
+     * Create destination INodeDirectory and make the recursive call.
+     * @return destination INodeDirectory.
+     */
+    private INodeDirectory processINodeDirectory(final INodeDirectory srcChild
+        ) throws IOException {
+      final INodeDirectory dstChild = new INodeDirectory(srcChild);
+      dstChild.setChildren(null);
+      processRecursively(srcChild, dstChild);
+      return dstChild;
+    }
+
+    /**
+     * Create destination INodeFileSnapshot and update source INode type.
+     * @return destination INodeFileSnapshot.
+     */
+    private INodeFileSnapshot processINodeFile(final INodeDirectory parent,
+        final INodeFile file) {
+      final INodeFileSnapshot snapshot = new INodeFileSnapshot(
+          file, file.computeFileSize(true));
+
+      final INodeFileWithLink srcWithLink;
+      //check source INode type
+      if (file instanceof INodeFileWithLink) {
+        srcWithLink = (INodeFileWithLink)file;
+      } else {
+        //source is an INodeFile, replace the source.
+        srcWithLink = new INodeFileWithLink(file);
+        file.removeNode();
+        parent.addChild(srcWithLink, false);
+
+        //update block map
+        namesystem.getBlockManager().addBlockCollection(srcWithLink);
+      }
+
+      //insert the snapshot to src's linked list.
+      srcWithLink.insert(snapshot);
+      return snapshot;
+    }
+  }
 }
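The SnapshotCreation class above walks the source directory once and builds a parallel destination tree, which is why its javadoc quotes O(N) running time and memory in the number of files, directories and symlinks. A standalone sketch of that single-pass recursive copy, using a hypothetical Node type instead of the INode hierarchy:

import java.util.ArrayList;
import java.util.List;

/** Toy single-pass recursive copy of a directory tree, as in SnapshotCreation. */
public class SubtreeCopySketch {
  static class Node {                     // hypothetical stand-in for INode
    final String name;
    final boolean isDir;
    List<Node> children;                  // null for files
    Node(String name, boolean isDir) {
      this.name = name;
      this.isDir = isDir;
      this.children = isDir ? new ArrayList<Node>() : null;
    }
  }

  /** Copy srcDir's subtree; every node is visited exactly once: O(N) time and memory. */
  static Node copySubtree(Node srcDir) {
    Node dstDir = new Node(srcDir.name, true);
    for (Node child : srcDir.children) {
      if (child.isDir) {
        dstDir.children.add(copySubtree(child));           // recurse into directories
      } else {
        dstDir.children.add(new Node(child.name, false));  // files/symlinks copied flat
      }
    }
    return dstDir;
  }

  public static void main(String[] args) {
    Node root = new Node("dir", true);
    root.children.add(new Node("file1", false));
    Node sub = new Node("sub", true);
    sub.children.add(new Node("file2", false));
    root.children.add(sub);

    Node snapshot = copySubtree(root);
    System.out.println(snapshot.children.size());                       // 2
    System.out.println(snapshot.children.get(1).children.get(0).name);  // file2
  }
}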