HDFS-4611. Update FSImage for INodeReference.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1463332 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2013-04-01 23:24:42 +00:00
Commit: 1096917649 (parent: 8ee6ecaea4)

22 changed files with 515 additions and 217 deletions


@@ -223,3 +223,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4637. INodeDirectory#replaceSelf4Quota may incorrectly convert a newly
   created directory to an INodeDirectoryWithSnapshot. (Jing Zhao via szetszwo)
+  HDFS-4611. Update FSImage for INodeReference. (szetszwo)


@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.security.token.delegation;
-import java.io.DataInputStream;
+import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -110,7 +110,7 @@ public synchronized long getTokenExpiryTime(
    * @param in input stream to read fsimage
    * @throws IOException
    */
-  public synchronized void loadSecretManagerState(DataInputStream in)
+  public synchronized void loadSecretManagerState(DataInput in)
       throws IOException {
     if (running) {
       // a safety check
@@ -266,7 +266,7 @@ private synchronized void saveAllKeys(DataOutputStream out)
   /**
    * Private helper methods to load Delegation tokens from fsimage
    */
-  private synchronized void loadCurrentTokens(DataInputStream in)
+  private synchronized void loadCurrentTokens(DataInput in)
       throws IOException {
     int numberOfTokens = in.readInt();
     for (int i = 0; i < numberOfTokens; i++) {
@@ -282,7 +282,7 @@ private synchronized void loadCurrentTokens(DataInputStream in)
    * @param in
    * @throws IOException
    */
-  private synchronized void loadAllKeys(DataInputStream in) throws IOException {
+  private synchronized void loadAllKeys(DataInput in) throws IOException {
     int numberOfKeys = in.readInt();
     for (int i = 0; i < numberOfKeys; i++) {
       DelegationKey value = new DelegationKey();

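The DataInputStream-to-DataInput change above recurs throughout this commit: the fsimage loaders now depend only on the java.io.DataInput interface, so the same code can read from any DataInput source instead of one concrete stream class. A minimal stand-alone sketch of the idea, with invented names that are not part of the patch:

    import java.io.ByteArrayInputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.IOException;

    class DataInputSketch {
      // Accepting DataInput rather than DataInputStream lets the same loader read
      // from a stream, a RandomAccessFile, or any other DataInput implementation.
      static int loadCount(DataInput in) throws IOException {
        return in.readInt();
      }

      public static void main(String[] args) throws IOException {
        byte[] bytes = {0, 0, 0, 7};
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
          System.out.println(loadCount(in)); // prints 7
        }
      }
    }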

@@ -570,7 +570,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
             + " because the source can not be removed");
         return false;
       }
-      srcChild.setLocalName(dstComponents[dstComponents.length - 1]);
+      //TODO: setLocalName breaks created/deleted lists
+      srcChild.setLocalName(dstIIP.getLastLocalName());
       // add src to the destination
       added = addLastINodeNoQuotaCheck(dstIIP, srcChild);
@@ -750,6 +751,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
           undoRemoveDst = true;
         }
       }
+      //TODO: setLocalName breaks created/deleted lists
       srcChild.setLocalName(dstIIP.getLastLocalName());
       // add src as dst to complete rename


@@ -17,23 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
+import org.apache.hadoop.io.Text;
 /**
  * Simple container class that handles support for compressed fsimage files.
  */
@@ -108,15 +108,14 @@ private static FSImageCompression createCompression(Configuration conf,
    * underlying IO fails.
    */
   static FSImageCompression readCompressionHeader(
-      Configuration conf,
-      DataInputStream dis) throws IOException
+      Configuration conf, DataInput in) throws IOException
   {
-    boolean isCompressed = dis.readBoolean();
+    boolean isCompressed = in.readBoolean();
     if (!isCompressed) {
       return createNoopCompression();
     } else {
-      String codecClassName = Text.readString(dis);
+      String codecClassName = Text.readString(in);
       return createCompression(conf, codecClassName);
     }
   }

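Judging from readCompressionHeader above, the fsimage compression header is a boolean isCompressed flag followed, only when true, by the codec class name written as a Hadoop Text string. A rough sketch of a matching writer and reader pair; this is illustrative only, the real header is produced elsewhere in FSImageCompression:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import org.apache.hadoop.io.Text;

    class CompressionHeaderSketch {
      // Mirrors the format consumed by readCompressionHeader: a boolean flag,
      // then (only if compressed) the codec class name as a Text string.
      static void writeHeader(DataOutput out, String codecClassName) throws IOException {
        boolean compressed = codecClassName != null;
        out.writeBoolean(compressed);
        if (compressed) {
          Text.writeString(out, codecClassName);
        }
      }

      static String readHeader(DataInput in) throws IOException {
        return in.readBoolean() ? Text.readString(in) : null;
      }
    }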

@@ -19,6 +19,7 @@
 import static org.apache.hadoop.util.Time.now;
+import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
@@ -58,6 +59,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
@@ -186,6 +188,9 @@ public static class Loader {
     private long imgTxId;
     /** The MD5 sum of the loaded file */
     private MD5Hash imgDigest;
+    private Map<Integer, Snapshot> snapshotMap = null;
+    private final ReferenceMap referenceMap = new ReferenceMap();
     Loader(Configuration conf, FSNamesystem namesystem) {
       this.conf = conf;
@@ -267,7 +272,7 @@ void load(File curFile) throws IOException {
         }
         if (supportSnapshot) {
-          namesystem.getSnapshotManager().read(in);
+          snapshotMap = namesystem.getSnapshotManager().read(in, this);
         }
         // read compression related info
@@ -331,7 +336,7 @@ private void updateRootAttr(INodeWithAdditionalFields root) {
      *
      * @param in Image input stream
      */
-    private void loadLocalNameINodesWithSnapshot(DataInputStream in)
+    private void loadLocalNameINodesWithSnapshot(DataInput in)
         throws IOException {
       assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
           getLayoutVersion());
@@ -350,7 +355,7 @@ private void loadLocalNameINodesWithSnapshot(DataInputStream in)
      * @param in image input stream
      * @throws IOException
      */
-    private void loadLocalNameINodes(long numFiles, DataInputStream in)
+    private void loadLocalNameINodes(long numFiles, DataInput in)
        throws IOException {
       assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
           getLayoutVersion());
@@ -373,20 +378,20 @@ private void loadLocalNameINodes(long numFiles, DataInputStream in)
     /**
      * Load information about root, and use the information to update the root
      * directory of NameSystem.
-     * @param in The {@link DataInputStream} instance to read.
+     * @param in The {@link DataInput} instance to read.
      */
-    private void loadRoot(DataInputStream in) throws IOException {
+    private void loadRoot(DataInput in) throws IOException {
       // load root
       if (in.readShort() != 0) {
         throw new IOException("First node is not root");
       }
-      final INodeWithAdditionalFields root = loadINode(null, false, in);
+      final INodeDirectory root = loadINode(null, false, in).asDirectory();
       // update the root's attributes
       updateRootAttr(root);
     }
     /** Load children nodes for the parent directory. */
-    private int loadChildren(INodeDirectory parent, DataInputStream in)
+    private int loadChildren(INodeDirectory parent, DataInput in)
         throws IOException {
       int numChildren = in.readInt();
       for (int i = 0; i < numChildren; i++) {
@@ -399,9 +404,9 @@ private int loadChildren(INodeDirectory parent, DataInputStream in)
     /**
      * Load a directory when snapshot is supported.
-     * @param in The {@link DataInputStream} instance to read.
+     * @param in The {@link DataInput} instance to read.
      */
-    private void loadDirectoryWithSnapshot(DataInputStream in)
+    private void loadDirectoryWithSnapshot(DataInput in)
         throws IOException {
       // Step 1. Identify the parent INode
       String parentPath = FSImageSerialization.readString(in);
@@ -443,7 +448,7 @@ private void loadDirectoryWithSnapshot(DataInputStream in)
      * @return number of child inodes read
      * @throws IOException
      */
-    private int loadDirectory(DataInputStream in) throws IOException {
+    private int loadDirectory(DataInput in) throws IOException {
       String parentPath = FSImageSerialization.readString(in);
       final INodeDirectory parent = INodeDirectory.valueOf(
           namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
@@ -458,19 +463,19 @@ private int loadDirectory(DataInputStream in) throws IOException {
      * @throws IOException if any error occurs
      */
     private void loadFullNameINodes(long numFiles,
-        DataInputStream in) throws IOException {
+        DataInput in) throws IOException {
       byte[][] pathComponents;
       byte[][] parentPath = {{}};
       FSDirectory fsDir = namesystem.dir;
       INodeDirectory parentINode = fsDir.rootDir;
       for (long i = 0; i < numFiles; i++) {
         pathComponents = FSImageSerialization.readPathComponents(in);
-        final INodeWithAdditionalFields newNode = loadINode(
+        final INode newNode = loadINode(
             pathComponents[pathComponents.length-1], false, in);
         if (isRoot(pathComponents)) { // it is the root
           // update the root's attributes
-          updateRootAttr(newNode);
+          updateRootAttr(newNode.asDirectory());
           continue;
         }
         // check if the new inode belongs to the same parent
@@ -527,12 +532,9 @@ public FSDirectory getFSDirectoryInLoading() {
     }
     public INode loadINodeWithLocalName(boolean isSnapshotINode,
-        DataInputStream in) throws IOException {
-      final byte[] localName = new byte[in.readShort()];
-      in.readFully(localName);
-      final INode inode = loadINode(localName, isSnapshotINode, in);
-      inode.setLocalName(localName);
-      return inode;
+        DataInput in) throws IOException {
+      final byte[] localName = FSImageSerialization.readLocalName(in);
+      return loadINode(localName, isSnapshotINode, in);
     }
     /**
@@ -541,8 +543,8 @@ public INode loadINodeWithLocalName(boolean isSnapshotINode,
      * @param in data input stream from which image is read
      * @return an inode
      */
-    INodeWithAdditionalFields loadINode(final byte[] localName, boolean isSnapshotINode,
-        DataInputStream in) throws IOException {
+    INode loadINode(final byte[] localName, boolean isSnapshotINode,
+        DataInput in) throws IOException {
       final int imgVersion = getLayoutVersion();
       final long inodeId = namesystem.allocateNewInodeId();
@@ -632,12 +634,27 @@ INodeWithAdditionalFields loadINode(final byte[] localName, boolean isSnapshotIN
         final PermissionStatus permissions = PermissionStatus.read(in);
         return new INodeSymlink(inodeId, localName, permissions,
             modificationTime, atime, symlink);
+      } else if (numBlocks == -3) {
+        //reference
+        final boolean isWithName = in.readBoolean();
+        final INodeReference.WithCount withCount
+            = referenceMap.loadINodeReferenceWithCount(isSnapshotINode, in, this);
+        if (isWithName) {
+          return new INodeReference.WithName(null, withCount, localName);
+        } else {
+          final INodeReference ref = new INodeReference(null, withCount);
+          withCount.setParentReference(ref);
+          return ref;
+        }
       }
       throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
     }
-    private void loadFilesUnderConstruction(DataInputStream in,
+    private void loadFilesUnderConstruction(DataInput in,
         boolean supportSnapshot) throws IOException {
       FSDirectory fsDir = namesystem.dir;
       int size = in.readInt();
@@ -665,7 +682,7 @@ private void loadFilesUnderConstruction(DataInputStream in,
       }
     }
-    private void loadSecretManagerState(DataInputStream in)
+    private void loadSecretManagerState(DataInput in)
         throws IOException {
       int imgVersion = getLayoutVersion();
@@ -713,6 +730,10 @@ byte[][] getParent(byte[][] path) {
       }
       return result;
     }
+    public Snapshot getSnapshot(DataInput in) throws IOException {
+      return snapshotMap.get(in.readInt());
+    }
   }
   /**
@@ -727,6 +748,7 @@ static class Saver {
     /** The MD5 checksum of the file that was written */
     private MD5Hash savedDigest;
+    private final ReferenceMap referenceMap = new ReferenceMap();
     static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR);
@@ -792,7 +814,7 @@ void save(File newFile, FSImageCompression compression) throws IOException {
       byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH];
       ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
       // save the root
-      FSImageSerialization.saveINode2Image(fsDir.rootDir, out, false);
+      FSImageSerialization.saveINode2Image(fsDir.rootDir, out, false, referenceMap);
       // save the rest of the nodes
       saveImage(strbuf, fsDir.rootDir, out, null);
       // save files under construction
@@ -805,6 +827,7 @@ void save(File newFile, FSImageCompression compression) throws IOException {
       context.checkCancelled();
       fout.getChannel().force(true);
     } finally {
+      referenceMap.removeAllINodeReferenceWithId();
       out.close();
     }
@@ -830,7 +853,7 @@ private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out)
       int i = 0;
       for(INode child : children) {
         // print all children first
-        FSImageSerialization.saveINode2Image(child, out, false);
+        FSImageSerialization.saveINode2Image(child, out, false, referenceMap);
         if (child.isDirectory()) {
           dirNum++;
         }
@@ -927,7 +950,7 @@ private void saveImage(ByteBuffer currentDirName, INodeDirectory current,
       dirNum += saveChildren(children, out);
       // 4. Write DirectoryDiff lists, if there is any.
-      SnapshotFSImageFormat.saveDirectoryDiffList(current, out);
+      SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap);
       // Write sub-tree of sub-directories, including possible snapshots of
       // deleted sub-directories

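In loadINode above, the value read into numBlocks doubles as a type tag: a non-negative count means a regular file, while negative sentinels select the other inode kinds, and this patch adds -3 for an INodeReference followed by a boolean that distinguishes a WithName reference from a plain one. A simplified, stand-alone sketch of that dispatch; which negative values mean directory or symlink is an assumption here, only the -3 case is taken from the patch:

    import java.io.DataInput;
    import java.io.IOException;

    class INodeTypeDispatchSketch {
      enum Kind { FILE, DIRECTORY, SYMLINK, REFERENCE_WITH_NAME, REFERENCE }

      static Kind kindOf(int numBlocks, DataInput in) throws IOException {
        if (numBlocks >= 0) {
          return Kind.FILE;                  // a real file record follows
        } else if (numBlocks == -1) {
          return Kind.DIRECTORY;             // assumption, not from the patch
        } else if (numBlocks == -2) {
          return Kind.SYMLINK;               // assumption, not from the patch
        } else if (numBlocks == -3) {
          // reference record: the next field says whether it is a WithName reference
          return in.readBoolean() ? Kind.REFERENCE_WITH_NAME : Kind.REFERENCE;
        }
        throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
      }
    }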

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
-import java.io.DataInputStream;
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ShortWritable;
 import org.apache.hadoop.io.Text;
@@ -87,7 +88,7 @@ private static void writePermissionStatus(INodeWithAdditionalFields inode,
   }
   private static void writeBlocks(final Block[] blocks,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out) throws IOException {
     if (blocks == null) {
       out.writeInt(0);
     } else {
@@ -102,7 +103,7 @@ private static void writeBlocks(final Block[] blocks,
   // from the input stream
   //
   static INodeFileUnderConstruction readINodeUnderConstruction(
-      DataInputStream in) throws IOException {
+      DataInput in) throws IOException {
     byte[] name = readBytes(in);
     short blockReplication = in.readShort();
     long modificationTime = in.readLong();
@@ -164,7 +165,7 @@ static void writeINodeUnderConstruction(DataOutputStream out,
    * @param out The {@link DataOutputStream} where the fields are written
    * @param writeBlock Whether to write block information
    */
-  public static void writeINodeFile(INodeFile file, DataOutputStream out,
+  public static void writeINodeFile(INodeFile file, DataOutput out,
       boolean writeUnderConstruction) throws IOException {
     writeLocalName(file, out);
     out.writeShort(file.getFileReplication());
@@ -233,17 +234,37 @@ private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
     writePermissionStatus(node, out);
   }
+  /** Serialize a {@link INodeReference} node */
+  private static void writeINodeReference(INodeReference ref, DataOutput out,
+      boolean writeUnderConstruction, ReferenceMap referenceMap
+      ) throws IOException {
+    writeLocalName(ref, out);
+    out.writeShort(0);  // replication
+    out.writeLong(0);   // modification time
+    out.writeLong(0);   // access time
+    out.writeLong(0);   // preferred block size
+    out.writeInt(-3);   // # of blocks
+    out.writeBoolean(ref instanceof INodeReference.WithName);
+    final INodeReference.WithCount withCount
+        = (INodeReference.WithCount)ref.getReferredINode();
+    referenceMap.writeINodeReferenceWithCount(withCount, out, writeUnderConstruction);
+  }
   /**
    * Save one inode's attributes to the image.
    */
-  public static void saveINode2Image(INode node, DataOutputStream out,
-      boolean writeUnderConstruction)
+  public static void saveINode2Image(INode node, DataOutput out,
+      boolean writeUnderConstruction, ReferenceMap referenceMap)
       throws IOException {
-    if (node.isDirectory()) {
+    if (node.isReference()) {
+      writeINodeReference(node.asReference(), out, writeUnderConstruction, referenceMap);
+    } else if (node.isDirectory()) {
       writeINodeDirectory(node.asDirectory(), out);
     } else if (node.isSymlink()) {
       writeINodeSymlink(node.asSymlink(), out);
-    } else {
+    } else if (node.isFile()) {
       writeINodeFile(node.asFile(), out, writeUnderConstruction);
     }
   }
@@ -252,19 +273,19 @@ public static void saveINode2Image(INode node, DataOutputStream out,
   // code is moved into this package. This method should not be called
   // by other code.
   @SuppressWarnings("deprecation")
-  public static String readString(DataInputStream in) throws IOException {
+  public static String readString(DataInput in) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.readFields(in);
     return ustr.toStringChecked();
   }
-  static String readString_EmptyAsNull(DataInputStream in) throws IOException {
+  static String readString_EmptyAsNull(DataInput in) throws IOException {
     final String s = readString(in);
     return s.isEmpty()? null: s;
   }
   @SuppressWarnings("deprecation")
-  public static void writeString(String str, DataOutputStream out) throws IOException {
+  public static void writeString(String str, DataOutput out) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.set(str);
     ustr.write(out);
@@ -272,7 +293,7 @@ public static void writeString(String str, DataOutputStream out) throws IOExcept
   /** read the long value */
-  static long readLong(DataInputStream in) throws IOException {
+  static long readLong(DataInput in) throws IOException {
     LongWritable ustr = TL_DATA.get().U_LONG;
     ustr.readFields(in);
     return ustr.get();
@@ -286,7 +307,7 @@ static void writeLong(long value, DataOutputStream out) throws IOException {
   }
   /** read short value */
-  static short readShort(DataInputStream in) throws IOException {
+  static short readShort(DataInput in) throws IOException {
     ShortWritable uShort = TL_DATA.get().U_SHORT;
     uShort.readFields(in);
     return uShort.get();
@@ -301,7 +322,7 @@ static void writeShort(short value, DataOutputStream out) throws IOException {
   // Same comments apply for this method as for readString()
   @SuppressWarnings("deprecation")
-  public static byte[] readBytes(DataInputStream in) throws IOException {
+  public static byte[] readBytes(DataInput in) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.readFields(in);
     int len = ustr.getLength();
@@ -319,7 +340,7 @@ public static byte[] readBytes(DataInputStream in) throws IOException {
    * @throws IOException
    */
   @SuppressWarnings("deprecation")
-  public static byte[][] readPathComponents(DataInputStream in)
+  public static byte[][] readPathComponents(DataInput in)
       throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
@@ -328,6 +349,12 @@ public static byte[][] readPathComponents(DataInputStream in)
         ustr.getLength(), (byte) Path.SEPARATOR_CHAR);
   }
+  public static byte[] readLocalName(DataInput in) throws IOException {
+    byte[] createdNodeName = new byte[in.readShort()];
+    in.readFully(createdNodeName);
+    return createdNodeName;
+  }
   private static void writeLocalName(INode inode, DataOutput out)
       throws IOException {
     final byte[] name = inode.getLocalNameBytes();
@@ -358,7 +385,7 @@ public static void writeCompactBlockArray(
   }
   public static Block[] readCompactBlockArray(
-      DataInputStream in, int logVersion) throws IOException {
+      DataInput in, int logVersion) throws IOException {
     int num = WritableUtils.readVInt(in);
     if (num < 0) {
       throw new IOException("Invalid block array length: " + num);

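writeINodeReference above keeps a reference record wire-compatible with a file record by emitting zeroed attribute fields before the -3 marker and the WithName flag. A small round-trip sketch of that fixed header; the local-name encoding follows readLocalName, and the WithCount payload that the real code appends through the ReferenceMap is deliberately omitted:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    class ReferenceRecordSketch {
      // Header layout used by writeINodeReference: local name, zeroed
      // replication/mtime/atime/block-size placeholders, the -3 marker,
      // and the WithName flag.
      static void writeHeader(DataOutput out, byte[] localName, boolean isWithName)
          throws IOException {
        out.writeShort(localName.length);
        out.write(localName);
        out.writeShort(0);   // replication (unused for references)
        out.writeLong(0);    // modification time
        out.writeLong(0);    // access time
        out.writeLong(0);    // preferred block size
        out.writeInt(-3);    // marks this record as a reference
        out.writeBoolean(isWithName);
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeHeader(new DataOutputStream(bytes),
            "foo".getBytes(StandardCharsets.UTF_8), true);

        DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        byte[] name = new byte[in.readShort()];      // same decoding as readLocalName
        in.readFully(name);
        in.readShort(); in.readLong(); in.readLong(); in.readLong();  // skip placeholders
        System.out.println(in.readInt() == -3 && in.readBoolean());   // true
      }
    }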

@@ -33,8 +33,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
@@ -75,6 +75,7 @@
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
+import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
@@ -5353,7 +5354,7 @@ void saveSecretManagerState(DataOutputStream out) throws IOException {
   /**
    * @param in load the state of secret manager from input stream
    */
-  void loadSecretManagerState(DataInputStream in) throws IOException {
+  void loadSecretManagerState(DataInput in) throws IOException {
     dtSecretManager.loadSecretManagerState(in);
   }


@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
+import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
@@ -42,7 +43,6 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.SignedBytes;
-//import org.apache.hadoop.hdfs.util.EnumCounters;
 /**
  * We keep an in-memory representation of the file/block hierarchy.
@@ -420,21 +420,30 @@ public String toString() {
   }
   @VisibleForTesting
-  public String getObjectString() {
+  public final String getObjectString() {
     return getClass().getSimpleName() + "@"
         + Integer.toHexString(super.hashCode());
   }
+  /** @return a string description of the parent. */
   @VisibleForTesting
-  public String toStringWithObjectType() {
-    return toString() + "(" + getObjectString() + ")";
+  public final String getParentString() {
+    final INodeReference parentRef = getParentReference();
+    if (parentRef != null) {
+      return "parentRef=" + parentRef.getLocalName() + "->";
+    } else {
+      final INodeDirectory parentDir = getParent();
+      if (parentDir != null) {
+        return "parentDir=" + parentDir.getLocalName() + "/";
+      } else {
+        return "parent=null";
+      }
+    }
   }
   @VisibleForTesting
   public String toDetailString() {
-    final INodeDirectory p = getParent();
-    return toStringWithObjectType()
-        + ", parent=" + (p == null? null: p.toStringWithObjectType());
+    return toString() + "(" + getObjectString() + "), " + getParentString();
   }
   /** @return the parent directory */
@@ -611,6 +620,11 @@ public final StringBuffer dumpTreeRecursively() {
     return out.getBuffer();
   }
+  @VisibleForTesting
+  public final void dumpTreeRecursively(PrintStream out) {
+    dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null);
+  }
   /**
    * Dump tree recursively.
    * @param prefix The prefix string that each line should print.
@@ -623,10 +637,8 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
     out.print(getLocalName());
     out.print(" (");
     out.print(getObjectString());
-    out.print("), parent=");
-    final INodeDirectory p = getParent();
-    out.print(p == null? null: p.getLocalName() + "/");
+    out.print("), ");
+    out.print(getParentString());
     out.print(", " + getPermissionStatus(snapshot));
   }


@@ -227,9 +227,9 @@ public void replaceChild(final INode oldChild, final INode newChild) {
   INodeReference.WithCount replaceChild4Reference(INode oldChild) {
     Preconditions.checkArgument(!oldChild.isReference());
     final INodeReference.WithCount withCount
-        = new INodeReference.WithCount(oldChild);
-    final INodeReference ref = new INodeReference(withCount);
-    withCount.incrementReferenceCount();
+        = new INodeReference.WithCount(null, oldChild);
+    final INodeReference ref = new INodeReference(this, withCount);
+    withCount.setParentReference(ref);
     replaceChild(oldChild, ref);
     return withCount;
   }
@@ -897,6 +897,9 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
     if (this instanceof INodeDirectoryWithQuota) {
       out.print(((INodeDirectoryWithQuota)this).quotaString());
     }
+    if (this instanceof Snapshot.Root) {
+      out.print(", snapshotId=" + snapshot.getId());
+    }
     out.println();
     if (prefix.length() >= 2) {


@@ -17,11 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
+import java.io.PrintWriter;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import com.google.common.base.Preconditions;
 /**
  * An anonymous reference to an inode.
  *
@@ -76,11 +80,9 @@ private static int removeReference(INodeReference ref) {
   private INode referred;
-  INodeReference(INode referred) {
-    super(referred.getParent());
+  public INodeReference(INode parent, INode referred) {
+    super(parent);
     this.referred = referred;
-    referred.setParentReference(this);
   }
@@ -210,7 +212,9 @@ public final void setAccessTime(long accessTime) {
   @Override
   final INode recordModification(Snapshot latest) throws QuotaExceededException {
-    return referred.recordModification(latest);
+    referred.recordModification(latest);
+    // reference is never replaced
+    return this;
   }
   @Override
@@ -265,22 +269,47 @@ public final long getDsQuota() {
   @Override
   public final void clear() {
     super.clear();
+    referred.clear();
     referred = null;
   }
+  @Override
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
+      final Snapshot snapshot) {
+    super.dumpTreeRecursively(out, prefix, snapshot);
+    if (this instanceof WithCount) {
+      out.print(", count=" + ((WithCount)this).getReferenceCount());
+    }
+    out.println();
+    final StringBuilder b = new StringBuilder();
+    for(int i = 0; i < prefix.length(); i++) {
+      b.append(' ');
+    }
+    b.append("->");
+    getReferredINode().dumpTreeRecursively(out, b, snapshot);
+  }
   /** An anonymous reference with reference count. */
   public static class WithCount extends INodeReference {
-    private int referenceCount = 0;
+    private int referenceCount = 1;
-    WithCount(INode inode) {
-      super(inode);
+    public WithCount(INodeReference parent, INode referred) {
+      super(parent, referred);
+      Preconditions.checkArgument(!referred.isReference());
+      referred.setParentReference(this);
     }
+    /** @return the reference count. */
+    public int getReferenceCount() {
+      return referenceCount;
+    }
+    /** Increment and then return the reference count. */
     public int incrementReferenceCount() {
      return ++referenceCount;
    }
+    /** Decrement and then return the reference count. */
    public int decrementReferenceCount() {
      return --referenceCount;
    }
@@ -291,8 +320,8 @@ public static class WithName extends INodeReference {
     private final byte[] name;
-    public WithName(WithCount referred, byte[] name) {
-      super(referred);
+    public WithName(INodeDirectory parent, WithCount referred, byte[] name) {
+      super(parent, referred);
       this.name = name;
     }

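After this patch a WithCount is created with a reference count of 1 (for the reference that owns it) and is incremented only when an extra WithName reference is attached, as replaceRemovedChild4Reference does further below. A toy model of that counting, detached from the INode hierarchy:

    class RefCountSketch {
      // Toy stand-in for INodeReference.WithCount: the count starts at 1 for the
      // first reference and changes only through the increment/decrement calls.
      static class WithCount {
        private int referenceCount = 1;
        int incrementReferenceCount() { return ++referenceCount; }
        int decrementReferenceCount() { return --referenceCount; }
        int getReferenceCount() { return referenceCount; }
      }

      public static void main(String[] args) {
        WithCount wc = new WithCount();               // created for the original reference
        wc.incrementReferenceCount();                 // a WithName reference is attached
        System.out.println(wc.getReferenceCount());   // 2
        wc.decrementReferenceCount();                 // one reference goes away
        System.out.println(wc.getReferenceCount());   // 1
      }
    }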

@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
-import java.io.DataOutputStream;
+import java.io.DataOutput;
 import java.io.IOException;
-import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import com.google.common.base.Preconditions;
@@ -144,11 +144,11 @@ public String toString() {
       + (posteriorDiff == null? null: posteriorDiff.snapshot) + ")";
   }
-  void writeSnapshotPath(DataOutputStream out) throws IOException {
-    // Assume the snapshot is recorded before.
-    // The root path is sufficient for looking up the Snapshot object.
-    FSImageSerialization.writeString(snapshot.getRoot().getFullPathName(), out);
+  void writeSnapshot(DataOutput out) throws IOException {
+    // Assume the snapshot is recorded before, write id only.
+    out.writeInt(snapshot.getId());
   }
-  abstract void write(DataOutputStream out) throws IOException;
+  abstract void write(DataOutput out, ReferenceMap referenceMap
+      ) throws IOException;
 }

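The writeSnapshotPath-to-writeSnapshot change above replaces the snapshot root's full path with the integer snapshot id; on load, FSImageFormat.Loader#getSnapshot resolves the id through the snapshotMap built while reading the snapshot section. A minimal sketch of that id-based round trip; the map type mirrors the Map<Integer, Snapshot> field added to the Loader, and the Snapshot stand-in below is simplified:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    class SnapshotIdSketch {
      /** Simplified stand-in for the real Snapshot class. */
      static class Snapshot {
        final int id;
        final String name;
        Snapshot(int id, String name) { this.id = id; this.name = name; }
      }

      // Writing a diff records only the id; the snapshot itself was already
      // serialized in the snapshot section of the fsimage.
      static void writeSnapshot(DataOutput out, Snapshot s) throws IOException {
        out.writeInt(s.id);
      }

      // Loading resolves the id against the map built from that section.
      static Snapshot readSnapshot(DataInput in, Map<Integer, Snapshot> snapshotMap)
          throws IOException {
        return snapshotMap.get(in.readInt());
      }

      public static void main(String[] args) throws IOException {
        Map<Integer, Snapshot> snapshotMap = new HashMap<Integer, Snapshot>();
        Snapshot s1 = new Snapshot(1, "s1");
        snapshotMap.put(s1.id, s1);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeSnapshot(new DataOutputStream(bytes), s1);
        Snapshot loaded = readSnapshot(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), snapshotMap);
        System.out.println(loaded.name);   // s1
      }
    }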

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
-import java.io.DataOutputStream;
+import java.io.DataOutput;
 import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 /**
  * {@link INodeFile} with a link to the next element.
@@ -90,9 +91,9 @@ public String toString() {
         + (snapshotINode == null? "?": snapshotINode.getFileReplication());
   }
-  /** Serialize fields to out */
-  void write(DataOutputStream out) throws IOException {
-    writeSnapshotPath(out);
+  @Override
+  void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
+    writeSnapshot(out);
     out.writeLong(fileSize);
     // write snapshotINode


@@ -457,6 +457,11 @@ void replaceSelf(final Snapshot latest) throws QuotaExceededException {
     }
   }
+  @Override
+  public String toDetailString() {
+    return super.toDetailString() + ", snapshotsByNames=" + snapshotsByNames;
+  }
   @Override
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
       Snapshot snapshot) {


@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
-import java.io.DataOutputStream;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.hdfs.util.Diff.Container;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
@@ -122,7 +123,7 @@ private Quota.Counts destroyDeletedList(
     }
     /** Serialize {@link #created} */
-    private void writeCreated(DataOutputStream out) throws IOException {
+    private void writeCreated(DataOutput out) throws IOException {
       final List<INode> created = getList(ListType.CREATED);
       out.writeInt(created.size());
       for (INode node : created) {
@@ -134,18 +135,20 @@ private void writeCreated(DataOutputStream out) throws IOException {
     }
     /** Serialize {@link #deleted} */
-    private void writeDeleted(DataOutputStream out) throws IOException {
+    private void writeDeleted(DataOutput out,
+        ReferenceMap referenceMap) throws IOException {
       final List<INode> deleted = getList(ListType.DELETED);
       out.writeInt(deleted.size());
       for (INode node : deleted) {
-        FSImageSerialization.saveINode2Image(node, out, true);
+        FSImageSerialization.saveINode2Image(node, out, true, referenceMap);
       }
     }
     /** Serialize to out */
-    private void write(DataOutputStream out) throws IOException {
+    private void write(DataOutput out, ReferenceMap referenceMap
+        ) throws IOException {
       writeCreated(out);
-      writeDeleted(out);
+      writeDeleted(out, referenceMap);
     }
     /** @return The list of INodeDirectory contained in the deleted list */
@@ -339,8 +342,8 @@ public String toString() {
     }
     @Override
-    void write(DataOutputStream out) throws IOException {
-      writeSnapshotPath(out);
+    void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
+      writeSnapshot(out);
       out.writeInt(childrenSize);
       // write snapshotINode
@@ -356,7 +359,7 @@ void write(DataOutputStream out) throws IOException {
       }
     }
     // Write diff. Node need to write poseriorDiff, since diffs is a list.
-    diff.write(out);
+    diff.write(out, referenceMap);
   }
   @Override
@@ -604,9 +607,10 @@ public void replaceChild(final INode oldChild, final INode newChild) {
   /** The child just has been removed, replace it with a reference. */
   public INodeReference.WithName replaceRemovedChild4Reference(
       INode oldChild, INodeReference.WithCount newChild, byte[] childName) {
-    final INodeReference.WithName ref = new INodeReference.WithName(
+    final INodeReference.WithName ref = new INodeReference.WithName(this,
         newChild, childName);
+    newChild.incrementReferenceCount();
     diffs.replaceChild(ListType.CREATED, oldChild, ref);
     // the old child must be in the deleted list
     Preconditions.checkState(

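The ReferenceMap threaded through writeDeleted and write above appears in this patch only through its call sites (writeINodeReferenceWithCount, loadINodeReferenceWithCount, removeAllINodeReferenceWithId). A speculative sketch of the idea those calls suggest: serialize a shared WithCount in full the first time it is met and refer to it by id afterwards, so a subtree reachable from several snapshot diffs is stored only once. This is an assumption about the class's shape, not its actual code:

    import java.util.HashMap;
    import java.util.Map;

    class ReferenceMapSketch<T> {
      // First encounter: assign an id and remember the object; later encounters
      // reuse the id, so the image stores the referred subtree only once.
      private final Map<T, Long> ids = new HashMap<T, Long>();
      private final Map<Long, T> objects = new HashMap<Long, T>();
      private long nextId = 0;

      /** @return true if the object has not been assigned an id yet. */
      boolean isFirstEncounter(T obj) {
        return !ids.containsKey(obj);
      }

      /** Assign (or look up) the id for an object being written. */
      long idOf(T obj) {
        Long id = ids.get(obj);
        if (id == null) {
          id = nextId++;
          ids.put(obj, id);
          objects.put(id, obj);
        }
        return id;
      }

      /** Resolve an id read back from the image. */
      T byId(long id) {
        return objects.get(id);
      }

      // Counterpart of removeAllINodeReferenceWithId: drop the bookkeeping
      // once the whole image has been written or loaded.
      void clear() {
        ids.clear();
        objects.clear();
        nextId = 0;
      }
    }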

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Comparator;
@@ -25,6 +26,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@@ -122,14 +124,17 @@ public String getFullPathName() {
     this.root.setLocalName(DFSUtil.string2Bytes(name));
   }
-  Snapshot(int id, INodeDirectory dir,
-      INodeDirectorySnapshottable parent) {
+  Snapshot(int id, INodeDirectory dir, INodeDirectorySnapshottable parent) {
     this.id = id;
     this.root = new Root(dir);
     this.root.setParent(parent);
   }
+  public int getId() {
+    return id;
+  }
   /** @return the root directory of the snapshot. */
   public Root getRoot() {
     return root;
@@ -157,7 +162,7 @@ public int hashCode() {
   @Override
   public String toString() {
-    return getClass().getSimpleName() + "." + root.getLocalName();
+    return getClass().getSimpleName() + "." + root.getLocalName() + "(id=" + id + ")";
   }
   /** Serialize the fields to out */
@@ -166,4 +171,10 @@ void write(DataOutput out) throws IOException {
     // write root
     FSImageSerialization.writeINodeDirectory(root, out);
   }
+  static Snapshot read(DataInput in, FSImageFormat.Loader loader) throws IOException {
+    final int snapshotId = in.readInt();
+    final INode root = loader.loadINodeWithLocalName(false, in);
+    return new Snapshot(snapshotId, root.asDirectory(), null);
+  }
 }

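Snapshot.read above rebuilds a snapshot from its id and its serialized root inode; the Loader's snapshotMap is presumably filled by SnapshotManager.read(in, this), whose body is not part of this diff. A hedged reconstruction of what such a loop would look like; the leading count field and the method shape are assumptions:

    import java.io.DataInput;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    class SnapshotMapLoadSketch {
      /** Minimal stand-in for the real Snapshot; only the id matters here. */
      static class Snapshot {
        private final int id;
        Snapshot(int id) { this.id = id; }
        int getId() { return id; }
      }

      // Builds the id -> Snapshot map that Loader#getSnapshot(in) consults.
      // The count field is an assumption; the real code also loads each
      // snapshot's root inode (see Snapshot.read above).
      static Map<Integer, Snapshot> readSnapshotMap(DataInput in) throws IOException {
        Map<Integer, Snapshot> snapshotMap = new HashMap<Integer, Snapshot>();
        int numSnapshots = in.readInt();   // assumed count field
        for (int i = 0; i < numSnapshots; i++) {
          int snapshotId = in.readInt();   // Snapshot.read starts with the id
          snapshotMap.put(snapshotId, new Snapshot(snapshotId));
        }
        return snapshotMap;
      }
    }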

@@ -17,48 +17,51 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
+import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
+import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import com.google.common.base.Preconditions;
 /**
  * A helper class defining static methods for reading/writing snapshot related
  * information from/to FSImage.
  */
 public class SnapshotFSImageFormat {
   /**
    * Save snapshots and snapshot quota for a snapshottable directory.
    * @param current The directory that the snapshots belongs to.
-   * @param out The {@link DataOutputStream} to write.
+   * @param out The {@link DataOutput} to write.
    * @throws IOException
    */
   public static void saveSnapshots(INodeDirectorySnapshottable current,
-      DataOutputStream out) throws IOException {
+      DataOutput out) throws IOException {
     // list of snapshots in snapshotsByNames
     ReadOnlyList<Snapshot> snapshots = current.getSnapshotsByNames();
     out.writeInt(snapshots.size());
-    for (Snapshot ss : snapshots) {
-      // write the snapshot
-      ss.write(out);
+    for (Snapshot s : snapshots) {
+      // write the snapshot id
+      out.writeInt(s.getId());
     }
     // snapshot quota
     out.writeInt(current.getSnapshotQuota());
@@ -67,11 +70,11 @@ public static void saveSnapshots(INodeDirectorySnapshottable current,
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
    * @param sNode The directory that the SnapshotDiff list belongs to.
-   * @param out The {@link DataOutputStream} to write.
+   * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, D extends AbstractINodeDiff<N, D>>
       void saveINodeDiffs(final AbstractINodeDiffList<N, D> diffs,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out, ReferenceMap referenceMap) throws IOException {
     // Record the diffs in reversed order, so that we can find the correct
     // reference for INodes in the created list when loading the FSImage
     if (diffs == null) {
@@ -81,24 +84,25 @@ void saveINodeDiffs(final AbstractINodeDiffList<N, D> diffs,
       final int size = list.size();
       out.writeInt(size);
       for (int i = size - 1; i >= 0; i--) {
-        list.get(i).write(out);
+        list.get(i).write(out, referenceMap);
       }
     }
   }
   public static void saveDirectoryDiffList(final INodeDirectory dir,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out, final ReferenceMap referenceMap
+      ) throws IOException {
     saveINodeDiffs(dir instanceof INodeDirectoryWithSnapshot?
-        ((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out);
+        ((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out, referenceMap);
   }
   public static void saveFileDiffList(final INodeFile file,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out) throws IOException {
     saveINodeDiffs(file instanceof FileWithSnapshot?
-        ((FileWithSnapshot)file).getDiffs(): null, out);
+        ((FileWithSnapshot)file).getDiffs(): null, out, null);
   }
-  public static FileDiffList loadFileDiffList(DataInputStream in,
+  public static FileDiffList loadFileDiffList(DataInput in,
       FSImageFormat.Loader loader) throws IOException {
     final int size = in.readInt();
     if (size == -1) {
@@ -115,11 +119,10 @@ public static FileDiffList loadFileDiffList(DataInputStream in,
     }
   }
-  private static FileDiff loadFileDiff(FileDiff posterior, DataInputStream in,
+  private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
       FSImageFormat.Loader loader) throws IOException {
     // 1. Read the full path of the Snapshot root to identify the Snapshot
-    Snapshot snapshot = findSnapshot(FSImageSerialization.readString(in),
-        loader.getFSDirectoryInLoading());
+    final Snapshot snapshot = loader.getSnapshot(in);
     // 2. Load file size
     final long fileSize = in.readLong();
@@ -161,17 +164,16 @@ private static INode loadCreated(byte[] createdNodeName,
   /**
    * Load the created list from fsimage.
    * @param parent The directory that the created list belongs to.
-   * @param in The {@link DataInputStream} to read.
+   * @param in The {@link DataInput} to read.
    * @return The created list.
    */
   private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
-      DataInputStream in) throws IOException {
+      DataInput in) throws IOException {
     // read the size of the created list
     int createdSize = in.readInt();
     List<INode> createdList = new ArrayList<INode>(createdSize);
     for (int i = 0; i < createdSize; i++) {
-      byte[] createdNodeName = new byte[in.readShort()];
-      in.readFully(createdNodeName);
+      byte[] createdNodeName = FSImageSerialization.readLocalName(in);
      INode created = loadCreated(createdNodeName, parent);
      createdList.add(created);
    }
@@ -184,12 +186,12 @@ private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
    * @param parent The directory that the deleted list belongs to.
    * @param createdList The created list associated with the deleted list in
    *                    the same Diff.
-   * @param in The {@link DataInputStream} to read.
+   * @param in The {@link DataInput} to read.
    * @param loader The {@link Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
-      List<INode> createdList, DataInputStream in, FSImageFormat.Loader loader)
+      List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
       throws IOException {
     int deletedSize = in.readInt();
     List<INode> deletedList = new ArrayList<INode>(deletedSize);
@@ -208,49 +210,35 @@ private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
    * Load snapshots and snapshotQuota for a Snapshottable directory.
    * @param snapshottableParent The snapshottable directory for loading.
    * @param numSnapshots The number of snapshots that the directory has.
-   * @param in The {@link DataInputStream} instance to read.
+   * @param in The {@link DataInput} instance to read.
    * @param loader The {@link Loader} instance that this loading procedure is
    *               using.
    */
   public static void loadSnapshotList(
       INodeDirectorySnapshottable snapshottableParent, int numSnapshots,
-      DataInputStream in, FSImageFormat.Loader loader) throws IOException {
+      DataInput in, FSImageFormat.Loader loader) throws IOException {
     for (int i = 0; i < numSnapshots; i++) {
       // read snapshots
-      Snapshot ss = loadSnapshot(snapshottableParent, in, loader);
-      snapshottableParent.addSnapshot(ss);
+      final Snapshot s = loader.getSnapshot(in);
+      s.getRoot().setParent(snapshottableParent);
+      snapshottableParent.addSnapshot(s);
     }
     int snapshotQuota = in.readInt();
     snapshottableParent.setSnapshotQuota(snapshotQuota);
   }
-  /**
-   * Load a {@link Snapshot} from fsimage.
-   * @param parent The directory that the snapshot belongs to.
-   * @param in The {@link DataInputStream} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
-   *               using.
-   * @return The snapshot.
-   */
-  private static Snapshot loadSnapshot(INodeDirectorySnapshottable parent,
-      DataInputStream in, FSImageFormat.Loader loader) throws IOException {
-    int snapshotId = in.readInt();
-    final INode root = loader.loadINodeWithLocalName(false, in);
-    return new Snapshot(snapshotId, root.asDirectory(), parent);
-  }
   /**
    * Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
    * directory.
    * @param dir The snapshottable directory for loading.
    * @param numSnapshotDiffs The number of {@link SnapshotDiff} that the
    *                         directory has.
-   * @param in The {@link DataInputStream} instance to read.
+   * @param in The {@link DataInput} instance to read.
* @param loader The {@link Loader} instance that this loading procedure is * @param loader The {@link Loader} instance that this loading procedure is
* using. * using.
*/ */
public static void loadDirectoryDiffList(INodeDirectory dir, public static void loadDirectoryDiffList(INodeDirectory dir,
DataInputStream in, FSImageFormat.Loader loader) throws IOException { DataInput in, FSImageFormat.Loader loader) throws IOException {
final int size = in.readInt(); final int size = in.readInt();
if (size != -1) { if (size != -1) {
INodeDirectoryWithSnapshot withSnapshot = (INodeDirectoryWithSnapshot)dir; INodeDirectoryWithSnapshot withSnapshot = (INodeDirectoryWithSnapshot)dir;
@ -261,30 +249,16 @@ public static void loadDirectoryDiffList(INodeDirectory dir,
} }
} }
/**
* Use the given full path to a {@link Root} directory to find the
* associated snapshot.
*/
private static Snapshot findSnapshot(String sRootFullPath, FSDirectory fsdir)
throws IOException {
// find the root
INode root = fsdir.getINode(sRootFullPath);
INodeDirectorySnapshottable snapshotRoot = INodeDirectorySnapshottable
.valueOf(root.getParent(), root.getParent().getFullPathName());
// find the snapshot
return snapshotRoot.getSnapshot(root.getLocalNameBytes());
}
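With the helper above removed, a diff no longer records the full path of its snapshot root; it records the snapshot id, and loader.getSnapshot(in) resolves that id against the snapshots read earlier from the SnapshotManager section of the image. A simplified sketch of the id-keyed lookup follows; Snapshot and the map here are stand-ins, not the HDFS classes.

import java.io.*;
import java.util.*;

class SnapshotLookup {
  // Stand-in for the real Snapshot class: just an id and the root name.
  static class Snapshot {
    final int id;
    final String rootName;
    Snapshot(int id, String rootName) { this.id = id; this.rootName = rootName; }
  }

  private final Map<Integer, Snapshot> snapshotMap = new HashMap<Integer, Snapshot>();

  void register(Snapshot s) { snapshotMap.put(s.id, s); }

  // Same spirit as loader.getSnapshot(in): read the id and look it up.
  Snapshot getSnapshot(DataInput in) throws IOException {
    return snapshotMap.get(in.readInt());
  }

  public static void main(String[] args) throws IOException {
    SnapshotLookup loader = new SnapshotLookup();
    loader.register(new Snapshot(0, "s0"));

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    new DataOutputStream(bos).writeInt(0);   // the image now stores only the id
    Snapshot s = loader.getSnapshot(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(s.rootName);          // prints s0
  }
}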
/** /**
* Load the snapshotINode field of {@link SnapshotDiff}. * Load the snapshotINode field of {@link SnapshotDiff}.
* @param snapshot The Snapshot associated with the {@link SnapshotDiff}. * @param snapshot The Snapshot associated with the {@link SnapshotDiff}.
* @param in The {@link DataInputStream} to read. * @param in The {@link DataInput} to read.
* @param loader The {@link Loader} instance that this loading procedure is * @param loader The {@link Loader} instance that this loading procedure is
* using. * using.
* @return The snapshotINode. * @return The snapshotINode.
*/ */
private static INodeDirectory loadSnapshotINodeInDirectoryDiff( private static INodeDirectory loadSnapshotINodeInDirectoryDiff(
Snapshot snapshot, DataInputStream in, FSImageFormat.Loader loader) Snapshot snapshot, DataInput in, FSImageFormat.Loader loader)
throws IOException { throws IOException {
// read the boolean indicating whether snapshotINode == Snapshot.Root // read the boolean indicating whether snapshotINode == Snapshot.Root
boolean useRoot = in.readBoolean(); boolean useRoot = in.readBoolean();
@ -300,17 +274,16 @@ private static INodeDirectory loadSnapshotINodeInDirectoryDiff(
/** /**
* Load {@link DirectoryDiff} from fsimage. * Load {@link DirectoryDiff} from fsimage.
* @param parent The directory that the SnapshotDiff belongs to. * @param parent The directory that the SnapshotDiff belongs to.
* @param in The {@link DataInputStream} instance to read. * @param in The {@link DataInput} instance to read.
* @param loader The {@link Loader} instance that this loading procedure is * @param loader The {@link Loader} instance that this loading procedure is
* using. * using.
* @return A {@link DirectoryDiff}. * @return A {@link DirectoryDiff}.
*/ */
private static DirectoryDiff loadDirectoryDiff( private static DirectoryDiff loadDirectoryDiff(
INodeDirectoryWithSnapshot parent, DataInputStream in, INodeDirectoryWithSnapshot parent, DataInput in,
FSImageFormat.Loader loader) throws IOException { FSImageFormat.Loader loader) throws IOException {
// 1. Read the full path of the Snapshot root to identify the Snapshot // 1. Read the full path of the Snapshot root to identify the Snapshot
Snapshot snapshot = findSnapshot(FSImageSerialization.readString(in), final Snapshot snapshot = loader.getSnapshot(in);
loader.getFSDirectoryInLoading());
// 2. Load DirectoryDiff#childrenSize // 2. Load DirectoryDiff#childrenSize
int childrenSize = in.readInt(); int childrenSize = in.readInt();
@ -333,4 +306,78 @@ private static DirectoryDiff loadDirectoryDiff(
return sdiff; return sdiff;
} }
/** A reference with a fixed id for fsimage serialization. */
private static class INodeReferenceWithId extends INodeReference {
final long id;
private INodeReferenceWithId(WithCount parent, INode referred, long id) {
super(parent, referred);
this.id = id;
}
/** @return the reference id. */
private long getReferenceId() {
return id;
}
}
/** A reference map for fsimage serialization. */
public static class ReferenceMap {
private final Map<Long, INodeReference.WithCount> referenceMap
= new HashMap<Long, INodeReference.WithCount>();
private long referenceId = 0;
public void writeINodeReferenceWithCount(INodeReference.WithCount withCount,
DataOutput out, boolean writeUnderConstruction) throws IOException {
final INode referred = withCount.getReferredINode();
final boolean firstReferred = !(referred instanceof INodeReferenceWithId);
out.writeBoolean(firstReferred);
if (firstReferred) {
FSImageSerialization.saveINode2Image(referred, out,
writeUnderConstruction, this);
final long id = ++referenceId;
referenceMap.put(id, withCount);
final INodeReferenceWithId withId = new INodeReferenceWithId(
withCount, referred, id);
withCount.setReferredINode(withId);
referred.setParentReference(withId);
} else {
final long id = ((INodeReferenceWithId)referred).getReferenceId();
Preconditions.checkState(referenceMap.containsKey(id));
out.writeLong(id);
}
}
public INodeReference.WithCount loadINodeReferenceWithCount(
boolean isSnapshotINode, DataInput in, FSImageFormat.Loader loader
) throws IOException {
final boolean firstReferred = in.readBoolean();
final INodeReference.WithCount withCount;
if (firstReferred) {
final INode referred = loader.loadINodeWithLocalName(isSnapshotINode, in);
withCount = new INodeReference.WithCount(null, referred);
referenceMap.put(++referenceId, withCount);
} else {
final long id = in.readLong();
withCount = referenceMap.get(id);
withCount.incrementReferenceCount();
}
return withCount;
}
public void removeAllINodeReferenceWithId() {
for(INodeReference.WithCount withCount : referenceMap.values()) {
final INodeReference ref = withCount.getReferredINode().asReference();
final INode referred = ref.getReferredINode();
withCount.setReferredINode(referred);
referred.setParentReference(withCount);
ref.clear();
}
referenceMap.clear();
}
}
} }
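ReferenceMap implements a write-once-then-reference scheme: the first time a WithCount node is met it is serialized inline and assigned an id, and every later occurrence stores only a boolean flag plus that id; the loader mirrors the same counter so both sides agree on the ids and rebuild a single shared instance. A small self-contained sketch of the same pattern, using a plain String payload instead of INodes (all names below are illustrative):

import java.io.*;
import java.util.*;

class RefMapSketch {
  private final Map<String, Long> idsByValue = new HashMap<String, Long>();
  private final Map<Long, String> valuesById = new HashMap<Long, String>();
  private long nextId = 0;

  // Saver side: inline payload on first occurrence, back-reference afterwards.
  void write(String shared, DataOutput out) throws IOException {
    final Long id = idsByValue.get(shared);
    final boolean first = (id == null);
    out.writeBoolean(first);
    if (first) {
      out.writeUTF(shared);
      idsByValue.put(shared, ++nextId);
    } else {
      out.writeLong(id);
    }
  }

  // Loader side: assign ids in the same order so back-references resolve.
  String read(DataInput in) throws IOException {
    if (in.readBoolean()) {
      final String shared = in.readUTF();
      valuesById.put(++nextId, shared);
      return shared;
    }
    return valuesById.get(in.readLong());
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    RefMapSketch writer = new RefMapSketch();
    writer.write("foo", out);               // serialized inline, gets id 1
    writer.write("foo", out);               // only the id is written

    DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    RefMapSketch reader = new RefMapSketch();
    System.out.println(reader.read(in));    // foo
    System.out.println(reader.read(in));    // foo, resolved from id 1
  }
}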
View File
@ -21,12 +21,15 @@
import java.io.DataOutput; import java.io.DataOutput;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@ -195,23 +198,39 @@ public long getNumSnapshots() {
} }
/** /**
* Write {@link #snapshotCounter}, {@link #numSnapshots}, and * Write {@link #snapshotCounter}, {@link #numSnapshots},
* {@link #numSnapshottableDirs} to the DataOutput. * {@link #numSnapshottableDirs} and all snapshots to the DataOutput.
*/ */
public void write(DataOutput out) throws IOException { public void write(DataOutput out) throws IOException {
out.writeInt(snapshotCounter); out.writeInt(snapshotCounter);
out.writeInt(numSnapshots.get());
out.writeInt(numSnapshottableDirs.get()); out.writeInt(numSnapshottableDirs.get());
out.writeInt(numSnapshots.get());
// write all snapshots.
for(INodeDirectorySnapshottable snapshottableDir : snapshottables) {
for(Snapshot s : snapshottableDir.getSnapshotsByNames()) {
s.write(out);
}
}
} }
/** /**
* Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and * Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and
* {@link #numSnapshottableDirs} from the DataInput * {@link #numSnapshottableDirs} and all snapshots from the DataInput
*/ */
public void read(DataInput in) throws IOException { public Map<Integer, Snapshot> read(DataInput in, FSImageFormat.Loader loader
) throws IOException {
snapshotCounter = in.readInt(); snapshotCounter = in.readInt();
numSnapshots.set(in.readInt());
numSnapshottableDirs.set(in.readInt()); numSnapshottableDirs.set(in.readInt());
numSnapshots.set(in.readInt());
// read snapshots
final Map<Integer, Snapshot> snapshotMap = new HashMap<Integer, Snapshot>();
for(int i = 0; i < numSnapshots.get(); i++) {
final Snapshot s = Snapshot.read(in, loader);
snapshotMap.put(s.getId(), s);
}
return snapshotMap;
} }
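The manager now persists its counters followed by every snapshot, and read() returns an id-to-snapshot map that later loading passes can consult. A simplified sketch of that layout and its symmetric read path, with a stand-in Snapshot record rather than the HDFS class:

import java.io.*;
import java.util.*;

class SnapshotStateSketch {
  static class Snapshot {
    final int id;
    final String name;
    Snapshot(int id, String name) { this.id = id; this.name = name; }
  }

  // Counters first, then every snapshot, so the loader can build the map up front.
  static void write(int snapshotCounter, int numSnapshottableDirs,
      List<Snapshot> snapshots, DataOutput out) throws IOException {
    out.writeInt(snapshotCounter);
    out.writeInt(numSnapshottableDirs);
    out.writeInt(snapshots.size());
    for (Snapshot s : snapshots) {
      out.writeInt(s.id);
      out.writeUTF(s.name);
    }
  }

  static Map<Integer, Snapshot> read(DataInput in) throws IOException {
    in.readInt();                            // snapshotCounter
    in.readInt();                            // numSnapshottableDirs
    final int numSnapshots = in.readInt();
    final Map<Integer, Snapshot> map = new HashMap<Integer, Snapshot>();
    for (int i = 0; i < numSnapshots; i++) {
      final Snapshot s = new Snapshot(in.readInt(), in.readUTF());
      map.put(s.id, s);
    }
    return map;                              // later passes resolve ids against this map
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    write(1, 1, Arrays.asList(new Snapshot(0, "s0")), new DataOutputStream(bos));
    Map<Integer, Snapshot> m = read(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(m.get(0).name);       // prints s0
  }
}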
/** /**
View File
@ -87,7 +87,7 @@ public void tearDown() throws Exception {
cluster.shutdown(); cluster.shutdown();
} }
} }
/** /**
* Create a temp fsimage file for testing. * Create a temp fsimage file for testing.
* @param dir The directory where the fsimage file resides * @param dir The directory where the fsimage file resides
@ -205,7 +205,16 @@ public void testSaveLoadImage() throws Exception {
hdfs.setReplication(sub1file1, (short) (REPLICATION - 1)); hdfs.setReplication(sub1file1, (short) (REPLICATION - 1));
hdfs.delete(sub1file2, true); hdfs.delete(sub1file2, true);
hdfs.setOwner(sub2, "dr.who", "unknown"); hdfs.setOwner(sub2, "dr.who", "unknown");
hdfs.delete(sub2file2, true); hdfs.delete(sub2file1, true);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub1_sub2file2 = new Path(sub1, "sub2file2");
hdfs.rename(sub2file2, sub1_sub2file2);
hdfs.rename(sub1file1, sub2file1);
// TODO: fix case hdfs.rename(sub1file1, sub1file2);
checkImage(s); checkImage(s);
} }
@ -222,8 +231,14 @@ void checkImage(int s) throws IOException {
long numSnapshotBefore = fsn.getNumSnapshots(); long numSnapshotBefore = fsn.getNumSnapshots();
SnapshottableDirectoryStatus[] dirBefore = hdfs.getSnapshottableDirListing(); SnapshottableDirectoryStatus[] dirBefore = hdfs.getSnapshottableDirListing();
// restart the cluster, and format the cluster // shutdown the cluster
cluster.shutdown(); cluster.shutdown();
// dump the fsdir tree
File fsnBetween = dumpTree2File(name + "_between");
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnBetween);
// restart the cluster, and format the cluster
cluster = new MiniDFSCluster.Builder(conf).format(true) cluster = new MiniDFSCluster.Builder(conf).format(true)
.numDataNodes(REPLICATION).build(); .numDataNodes(REPLICATION).build();
cluster.waitActive(); cluster.waitActive();
View File
@ -38,9 +38,11 @@
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
@ -269,7 +271,6 @@ public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
/** /**
* Generate the path for a snapshot file. * Generate the path for a snapshot file.
* *
* @param fs FileSystem instance
* @param snapshotRoot of format * @param snapshotRoot of format
* {@literal <snapshottable_dir>/.snapshot/<snapshot_name>} * {@literal <snapshottable_dir>/.snapshot/<snapshot_name>}
* @param file path to a file * @param file path to a file
@ -279,7 +280,7 @@ public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
* . Null if the file is not under the directory associated with the * . Null if the file is not under the directory associated with the
* snapshot root. * snapshot root.
*/ */
static Path getSnapshotFile(FileSystem fs, Path snapshotRoot, Path file) { static Path getSnapshotFile(Path snapshotRoot, Path file) {
Path rootParent = snapshotRoot.getParent(); Path rootParent = snapshotRoot.getParent();
if (rootParent != null && rootParent.getName().equals(".snapshot")) { if (rootParent != null && rootParent.getName().equals(".snapshot")) {
Path snapshotDir = rootParent.getParent(); Path snapshotDir = rootParent.getParent();
@ -463,11 +464,11 @@ public int hashCode() {
} }
} }
} }
static void dumpTreeRecursively(INode inode) { public static void dumpTree(String message, MiniDFSCluster cluster
if (INode.LOG.isDebugEnabled()) { ) throws UnresolvedLinkException {
inode.dumpTreeRecursively( System.out.println("XXX " + message);
new PrintWriter(System.out, true), new StringBuilder(), null); cluster.getNameNode().getNamesystem().getFSDirectory().getINode("/"
} ).dumpTreeRecursively(System.out);
} }
} }
View File
@ -33,7 +33,6 @@
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.junit.AfterClass; import org.junit.AfterClass;
@ -55,7 +54,6 @@ public class TestNestedSnapshots {
private static Configuration conf = new Configuration(); private static Configuration conf = new Configuration();
private static MiniDFSCluster cluster; private static MiniDFSCluster cluster;
private static FSNamesystem fsn;
private static DistributedFileSystem hdfs; private static DistributedFileSystem hdfs;
@BeforeClass @BeforeClass
@ -63,8 +61,6 @@ public static void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION) cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build(); .build();
cluster.waitActive(); cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem(); hdfs = cluster.getFileSystem();
} }
@ -112,8 +108,7 @@ public void testNestedSnapshots() throws Exception {
} }
private static void print(String message) throws UnresolvedLinkException { private static void print(String message) throws UnresolvedLinkException {
System.out.println("XXX " + message); SnapshotTestHelper.dumpTree(message, cluster);
SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
} }
private static void assertFile(Path s1, Path s2, Path file, private static void assertFile(Path s1, Path s2, Path file,
View File
@ -0,0 +1,107 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
/** Testing rename with snapshots. */
public class TestRenameWithSnapshots {
{
SnapshotTestHelper.disableLogs();
}
private static final long SEED = 0;
private static final short REPL = 3;
private static final long BLOCKSIZE = 1024;
private static Configuration conf = new Configuration();
private static MiniDFSCluster cluster;
private static FSNamesystem fsn;
private static FSDirectory fsdir;
private static DistributedFileSystem hdfs;
@BeforeClass
public static void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test (timeout=300000)
public void testRenameWithSnapshot() throws Exception {
final String dirStr = "/testRenameWithSnapshot";
final String abcStr = dirStr + "/abc";
final Path abc = new Path(abcStr);
hdfs.mkdirs(abc, new FsPermission((short)0777));
hdfs.allowSnapshot(abcStr);
final Path foo = new Path(abc, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
hdfs.createSnapshot(abc, "s0");
final INode fooINode = fsdir.getINode(foo.toString());
final String xyzStr = dirStr + "/xyz";
final Path xyz = new Path(xyzStr);
hdfs.mkdirs(xyz, new FsPermission((short)0777));
final Path bar = new Path(xyz, "bar");
hdfs.rename(foo, bar);
final INode fooRef = fsdir.getINode(
SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
final INodeReference.WithCount withCount
= (INodeReference.WithCount)fooRef.asReference().getReferredINode();
Assert.assertEquals(2, withCount.getReferenceCount());
final INode barRef = fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount, barRef.asReference().getReferredINode());
Assert.assertSame(fooINode, withCount.asReference().getReferredINode());
hdfs.delete(bar, false);
Assert.assertEquals(1, withCount.getReferenceCount());
}
}
View File
@ -76,7 +76,7 @@ public class TestSnapshot {
public static final int DIRECTORY_TREE_LEVEL = 5; public static final int DIRECTORY_TREE_LEVEL = 5;
protected Configuration conf; protected Configuration conf;
protected MiniDFSCluster cluster; protected static MiniDFSCluster cluster;
protected static FSNamesystem fsn; protected static FSNamesystem fsn;
protected static FSDirectory fsdir; protected static FSDirectory fsdir;
protected DistributedFileSystem hdfs; protected DistributedFileSystem hdfs;
@ -222,7 +222,7 @@ public void testSnapshot() throws Throwable {
runTestSnapshot(); runTestSnapshot();
} catch(Throwable t) { } catch(Throwable t) {
SnapshotTestHelper.LOG.info("FAILED", t); SnapshotTestHelper.LOG.info("FAILED", t);
SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/")); SnapshotTestHelper.dumpTree("FAILED", cluster);
throw t; throw t;
} }
} }
@ -471,7 +471,7 @@ static abstract class FileStatusChange extends Modification {
@Override @Override
void loadSnapshots() throws Exception { void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs, Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file); snapshotRoot, file);
if (snapshotFile != null) { if (snapshotFile != null) {
if (fs.exists(snapshotFile)) { if (fs.exists(snapshotFile)) {
@ -501,8 +501,7 @@ void checkSnapshots() throws Exception {
+ "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString() + "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString()
+ "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString(); + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
System.out.println(s); SnapshotTestHelper.dumpTree(s, cluster);
SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
} }
assertEquals(s, currentStatus.toString(), originalStatus.toString()); assertEquals(s, currentStatus.toString(), originalStatus.toString());
} }
@ -582,7 +581,7 @@ static class FileAppend extends Modification {
@Override @Override
void loadSnapshots() throws Exception { void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs, Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file); snapshotRoot, file);
if (snapshotFile != null) { if (snapshotFile != null) {
long snapshotFileLen = fs.exists(snapshotFile) ? fs.getFileStatus( long snapshotFileLen = fs.exists(snapshotFile) ? fs.getFileStatus(
@ -613,9 +612,7 @@ void checkSnapshots() throws Exception {
+ "\noriginalSnapshotFileLen = " + originalSnapshotFileLen + "\noriginalSnapshotFileLen = " + originalSnapshotFileLen
+ "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString() + "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString()
+ "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString(); + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
SnapshotTestHelper.dumpTree(s, cluster);
System.out.println(s);
SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
} }
assertEquals(s, originalSnapshotFileLen, currentSnapshotFileLen); assertEquals(s, originalSnapshotFileLen, currentSnapshotFileLen);
// Read the snapshot file out of the boundary // Read the snapshot file out of the boundary
@ -630,9 +627,7 @@ void checkSnapshots() throws Exception {
+ "\n readLen = " + readLen + "\n readLen = " + readLen
+ "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString() + "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString()
+ "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString(); + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
SnapshotTestHelper.dumpTree(s, cluster);
System.out.println(s);
SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
} }
assertEquals(s, -1, readLen); assertEquals(s, -1, readLen);
input.close(); input.close();
@ -703,7 +698,7 @@ static class FileCreation extends Modification {
@Override @Override
void loadSnapshots() throws Exception { void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs, Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file); snapshotRoot, file);
if (snapshotFile != null) { if (snapshotFile != null) {
FileStatus status = FileStatus status =
@ -722,7 +717,7 @@ void modify() throws Exception {
@Override @Override
void checkSnapshots() throws Exception { void checkSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs, Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file); snapshotRoot, file);
if (snapshotFile != null) { if (snapshotFile != null) {
boolean computed = fs.exists(snapshotFile); boolean computed = fs.exists(snapshotFile);
@ -755,7 +750,7 @@ static class FileDeletion extends Modification {
@Override @Override
void loadSnapshots() throws Exception { void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
boolean existence = SnapshotTestHelper.getSnapshotFile(fs, boolean existence = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file) != null; snapshotRoot, file) != null;
snapshotFileExistenceMap.put(snapshotRoot, existence); snapshotFileExistenceMap.put(snapshotRoot, existence);
} }
@ -770,7 +765,7 @@ void modify() throws Exception {
void checkSnapshots() throws Exception { void checkSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
boolean currentSnapshotFileExist = SnapshotTestHelper.getSnapshotFile( boolean currentSnapshotFileExist = SnapshotTestHelper.getSnapshotFile(
fs, snapshotRoot, file) != null; snapshotRoot, file) != null;
boolean originalSnapshotFileExist = snapshotFileExistenceMap boolean originalSnapshotFileExist = snapshotFileExistenceMap
.get(snapshotRoot); .get(snapshotRoot);
assertEquals(currentSnapshotFileExist, originalSnapshotFileExist); assertEquals(currentSnapshotFileExist, originalSnapshotFileExist);
@ -809,7 +804,7 @@ class DirCreationOrDeletion extends Modification {
@Override @Override
void loadSnapshots() throws Exception { void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) { for (Path snapshotRoot : snapshotList) {
Path snapshotDir = SnapshotTestHelper.getSnapshotFile(fs, snapshotRoot, Path snapshotDir = SnapshotTestHelper.getSnapshotFile(snapshotRoot,
changedPath); changedPath);
if (snapshotDir != null) { if (snapshotDir != null) {
FileStatus status = fs.exists(snapshotDir) ? fs FileStatus status = fs.exists(snapshotDir) ? fs