HDFS-4611. Update FSImage for INodeReference.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1463332 13f79535-47bb-0310-9956-ffa450edef68
parent 8ee6ecaea4
commit 1096917649
@@ -223,3 +223,5 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4637. INodeDirectory#replaceSelf4Quota may incorrectly convert a newly
   created directory to an INodeDirectoryWithSnapshot. (Jing Zhao via szetszwo)
+
+  HDFS-4611. Update FSImage for INodeReference. (szetszwo)
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.security.token.delegation;
 
-import java.io.DataInputStream;
+import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -110,7 +110,7 @@ public synchronized long getTokenExpiryTime(
    * @param in input stream to read fsimage
    * @throws IOException
    */
-  public synchronized void loadSecretManagerState(DataInputStream in)
+  public synchronized void loadSecretManagerState(DataInput in)
       throws IOException {
     if (running) {
       // a safety check
@@ -266,7 +266,7 @@ private synchronized void saveAllKeys(DataOutputStream out)
   /**
    * Private helper methods to load Delegation tokens from fsimage
    */
-  private synchronized void loadCurrentTokens(DataInputStream in)
+  private synchronized void loadCurrentTokens(DataInput in)
       throws IOException {
     int numberOfTokens = in.readInt();
     for (int i = 0; i < numberOfTokens; i++) {
@@ -282,7 +282,7 @@ private synchronized void loadCurrentTokens(DataInputStream in)
    * @param in
    * @throws IOException
    */
-  private synchronized void loadAllKeys(DataInputStream in) throws IOException {
+  private synchronized void loadAllKeys(DataInput in) throws IOException {
     int numberOfKeys = in.readInt();
     for (int i = 0; i < numberOfKeys; i++) {
       DelegationKey value = new DelegationKey();
@@ -570,7 +570,8 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp)
             + " because the source can not be removed");
         return false;
       }
-      srcChild.setLocalName(dstComponents[dstComponents.length - 1]);
+      //TODO: setLocalName breaks created/deleted lists
+      srcChild.setLocalName(dstIIP.getLastLocalName());
 
       // add src to the destination
       added = addLastINodeNoQuotaCheck(dstIIP, srcChild);
@@ -750,6 +751,7 @@ boolean unprotectedRenameTo(String src, String dst, long timestamp,
           undoRemoveDst = true;
         }
       }
+      //TODO: setLocalName breaks created/deleted lists
       srcChild.setLocalName(dstIIP.getLastLocalName());
 
       // add src as dst to complete rename
@@ -17,23 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
+
+import org.apache.hadoop.io.Text;
 
 /**
  * Simple container class that handles support for compressed fsimage files.
  */
@@ -108,15 +108,14 @@ private static FSImageCompression createCompression(Configuration conf,
    * underlying IO fails.
    */
   static FSImageCompression readCompressionHeader(
-      Configuration conf,
-      DataInputStream dis) throws IOException
+      Configuration conf, DataInput in) throws IOException
   {
-    boolean isCompressed = dis.readBoolean();
+    boolean isCompressed = in.readBoolean();
 
     if (!isCompressed) {
       return createNoopCompression();
     } else {
-      String codecClassName = Text.readString(dis);
+      String codecClassName = Text.readString(in);
       return createCompression(conf, codecClassName);
     }
   }
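Note: the DataInputStream-to-DataInput change above repeats through the rest of this patch; it widens the loader signatures from a concrete stream class to the java.io.DataInput interface. A minimal sketch of the flexibility gained, with hypothetical names — this is illustration, not code from the patch:

```java
import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.IOException;

public class DataInputDemo {
  // Accepting the DataInput interface instead of DataInputStream lets the
  // same reader work with any implementation, e.g. a RandomAccessFile.
  static boolean readCompressedFlag(DataInput in) throws IOException {
    return in.readBoolean();  // same call the header reader above makes
  }

  public static void main(String[] args) throws IOException {
    byte[] header = { 1 };  // "isCompressed = true"
    DataInput in = new DataInputStream(new ByteArrayInputStream(header));
    System.out.println(readCompressedFlag(in));  // prints: true
  }
}
```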
@@ -19,6 +19,7 @@
 
 import static org.apache.hadoop.util.Time.now;
 
+import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
@@ -58,6 +59,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
@@ -187,6 +189,9 @@ public static class Loader {
     /** The MD5 sum of the loaded file */
     private MD5Hash imgDigest;
 
+    private Map<Integer, Snapshot> snapshotMap = null;
+    private final ReferenceMap referenceMap = new ReferenceMap();
+
     Loader(Configuration conf, FSNamesystem namesystem) {
       this.conf = conf;
       this.namesystem = namesystem;
@@ -267,7 +272,7 @@ void load(File curFile) throws IOException {
       }
 
       if (supportSnapshot) {
-        namesystem.getSnapshotManager().read(in);
+        snapshotMap = namesystem.getSnapshotManager().read(in, this);
       }
 
       // read compression related info
@@ -331,7 +336,7 @@ private void updateRootAttr(INodeWithAdditionalFields root) {
      *
      * @param in Image input stream
      */
-    private void loadLocalNameINodesWithSnapshot(DataInputStream in)
+    private void loadLocalNameINodesWithSnapshot(DataInput in)
         throws IOException {
       assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
           getLayoutVersion());
@@ -350,7 +355,7 @@ private void loadLocalNameINodesWithSnapshot(DataInputStream in)
      * @param in image input stream
      * @throws IOException
      */
-    private void loadLocalNameINodes(long numFiles, DataInputStream in)
+    private void loadLocalNameINodes(long numFiles, DataInput in)
         throws IOException {
       assert LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
          getLayoutVersion());
@@ -373,20 +378,20 @@ private void loadLocalNameINodes(long numFiles, DataInputStream in)
     /**
      * Load information about root, and use the information to update the root
      * directory of NameSystem.
-     * @param in The {@link DataInputStream} instance to read.
+     * @param in The {@link DataInput} instance to read.
      */
-    private void loadRoot(DataInputStream in) throws IOException {
+    private void loadRoot(DataInput in) throws IOException {
       // load root
       if (in.readShort() != 0) {
         throw new IOException("First node is not root");
       }
-      final INodeWithAdditionalFields root = loadINode(null, false, in);
+      final INodeDirectory root = loadINode(null, false, in).asDirectory();
       // update the root's attributes
       updateRootAttr(root);
     }
 
     /** Load children nodes for the parent directory. */
-    private int loadChildren(INodeDirectory parent, DataInputStream in)
+    private int loadChildren(INodeDirectory parent, DataInput in)
         throws IOException {
       int numChildren = in.readInt();
       for (int i = 0; i < numChildren; i++) {
@@ -399,9 +404,9 @@ private int loadChildren(INodeDirectory parent, DataInputStream in)
 
     /**
      * Load a directory when snapshot is supported.
-     * @param in The {@link DataInputStream} instance to read.
+     * @param in The {@link DataInput} instance to read.
      */
-    private void loadDirectoryWithSnapshot(DataInputStream in)
+    private void loadDirectoryWithSnapshot(DataInput in)
         throws IOException {
       // Step 1. Identify the parent INode
       String parentPath = FSImageSerialization.readString(in);
@@ -443,7 +448,7 @@ private void loadDirectoryWithSnapshot(DataInputStream in)
      * @return number of child inodes read
      * @throws IOException
      */
-    private int loadDirectory(DataInputStream in) throws IOException {
+    private int loadDirectory(DataInput in) throws IOException {
       String parentPath = FSImageSerialization.readString(in);
       final INodeDirectory parent = INodeDirectory.valueOf(
           namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
@@ -458,19 +463,19 @@ private int loadDirectory(DataInputStream in) throws IOException {
      * @throws IOException if any error occurs
      */
     private void loadFullNameINodes(long numFiles,
-        DataInputStream in) throws IOException {
+        DataInput in) throws IOException {
       byte[][] pathComponents;
       byte[][] parentPath = {{}};
       FSDirectory fsDir = namesystem.dir;
       INodeDirectory parentINode = fsDir.rootDir;
       for (long i = 0; i < numFiles; i++) {
         pathComponents = FSImageSerialization.readPathComponents(in);
-        final INodeWithAdditionalFields newNode = loadINode(
+        final INode newNode = loadINode(
             pathComponents[pathComponents.length-1], false, in);
 
         if (isRoot(pathComponents)) { // it is the root
           // update the root's attributes
-          updateRootAttr(newNode);
+          updateRootAttr(newNode.asDirectory());
           continue;
         }
         // check if the new inode belongs to the same parent
@@ -527,12 +532,9 @@ public FSDirectory getFSDirectoryInLoading() {
     }
 
     public INode loadINodeWithLocalName(boolean isSnapshotINode,
-        DataInputStream in) throws IOException {
-      final byte[] localName = new byte[in.readShort()];
-      in.readFully(localName);
-      final INode inode = loadINode(localName, isSnapshotINode, in);
-      inode.setLocalName(localName);
-      return inode;
+        DataInput in) throws IOException {
+      final byte[] localName = FSImageSerialization.readLocalName(in);
+      return loadINode(localName, isSnapshotINode, in);
     }
 
     /**
@@ -541,8 +543,8 @@ public INode loadINodeWithLocalName(boolean isSnapshotINode,
      * @param in data input stream from which image is read
      * @return an inode
      */
-    INodeWithAdditionalFields loadINode(final byte[] localName, boolean isSnapshotINode,
-        DataInputStream in) throws IOException {
+    INode loadINode(final byte[] localName, boolean isSnapshotINode,
+        DataInput in) throws IOException {
       final int imgVersion = getLayoutVersion();
       final long inodeId = namesystem.allocateNewInodeId();
 
@@ -632,12 +634,27 @@ INodeWithAdditionalFields loadINode(final byte[] localName, boolean isSnapshotIN
         final PermissionStatus permissions = PermissionStatus.read(in);
         return new INodeSymlink(inodeId, localName, permissions,
             modificationTime, atime, symlink);
+      } else if (numBlocks == -3) {
+        //reference
+
+        final boolean isWithName = in.readBoolean();
+
+        final INodeReference.WithCount withCount
+            = referenceMap.loadINodeReferenceWithCount(isSnapshotINode, in, this);
+
+        if (isWithName) {
+          return new INodeReference.WithName(null, withCount, localName);
+        } else {
+          final INodeReference ref = new INodeReference(null, withCount);
+          withCount.setParentReference(ref);
+          return ref;
+        }
       }
 
       throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
     }
 
-    private void loadFilesUnderConstruction(DataInputStream in,
+    private void loadFilesUnderConstruction(DataInput in,
         boolean supportSnapshot) throws IOException {
       FSDirectory fsDir = namesystem.dir;
       int size = in.readInt();
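Note: in this image format an inode record carries its block count first, and negative values act as type sentinels; the hunk above adds -3 for references. A hedged sketch of the dispatch — only -3 is confirmed by this patch, the sentinels for directory and symlink are assumptions inferred from the surrounding code:

```java
import java.io.IOException;

public class INodeTypeDispatch {
  // Hypothetical decoder illustrating the sentinel convention used by
  // loadINode above; only -3 (reference) is confirmed by this patch.
  static String inodeType(int numBlocks) throws IOException {
    if (numBlocks >= 0) {
      return "file with " + numBlocks + " blocks";
    } else if (numBlocks == -1) {
      return "directory";   // assumed sentinel
    } else if (numBlocks == -2) {
      return "symlink";     // assumed sentinel
    } else if (numBlocks == -3) {
      return "reference";   // added by this patch
    }
    throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
  }

  public static void main(String[] args) throws IOException {
    System.out.println(inodeType(-3));  // prints: reference
  }
}
```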
@@ -665,7 +682,7 @@ private void loadFilesUnderConstruction(DataInputStream in,
       }
     }
 
-    private void loadSecretManagerState(DataInputStream in)
+    private void loadSecretManagerState(DataInput in)
         throws IOException {
       int imgVersion = getLayoutVersion();
 
@@ -713,6 +730,10 @@ byte[][] getParent(byte[][] path) {
       }
       return result;
     }
+
+    public Snapshot getSnapshot(DataInput in) throws IOException {
+      return snapshotMap.get(in.readInt());
+    }
   }
 
   /**
@@ -727,6 +748,7 @@ static class Saver {
 
     /** The MD5 checksum of the file that was written */
     private MD5Hash savedDigest;
+    private final ReferenceMap referenceMap = new ReferenceMap();
 
     static private final byte[] PATH_SEPARATOR = DFSUtil.string2Bytes(Path.SEPARATOR);
 
@@ -792,7 +814,7 @@ void save(File newFile, FSImageCompression compression) throws IOException {
       byte[] byteStore = new byte[4*HdfsConstants.MAX_PATH_LENGTH];
       ByteBuffer strbuf = ByteBuffer.wrap(byteStore);
       // save the root
-      FSImageSerialization.saveINode2Image(fsDir.rootDir, out, false);
+      FSImageSerialization.saveINode2Image(fsDir.rootDir, out, false, referenceMap);
       // save the rest of the nodes
       saveImage(strbuf, fsDir.rootDir, out, null);
       // save files under construction
@@ -805,6 +827,7 @@ void save(File newFile, FSImageCompression compression) throws IOException {
         context.checkCancelled();
         fout.getChannel().force(true);
       } finally {
+        referenceMap.removeAllINodeReferenceWithId();
         out.close();
       }
 
@@ -830,7 +853,7 @@ private int saveChildren(ReadOnlyList<INode> children, DataOutputStream out)
       int i = 0;
       for(INode child : children) {
         // print all children first
-        FSImageSerialization.saveINode2Image(child, out, false);
+        FSImageSerialization.saveINode2Image(child, out, false, referenceMap);
         if (child.isDirectory()) {
           dirNum++;
         }
@@ -927,7 +950,7 @@ private void saveImage(ByteBuffer currentDirName, INodeDirectory current,
       dirNum += saveChildren(children, out);
 
       // 4. Write DirectoryDiff lists, if there is any.
-      SnapshotFSImageFormat.saveDirectoryDiffList(current, out);
+      SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap);
 
       // Write sub-tree of sub-directories, including possible snapshots of
       // deleted sub-directories
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.DataInputStream;
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ShortWritable;
 import org.apache.hadoop.io.Text;
@@ -87,7 +88,7 @@ private static void writePermissionStatus(INodeWithAdditionalFields inode,
   }
 
   private static void writeBlocks(final Block[] blocks,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out) throws IOException {
     if (blocks == null) {
       out.writeInt(0);
     } else {
@@ -102,7 +103,7 @@ private static void writeBlocks(final Block[] blocks,
   // from the input stream
   //
   static INodeFileUnderConstruction readINodeUnderConstruction(
-      DataInputStream in) throws IOException {
+      DataInput in) throws IOException {
     byte[] name = readBytes(in);
     short blockReplication = in.readShort();
     long modificationTime = in.readLong();
@@ -164,7 +165,7 @@ static void writeINodeUnderConstruction(DataOutputStream out,
    * @param out The {@link DataOutputStream} where the fields are written
    * @param writeBlock Whether to write block information
    */
-  public static void writeINodeFile(INodeFile file, DataOutputStream out,
+  public static void writeINodeFile(INodeFile file, DataOutput out,
       boolean writeUnderConstruction) throws IOException {
     writeLocalName(file, out);
     out.writeShort(file.getFileReplication());
@@ -233,17 +234,37 @@ private static void writeINodeSymlink(INodeSymlink node, DataOutput out)
     writePermissionStatus(node, out);
   }
 
+  /** Serialize a {@link INodeReference} node */
+  private static void writeINodeReference(INodeReference ref, DataOutput out,
+      boolean writeUnderConstruction, ReferenceMap referenceMap
+      ) throws IOException {
+    writeLocalName(ref, out);
+    out.writeShort(0);  // replication
+    out.writeLong(0);   // modification time
+    out.writeLong(0);   // access time
+    out.writeLong(0);   // preferred block size
+    out.writeInt(-3);   // # of blocks
+
+    out.writeBoolean(ref instanceof INodeReference.WithName);
+
+    final INodeReference.WithCount withCount
+        = (INodeReference.WithCount)ref.getReferredINode();
+    referenceMap.writeINodeReferenceWithCount(withCount, out, writeUnderConstruction);
+  }
+
   /**
    * Save one inode's attributes to the image.
    */
-  public static void saveINode2Image(INode node, DataOutputStream out,
-      boolean writeUnderConstruction)
+  public static void saveINode2Image(INode node, DataOutput out,
+      boolean writeUnderConstruction, ReferenceMap referenceMap)
       throws IOException {
-    if (node.isDirectory()) {
+    if (node.isReference()) {
+      writeINodeReference(node.asReference(), out, writeUnderConstruction, referenceMap);
+    } else if (node.isDirectory()) {
       writeINodeDirectory(node.asDirectory(), out);
     } else if (node.isSymlink()) {
       writeINodeSymlink(node.asSymlink(), out);
-    } else {
+    } else if (node.isFile()) {
       writeINodeFile(node.asFile(), out, writeUnderConstruction);
     }
   }
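Note: writeINodeReference above keeps the generic inode header but zeroes the fields that do not apply to a reference, so a loader can recognize the record by the -3 block count before branching. A sketch of the resulting record layout — field widths inferred from the write calls above, and the writer class is hypothetical:

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReferenceRecordDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);

    byte[] localName = "bar".getBytes("UTF-8");
    out.writeShort(localName.length); // local name, length-prefixed
    out.write(localName);
    out.writeShort(0);                // replication (unused for a reference)
    out.writeLong(0);                 // modification time
    out.writeLong(0);                 // access time
    out.writeLong(0);                 // preferred block size
    out.writeInt(-3);                 // block-count sentinel: reference
    out.writeBoolean(true);           // true => WithName reference
    // ... followed by the WithCount payload written by the ReferenceMap.

    // 2 + 3 name bytes, 2 + 8 + 8 + 8 fixed fields, 4 + 1 sentinel/flag
    System.out.println(bytes.size() + " bytes so far"); // prints: 36 bytes so far
  }
}
```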
@@ -252,19 +273,19 @@ public static void saveINode2Image(INode node, DataOutputStream out,
   // code is moved into this package. This method should not be called
   // by other code.
   @SuppressWarnings("deprecation")
-  public static String readString(DataInputStream in) throws IOException {
+  public static String readString(DataInput in) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.readFields(in);
     return ustr.toStringChecked();
   }
 
-  static String readString_EmptyAsNull(DataInputStream in) throws IOException {
+  static String readString_EmptyAsNull(DataInput in) throws IOException {
     final String s = readString(in);
     return s.isEmpty()? null: s;
   }
 
   @SuppressWarnings("deprecation")
-  public static void writeString(String str, DataOutputStream out) throws IOException {
+  public static void writeString(String str, DataOutput out) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.set(str);
     ustr.write(out);
@@ -272,7 +293,7 @@ public static void writeString(String str, DataOutputStream out) throws IOExcept
 
   /** read the long value */
-  static long readLong(DataInputStream in) throws IOException {
+  static long readLong(DataInput in) throws IOException {
     LongWritable ustr = TL_DATA.get().U_LONG;
     ustr.readFields(in);
     return ustr.get();
@@ -286,7 +307,7 @@ static void writeLong(long value, DataOutputStream out) throws IOException {
   }
 
   /** read short value */
-  static short readShort(DataInputStream in) throws IOException {
+  static short readShort(DataInput in) throws IOException {
     ShortWritable uShort = TL_DATA.get().U_SHORT;
     uShort.readFields(in);
     return uShort.get();
@@ -301,7 +322,7 @@ static void writeShort(short value, DataOutputStream out) throws IOException {
 
   // Same comments apply for this method as for readString()
   @SuppressWarnings("deprecation")
-  public static byte[] readBytes(DataInputStream in) throws IOException {
+  public static byte[] readBytes(DataInput in) throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
     ustr.readFields(in);
     int len = ustr.getLength();
@@ -319,7 +340,7 @@ public static byte[] readBytes(DataInputStream in) throws IOException {
    * @throws IOException
    */
   @SuppressWarnings("deprecation")
-  public static byte[][] readPathComponents(DataInputStream in)
+  public static byte[][] readPathComponents(DataInput in)
       throws IOException {
     DeprecatedUTF8 ustr = TL_DATA.get().U_STR;
 
@@ -328,6 +349,12 @@ public static byte[][] readPathComponents(DataInputStream in)
         ustr.getLength(), (byte) Path.SEPARATOR_CHAR);
   }
 
+  public static byte[] readLocalName(DataInput in) throws IOException {
+    byte[] createdNodeName = new byte[in.readShort()];
+    in.readFully(createdNodeName);
+    return createdNodeName;
+  }
+
   private static void writeLocalName(INode inode, DataOutput out)
       throws IOException {
     final byte[] name = inode.getLocalNameBytes();
@@ -358,7 +385,7 @@ public static void writeCompactBlockArray(
   }
 
   public static Block[] readCompactBlockArray(
-      DataInputStream in, int logVersion) throws IOException {
+      DataInput in, int logVersion) throws IOException {
     int num = WritableUtils.readVInt(in);
     if (num < 0) {
       throw new IOException("Invalid block array length: " + num);
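Note: the new readLocalName helper above centralizes a length-prefixed name encoding (a 16-bit length followed by that many bytes) that was previously inlined in FSImageFormat.Loader and the snapshot loader. A round-trip sketch under that assumption:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class LocalNameRoundTrip {
  // Mirrors the encoding readLocalName expects: short length, then bytes.
  static void writeLocalName(byte[] name, DataOutput out) throws IOException {
    out.writeShort(name.length);
    out.write(name);
  }

  static byte[] readLocalName(DataInput in) throws IOException {
    byte[] name = new byte[in.readShort()];
    in.readFully(name);
    return name;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    writeLocalName("foo".getBytes("UTF-8"), new DataOutputStream(bytes));
    DataInput in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(new String(readLocalName(in), "UTF-8")); // prints: foo
  }
}
```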
@@ -33,8 +33,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
@@ -75,6 +75,7 @@
 
 import java.io.BufferedWriter;
 import java.io.ByteArrayInputStream;
+import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
@@ -5353,7 +5354,7 @@ void saveSecretManagerState(DataOutputStream out) throws IOException {
   /**
    * @param in load the state of secret manager from input stream
    */
-  void loadSecretManagerState(DataInputStream in) throws IOException {
+  void loadSecretManagerState(DataInput in) throws IOException {
     dtSecretManager.loadSecretManagerState(in);
   }
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.util.ArrayList;
@@ -42,7 +43,6 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.SignedBytes;
-//import org.apache.hadoop.hdfs.util.EnumCounters;
 
 /**
  * We keep an in-memory representation of the file/block hierarchy.
@@ -420,21 +420,30 @@ public String toString() {
   }
 
   @VisibleForTesting
-  public String getObjectString() {
+  public final String getObjectString() {
     return getClass().getSimpleName() + "@"
         + Integer.toHexString(super.hashCode());
   }
 
+  /** @return a string description of the parent. */
   @VisibleForTesting
-  public String toStringWithObjectType() {
-    return toString() + "(" + getObjectString() + ")";
+  public final String getParentString() {
+    final INodeReference parentRef = getParentReference();
+    if (parentRef != null) {
+      return "parentRef=" + parentRef.getLocalName() + "->";
+    } else {
+      final INodeDirectory parentDir = getParent();
+      if (parentDir != null) {
+        return "parentDir=" + parentDir.getLocalName() + "/";
+      } else {
+        return "parent=null";
+      }
+    }
   }
 
   @VisibleForTesting
   public String toDetailString() {
-    final INodeDirectory p = getParent();
-    return toStringWithObjectType()
-        + ", parent=" + (p == null? null: p.toStringWithObjectType());
+    return toString() + "(" + getObjectString() + "), " + getParentString();
   }
 
   /** @return the parent directory */
@@ -611,6 +620,11 @@ public final StringBuffer dumpTreeRecursively() {
     return out.getBuffer();
   }
 
+  @VisibleForTesting
+  public final void dumpTreeRecursively(PrintStream out) {
+    dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), null);
+  }
+
   /**
    * Dump tree recursively.
    * @param prefix The prefix string that each line should print.
@@ -623,10 +637,8 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
     out.print(getLocalName());
     out.print(" (");
     out.print(getObjectString());
-    out.print("), parent=");
-
-    final INodeDirectory p = getParent();
-    out.print(p == null? null: p.getLocalName() + "/");
+    out.print("), ");
+    out.print(getParentString());
     out.print(", " + getPermissionStatus(snapshot));
   }
@@ -227,9 +227,9 @@ public void replaceChild(final INode oldChild, final INode newChild) {
   INodeReference.WithCount replaceChild4Reference(INode oldChild) {
     Preconditions.checkArgument(!oldChild.isReference());
     final INodeReference.WithCount withCount
-        = new INodeReference.WithCount(oldChild);
-    final INodeReference ref = new INodeReference(withCount);
-    withCount.incrementReferenceCount();
+        = new INodeReference.WithCount(null, oldChild);
+    final INodeReference ref = new INodeReference(this, withCount);
+    withCount.setParentReference(ref);
     replaceChild(oldChild, ref);
     return withCount;
   }
@@ -897,6 +897,9 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
     if (this instanceof INodeDirectoryWithQuota) {
       out.print(((INodeDirectoryWithQuota)this).quotaString());
     }
+    if (this instanceof Snapshot.Root) {
+      out.print(", snapshotId=" + snapshot.getId());
+    }
     out.println();
 
     if (prefix.length() >= 2) {
@@ -17,11 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.PrintWriter;
+
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+
+import com.google.common.base.Preconditions;
 
 /**
  * An anonymous reference to an inode.
  *
@@ -76,11 +80,9 @@ private static int removeReference(INodeReference ref) {
 
   private INode referred;
 
-  INodeReference(INode referred) {
-    super(referred.getParent());
+  public INodeReference(INode parent, INode referred) {
+    super(parent);
     this.referred = referred;
-
-    referred.setParentReference(this);
   }
 
@@ -210,7 +212,9 @@ public final void setAccessTime(long accessTime) {
 
   @Override
   final INode recordModification(Snapshot latest) throws QuotaExceededException {
-    return referred.recordModification(latest);
+    referred.recordModification(latest);
+    // reference is never replaced
+    return this;
   }
 
   @Override
@@ -265,22 +269,47 @@ public final long getDsQuota() {
   @Override
   public final void clear() {
     super.clear();
-    referred.clear();
     referred = null;
   }
 
+  @Override
+  public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
+      final Snapshot snapshot) {
+    super.dumpTreeRecursively(out, prefix, snapshot);
+    if (this instanceof WithCount) {
+      out.print(", count=" + ((WithCount)this).getReferenceCount());
+    }
+    out.println();
+
+    final StringBuilder b = new StringBuilder();
+    for(int i = 0; i < prefix.length(); i++) {
+      b.append(' ');
+    }
+    b.append("->");
+    getReferredINode().dumpTreeRecursively(out, b, snapshot);
+  }
+
   /** An anonymous reference with reference count. */
   public static class WithCount extends INodeReference {
-    private int referenceCount = 0;
+    private int referenceCount = 1;
 
-    WithCount(INode inode) {
-      super(inode);
+    public WithCount(INodeReference parent, INode referred) {
+      super(parent, referred);
+      Preconditions.checkArgument(!referred.isReference());
+      referred.setParentReference(this);
     }
 
     /** @return the reference count. */
     public int getReferenceCount() {
      return referenceCount;
     }
 
     /** Increment and then return the reference count. */
     public int incrementReferenceCount() {
       return ++referenceCount;
     }
 
     /** Decrement and then return the reference count. */
     public int decrementReferenceCount() {
       return --referenceCount;
     }
@@ -291,8 +320,8 @@ public static class WithName extends INodeReference {
 
     private final byte[] name;
 
-    public WithName(WithCount referred, byte[] name) {
-      super(referred);
+    public WithName(INodeDirectory parent, WithCount referred, byte[] name) {
+      super(parent, referred);
       this.name = name;
     }
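Note: the WithCount changes above also shift the counting convention — a WithCount now starts at 1 (the reference that created it) instead of 0, and the creation sites no longer call incrementReferenceCount. A toy model of the intended invariant (count == number of live references); this is an illustration, not the class itself:

```java
public class RefCountDemo {
  // Minimal stand-in for INodeReference.WithCount's counting behavior.
  static class WithCount {
    private int referenceCount = 1; // creation itself is the first reference

    int incrementReferenceCount() { return ++referenceCount; }
    int decrementReferenceCount() { return --referenceCount; }
    int getReferenceCount() { return referenceCount; }
  }

  public static void main(String[] args) {
    WithCount wc = new WithCount();
    wc.incrementReferenceCount();  // e.g. a WithName reference added on rename
    System.out.println(wc.getReferenceCount());  // prints: 2
    wc.decrementReferenceCount();  // one reference goes away
    System.out.println(wc.getReferenceCount());  // prints: 1
  }
}
```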
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.io.DataOutputStream;
+import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 
 import com.google.common.base.Preconditions;
 
@@ -144,11 +144,11 @@ public String toString() {
       + (posteriorDiff == null? null: posteriorDiff.snapshot) + ")";
   }
 
-  void writeSnapshotPath(DataOutputStream out) throws IOException {
-    // Assume the snapshot is recorded before.
-    // The root path is sufficient for looking up the Snapshot object.
-    FSImageSerialization.writeString(snapshot.getRoot().getFullPathName(), out);
+  void writeSnapshot(DataOutput out) throws IOException {
+    // Assume the snapshot is recorded before, write id only.
+    out.writeInt(snapshot.getId());
   }
 
-  abstract void write(DataOutputStream out) throws IOException;
+  abstract void write(DataOutput out, ReferenceMap referenceMap
+      ) throws IOException;
 }
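Note: writeSnapshot above replaces writeSnapshotPath — instead of serializing the full path of the snapshot root (which stops being a stable key once renames can move snapshotted directories), a diff now records only the 4-byte snapshot id, and loaders resolve it through the Loader's id-to-Snapshot map. A sketch of the lookup side, with a plain HashMap standing in for that map:

```java
import java.util.HashMap;
import java.util.Map;

public class SnapshotIdLookup {
  public static void main(String[] args) {
    // Stand-in for FSImageFormat.Loader#snapshotMap: snapshots are written
    // once up front, so a diff only needs the id to find its Snapshot.
    Map<Integer, String> snapshotMap = new HashMap<Integer, String>();
    snapshotMap.put(0, "Snapshot.s0");
    snapshotMap.put(1, "Snapshot.s1");

    int idReadFromDiff = 1;                               // what writeSnapshot stored
    System.out.println(snapshotMap.get(idReadFromDiff));  // prints: Snapshot.s1
  }
}
```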
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.io.DataOutputStream;
+import java.io.DataOutput;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 
 /**
  * {@link INodeFile} with a link to the next element.
@@ -90,9 +91,9 @@ public String toString() {
           + (snapshotINode == null? "?": snapshotINode.getFileReplication());
     }
 
     /** Serialize fields to out */
-    void write(DataOutputStream out) throws IOException {
-      writeSnapshotPath(out);
+    @Override
+    void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
+      writeSnapshot(out);
       out.writeLong(fileSize);
 
       // write snapshotINode
@@ -457,6 +457,11 @@ void replaceSelf(final Snapshot latest) throws QuotaExceededException {
     }
   }
 
+  @Override
+  public String toDetailString() {
+    return super.toDetailString() + ", snapshotsByNames=" + snapshotsByNames;
+  }
+
   @Override
   public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
       Snapshot snapshot) {
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.io.DataOutputStream;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryWithQuota;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.Quota;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.hdfs.util.Diff.Container;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
@@ -122,7 +123,7 @@ private Quota.Counts destroyDeletedList(
     }
 
     /** Serialize {@link #created} */
-    private void writeCreated(DataOutputStream out) throws IOException {
+    private void writeCreated(DataOutput out) throws IOException {
       final List<INode> created = getList(ListType.CREATED);
       out.writeInt(created.size());
       for (INode node : created) {
@@ -134,18 +135,20 @@ private void writeCreated(DataOutputStream out) throws IOException {
     }
 
     /** Serialize {@link #deleted} */
-    private void writeDeleted(DataOutputStream out) throws IOException {
+    private void writeDeleted(DataOutput out,
+        ReferenceMap referenceMap) throws IOException {
      final List<INode> deleted = getList(ListType.DELETED);
       out.writeInt(deleted.size());
       for (INode node : deleted) {
-        FSImageSerialization.saveINode2Image(node, out, true);
+        FSImageSerialization.saveINode2Image(node, out, true, referenceMap);
       }
     }
 
     /** Serialize to out */
-    private void write(DataOutputStream out) throws IOException {
+    private void write(DataOutput out, ReferenceMap referenceMap
+        ) throws IOException {
       writeCreated(out);
-      writeDeleted(out);
+      writeDeleted(out, referenceMap);
     }
 
     /** @return The list of INodeDirectory contained in the deleted list */
@@ -339,8 +342,8 @@ public String toString() {
     }
 
     @Override
-    void write(DataOutputStream out) throws IOException {
-      writeSnapshotPath(out);
+    void write(DataOutput out, ReferenceMap referenceMap) throws IOException {
+      writeSnapshot(out);
       out.writeInt(childrenSize);
 
       // write snapshotINode
@@ -356,7 +359,7 @@ void write(DataOutputStream out) throws IOException {
         }
       }
       // Write diff. Node need to write poseriorDiff, since diffs is a list.
-      diff.write(out);
+      diff.write(out, referenceMap);
     }
 
     @Override
@@ -604,9 +607,10 @@ public void replaceChild(final INode oldChild, final INode newChild) {
   /** The child just has been removed, replace it with a reference. */
   public INodeReference.WithName replaceRemovedChild4Reference(
       INode oldChild, INodeReference.WithCount newChild, byte[] childName) {
-    final INodeReference.WithName ref = new INodeReference.WithName(
+    final INodeReference.WithName ref = new INodeReference.WithName(this,
         newChild, childName);
+    newChild.incrementReferenceCount();
 
     diffs.replaceChild(ListType.CREATED, oldChild, ref);
     // the old child must be in the deleted list
     Preconditions.checkState(
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Comparator;
@@ -25,6 +26,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@@ -122,14 +124,17 @@ public String getFullPathName() {
     this.root.setLocalName(DFSUtil.string2Bytes(name));
   }
 
-  Snapshot(int id, INodeDirectory dir,
-      INodeDirectorySnapshottable parent) {
+  Snapshot(int id, INodeDirectory dir, INodeDirectorySnapshottable parent) {
     this.id = id;
     this.root = new Root(dir);
 
     this.root.setParent(parent);
   }
 
+  public int getId() {
+    return id;
+  }
+
   /** @return the root directory of the snapshot. */
   public Root getRoot() {
     return root;
@@ -157,7 +162,7 @@ public int hashCode() {
 
   @Override
   public String toString() {
-    return getClass().getSimpleName() + "." + root.getLocalName();
+    return getClass().getSimpleName() + "." + root.getLocalName() + "(id=" + id + ")";
   }
 
   /** Serialize the fields to out */
@@ -166,4 +171,10 @@ void write(DataOutput out) throws IOException {
     // write root
     FSImageSerialization.writeINodeDirectory(root, out);
   }
+
+  static Snapshot read(DataInput in, FSImageFormat.Loader loader) throws IOException {
+    final int snapshotId = in.readInt();
+    final INode root = loader.loadINodeWithLocalName(false, in);
+    return new Snapshot(snapshotId, root.asDirectory(), null);
+  }
 }
@@ -17,48 +17,51 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
+import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat.Loader;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
 
 /**
  * A helper class defining static methods for reading/writing snapshot related
  * information from/to FSImage.
  */
 public class SnapshotFSImageFormat {
 
   /**
    * Save snapshots and snapshot quota for a snapshottable directory.
    * @param current The directory that the snapshots belongs to.
-   * @param out The {@link DataOutputStream} to write.
+   * @param out The {@link DataOutput} to write.
    * @throws IOException
    */
   public static void saveSnapshots(INodeDirectorySnapshottable current,
-      DataOutputStream out) throws IOException {
+      DataOutput out) throws IOException {
     // list of snapshots in snapshotsByNames
     ReadOnlyList<Snapshot> snapshots = current.getSnapshotsByNames();
     out.writeInt(snapshots.size());
-    for (Snapshot ss : snapshots) {
-      // write the snapshot
-      ss.write(out);
+    for (Snapshot s : snapshots) {
+      // write the snapshot id
+      out.writeInt(s.getId());
     }
     // snapshot quota
     out.writeInt(current.getSnapshotQuota());
@@ -67,11 +70,11 @@ public static void saveSnapshots(INodeDirectorySnapshottable current,
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
    * @param sNode The directory that the SnapshotDiff list belongs to.
-   * @param out The {@link DataOutputStream} to write.
+   * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, D extends AbstractINodeDiff<N, D>>
       void saveINodeDiffs(final AbstractINodeDiffList<N, D> diffs,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out, ReferenceMap referenceMap) throws IOException {
     // Record the diffs in reversed order, so that we can find the correct
     // reference for INodes in the created list when loading the FSImage
     if (diffs == null) {
@@ -81,24 +84,25 @@ void saveINodeDiffs(final AbstractINodeDiffList<N, D> diffs,
       final int size = list.size();
       out.writeInt(size);
       for (int i = size - 1; i >= 0; i--) {
-        list.get(i).write(out);
+        list.get(i).write(out, referenceMap);
       }
     }
   }
 
   public static void saveDirectoryDiffList(final INodeDirectory dir,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out, final ReferenceMap referenceMap
+      ) throws IOException {
     saveINodeDiffs(dir instanceof INodeDirectoryWithSnapshot?
-        ((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out);
+        ((INodeDirectoryWithSnapshot)dir).getDiffs(): null, out, referenceMap);
   }
 
   public static void saveFileDiffList(final INodeFile file,
-      final DataOutputStream out) throws IOException {
+      final DataOutput out) throws IOException {
     saveINodeDiffs(file instanceof FileWithSnapshot?
-        ((FileWithSnapshot)file).getDiffs(): null, out);
+        ((FileWithSnapshot)file).getDiffs(): null, out, null);
   }
 
-  public static FileDiffList loadFileDiffList(DataInputStream in,
+  public static FileDiffList loadFileDiffList(DataInput in,
       FSImageFormat.Loader loader) throws IOException {
     final int size = in.readInt();
     if (size == -1) {
@@ -115,11 +119,10 @@ public static FileDiffList loadFileDiffList(DataInputStream in,
     }
   }
 
-  private static FileDiff loadFileDiff(FileDiff posterior, DataInputStream in,
+  private static FileDiff loadFileDiff(FileDiff posterior, DataInput in,
       FSImageFormat.Loader loader) throws IOException {
     // 1. Read the full path of the Snapshot root to identify the Snapshot
-    Snapshot snapshot = findSnapshot(FSImageSerialization.readString(in),
-        loader.getFSDirectoryInLoading());
+    final Snapshot snapshot = loader.getSnapshot(in);
 
     // 2. Load file size
     final long fileSize = in.readLong();
@@ -161,17 +164,16 @@ private static INode loadCreated(byte[] createdNodeName,
   /**
    * Load the created list from fsimage.
    * @param parent The directory that the created list belongs to.
-   * @param in The {@link DataInputStream} to read.
+   * @param in The {@link DataInput} to read.
    * @return The created list.
    */
   private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
-      DataInputStream in) throws IOException {
+      DataInput in) throws IOException {
     // read the size of the created list
     int createdSize = in.readInt();
     List<INode> createdList = new ArrayList<INode>(createdSize);
     for (int i = 0; i < createdSize; i++) {
-      byte[] createdNodeName = new byte[in.readShort()];
-      in.readFully(createdNodeName);
+      byte[] createdNodeName = FSImageSerialization.readLocalName(in);
       INode created = loadCreated(createdNodeName, parent);
       createdList.add(created);
     }
@@ -184,12 +186,12 @@ private static List<INode> loadCreatedList(INodeDirectoryWithSnapshot parent,
    * @param parent The directory that the deleted list belongs to.
    * @param createdList The created list associated with the deleted list in
    *                    the same Diff.
-   * @param in The {@link DataInputStream} to read.
+   * @param in The {@link DataInput} to read.
    * @param loader The {@link Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
-      List<INode> createdList, DataInputStream in, FSImageFormat.Loader loader)
+      List<INode> createdList, DataInput in, FSImageFormat.Loader loader)
       throws IOException {
     int deletedSize = in.readInt();
     List<INode> deletedList = new ArrayList<INode>(deletedSize);
@@ -208,49 +210,35 @@ private static List<INode> loadDeletedList(INodeDirectoryWithSnapshot parent,
    * Load snapshots and snapshotQuota for a Snapshottable directory.
    * @param snapshottableParent The snapshottable directory for loading.
    * @param numSnapshots The number of snapshots that the directory has.
-   * @param in The {@link DataInputStream} instance to read.
+   * @param in The {@link DataInput} instance to read.
    * @param loader The {@link Loader} instance that this loading procedure is
    *               using.
    */
   public static void loadSnapshotList(
       INodeDirectorySnapshottable snapshottableParent, int numSnapshots,
-      DataInputStream in, FSImageFormat.Loader loader) throws IOException {
+      DataInput in, FSImageFormat.Loader loader) throws IOException {
     for (int i = 0; i < numSnapshots; i++) {
       // read snapshots
-      Snapshot ss = loadSnapshot(snapshottableParent, in, loader);
-      snapshottableParent.addSnapshot(ss);
+      final Snapshot s = loader.getSnapshot(in);
+      s.getRoot().setParent(snapshottableParent);
+      snapshottableParent.addSnapshot(s);
     }
     int snapshotQuota = in.readInt();
     snapshottableParent.setSnapshotQuota(snapshotQuota);
   }
 
-  /**
-   * Load a {@link Snapshot} from fsimage.
-   * @param parent The directory that the snapshot belongs to.
-   * @param in The {@link DataInputStream} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
-   *               using.
-   * @return The snapshot.
-   */
-  private static Snapshot loadSnapshot(INodeDirectorySnapshottable parent,
-      DataInputStream in, FSImageFormat.Loader loader) throws IOException {
-    int snapshotId = in.readInt();
-    final INode root = loader.loadINodeWithLocalName(false, in);
-    return new Snapshot(snapshotId, root.asDirectory(), parent);
-  }
-
   /**
    * Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
    * directory.
    * @param dir The snapshottable directory for loading.
    * @param numSnapshotDiffs The number of {@link SnapshotDiff} that the
    *                         directory has.
-   * @param in The {@link DataInputStream} instance to read.
+   * @param in The {@link DataInput} instance to read.
    * @param loader The {@link Loader} instance that this loading procedure is
    *               using.
    */
   public static void loadDirectoryDiffList(INodeDirectory dir,
-      DataInputStream in, FSImageFormat.Loader loader) throws IOException {
+      DataInput in, FSImageFormat.Loader loader) throws IOException {
     final int size = in.readInt();
     if (size != -1) {
       INodeDirectoryWithSnapshot withSnapshot = (INodeDirectoryWithSnapshot)dir;
@@ -261,30 +249,16 @@ public static void loadDirectoryDiffList(INodeDirectory dir,
     }
   }
 
-  /**
-   * Use the given full path to a {@link Root} directory to find the
-   * associated snapshot.
-   */
-  private static Snapshot findSnapshot(String sRootFullPath, FSDirectory fsdir)
-      throws IOException {
-    // find the root
-    INode root = fsdir.getINode(sRootFullPath);
-    INodeDirectorySnapshottable snapshotRoot = INodeDirectorySnapshottable
-        .valueOf(root.getParent(), root.getParent().getFullPathName());
-    // find the snapshot
-    return snapshotRoot.getSnapshot(root.getLocalNameBytes());
-  }
-
   /**
    * Load the snapshotINode field of {@link SnapshotDiff}.
    * @param snapshot The Snapshot associated with the {@link SnapshotDiff}.
-   * @param in The {@link DataInputStream} to read.
+   * @param in The {@link DataInput} to read.
    * @param loader The {@link Loader} instance that this loading procedure is
    *               using.
    * @return The snapshotINode.
    */
   private static INodeDirectory loadSnapshotINodeInDirectoryDiff(
-      Snapshot snapshot, DataInputStream in, FSImageFormat.Loader loader)
+      Snapshot snapshot, DataInput in, FSImageFormat.Loader loader)
       throws IOException {
     // read the boolean indicating whether snapshotINode == Snapshot.Root
     boolean useRoot = in.readBoolean();
@@ -300,17 +274,16 @@ private static INodeDirectory loadSnapshotINodeInDirectoryDiff(
   /**
    * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
-   * @param in The {@link DataInputStream} instance to read.
+   * @param in The {@link DataInput} instance to read.
    * @param loader The {@link Loader} instance that this loading procedure is
    *               using.
    * @return A {@link DirectoryDiff}.
    */
   private static DirectoryDiff loadDirectoryDiff(
-      INodeDirectoryWithSnapshot parent, DataInputStream in,
+      INodeDirectoryWithSnapshot parent, DataInput in,
       FSImageFormat.Loader loader) throws IOException {
     // 1. Read the full path of the Snapshot root to identify the Snapshot
-    Snapshot snapshot = findSnapshot(FSImageSerialization.readString(in),
-        loader.getFSDirectoryInLoading());
+    final Snapshot snapshot = loader.getSnapshot(in);
 
     // 2. Load DirectoryDiff#childrenSize
     int childrenSize = in.readInt();
@@ -333,4 +306,78 @@ private static DirectoryDiff loadDirectoryDiff(
     return sdiff;
   }
 
+
+  /** A reference with a fixed id for fsimage serialization. */
+  private static class INodeReferenceWithId extends INodeReference {
+    final long id;
+
+    private INodeReferenceWithId(WithCount parent, INode referred, long id) {
+      super(parent, referred);
+      this.id = id;
+    }
+
+    /** @return the reference id. */
+    private long getReferenceId() {
+      return id;
+    }
+  }
+
+  /** A reference map for fsimage serialization. */
+  public static class ReferenceMap {
+    private final Map<Long, INodeReference.WithCount> referenceMap
+        = new HashMap<Long, INodeReference.WithCount>();
+    private long referenceId = 0;
+
+    public void writeINodeReferenceWithCount(INodeReference.WithCount withCount,
+        DataOutput out, boolean writeUnderConstruction) throws IOException {
+      final INode referred = withCount.getReferredINode();
+      final boolean firstReferred = !(referred instanceof INodeReferenceWithId);
+      out.writeBoolean(firstReferred);
+
+      if (firstReferred) {
+        FSImageSerialization.saveINode2Image(referred, out,
+            writeUnderConstruction, this);
+        final long id = ++referenceId;
+        referenceMap.put(id, withCount);
+
+        final INodeReferenceWithId withId = new INodeReferenceWithId(
+            withCount, referred, id);
+        withCount.setReferredINode(withId);
+        referred.setParentReference(withId);
+      } else {
+        final long id = ((INodeReferenceWithId)referred).getReferenceId();
+        Preconditions.checkState(referenceMap.containsKey(id));
+        out.writeLong(id);
+      }
+    }
+
+    public INodeReference.WithCount loadINodeReferenceWithCount(
+        boolean isSnapshotINode, DataInput in, FSImageFormat.Loader loader
+        ) throws IOException {
+      final boolean firstReferred = in.readBoolean();
+
+      final INodeReference.WithCount withCount;
+      if (firstReferred) {
+        final INode referred = loader.loadINodeWithLocalName(isSnapshotINode, in);
+        withCount = new INodeReference.WithCount(null, referred);
+        referenceMap.put(++referenceId, withCount);
+      } else {
+        final long id = in.readLong();
+        withCount = referenceMap.get(id);
+        withCount.incrementReferenceCount();
+      }
+      return withCount;
+    }
+
+    public void removeAllINodeReferenceWithId() {
+      for(INodeReference.WithCount withCount : referenceMap.values()) {
+        final INodeReference ref = withCount.getReferredINode().asReference();
+        final INode referred = ref.getReferredINode();
+        withCount.setReferredINode(referred);
+        referred.setParentReference(withCount);
+        ref.clear();
+      }
+      referenceMap.clear();
+    }
+  }
 }
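Note: ReferenceMap above is the dedup core of this patch — the first time a WithCount is written, the referred inode is serialized in full and assigned a sequential id; every later occurrence writes only that id, so an inode referenced from several snapshot diffs appears once in the image. A simplified model of the write side (strings standing in for inodes; the real code marks first occurrences by wrapping the referred inode in INodeReferenceWithId rather than consulting a map):

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class ReferenceMapDemo {
  private final Map<String, Long> ids = new HashMap<String, Long>();
  private long nextId = 0;

  // Returns what would be written: full payload on first sight, id afterwards.
  String write(String inode) {
    Long id = ids.get(inode);
    if (id == null) {
      ids.put(inode, ++nextId);
      return "full:" + inode;  // serialize the referred inode once
    }
    return "id:" + id;         // later references are just a fixed-size id
  }

  public static void main(String[] args) {
    ReferenceMapDemo map = new ReferenceMapDemo();
    for (String record : Arrays.asList("foo", "bar", "foo")) {
      System.out.println(map.write(record));
    }
    // prints: full:foo, full:bar, id:1
  }
}
```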
@ -21,12 +21,15 @@
|
||||
import java.io.DataOutput;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
|
||||
@ -195,23 +198,39 @@ public long getNumSnapshots() {
  }

  /**
   * Write {@link #snapshotCounter}, {@link #numSnapshots}, and
   * {@link #numSnapshottableDirs} to the DataOutput.
   * Write {@link #snapshotCounter}, {@link #numSnapshots},
   * {@link #numSnapshottableDirs} and all snapshots to the DataOutput.
   */
  public void write(DataOutput out) throws IOException {
    out.writeInt(snapshotCounter);
    out.writeInt(numSnapshots.get());
    out.writeInt(numSnapshottableDirs.get());
    out.writeInt(numSnapshots.get());

    // write all snapshots.
    for(INodeDirectorySnapshottable snapshottableDir : snapshottables) {
      for(Snapshot s : snapshottableDir.getSnapshotsByNames()) {
        s.write(out);
      }
    }
  }

  /**
   * Read values of {@link #snapshotCounter}, {@link #numSnapshots}, and
   * {@link #numSnapshottableDirs} from the DataInput
   * {@link #numSnapshottableDirs} and all snapshots from the DataInput
   */
  public void read(DataInput in) throws IOException {
  public Map<Integer, Snapshot> read(DataInput in, FSImageFormat.Loader loader
      ) throws IOException {
    snapshotCounter = in.readInt();
    numSnapshots.set(in.readInt());
    numSnapshottableDirs.set(in.readInt());
    numSnapshots.set(in.readInt());

    // read snapshots
    final Map<Integer, Snapshot> snapshotMap = new HashMap<Integer, Snapshot>();
    for(int i = 0; i < numSnapshots.get(); i++) {
      final Snapshot s = Snapshot.read(in, loader);
      snapshotMap.put(s.getId(), s);
    }
    return snapshotMap;
  }

  /**
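The write and read methods above form one serialization contract: the reader must consume fields in exactly the order the writer produced them, and the snapshot count must be read before the per-snapshot records it bounds. A round-trip sketch of that ordering with plain java.io streams (field values are illustrative, and writeUTF stands in for Snapshot#write):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SnapshotStateOrderDemo {
  public static void main(String[] args) throws IOException {
    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    final DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(7);  // snapshotCounter
    out.writeInt(2);  // numSnapshottableDirs
    out.writeInt(3);  // numSnapshots: must precede the snapshot records
    for (int i = 0; i < 3; i++) {
      out.writeUTF("snapshot-" + i); // one record per snapshot
    }

    final DataInput in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println("counter = " + in.readInt());           // 7
    System.out.println("snapshottableDirs = " + in.readInt()); // 2
    final int numSnapshots = in.readInt();                     // 3, bounds the loop
    for (int i = 0; i < numSnapshots; i++) {
      System.out.println(in.readUTF()); // stands in for Snapshot.read(in, loader)
    }
  }
}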
@ -205,7 +205,16 @@ public void testSaveLoadImage() throws Exception {
    hdfs.setReplication(sub1file1, (short) (REPLICATION - 1));
    hdfs.delete(sub1file2, true);
    hdfs.setOwner(sub2, "dr.who", "unknown");
    hdfs.delete(sub2file2, true);
    hdfs.delete(sub2file1, true);
    checkImage(s);

    hdfs.createSnapshot(dir, "s" + ++s);
    Path sub1_sub2file2 = new Path(sub1, "sub2file2");
    hdfs.rename(sub2file2, sub1_sub2file2);

    hdfs.rename(sub1file1, sub2file1);
    // TODO: fix case hdfs.rename(sub1file1, sub1file2);

    checkImage(s);
  }

@ -222,8 +231,14 @@ void checkImage(int s) throws IOException {
    long numSnapshotBefore = fsn.getNumSnapshots();
    SnapshottableDirectoryStatus[] dirBefore = hdfs.getSnapshottableDirListing();

    // restart the cluster, and format the cluster
    // shutdown the cluster
    cluster.shutdown();

    // dump the fsdir tree
    File fsnBetween = dumpTree2File(name + "_between");
    SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnBetween);

    // restart the cluster, and format the cluster
    cluster = new MiniDFSCluster.Builder(conf).format(true)
        .numDataNodes(REPLICATION).build();
    cluster.waitActive();
@ -38,9 +38,11 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;

@ -269,7 +271,6 @@ public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
  /**
   * Generate the path for a snapshot file.
   *
   * @param fs FileSystem instance
   * @param snapshotRoot of format
   *          {@literal <snapshottable_dir>/.snapshot/<snapshot_name>}
   * @param file path to a file

@ -279,7 +280,7 @@ public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
   *         . Null if the file is not under the directory associated with the
   *         snapshot root.
   */
  static Path getSnapshotFile(FileSystem fs, Path snapshotRoot, Path file) {
  static Path getSnapshotFile(Path snapshotRoot, Path file) {
    Path rootParent = snapshotRoot.getParent();
    if (rootParent != null && rootParent.getName().equals(".snapshot")) {
      Path snapshotDir = rootParent.getParent();
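For reference, the translation getSnapshotFile performs is pure path arithmetic, which is why the FileSystem parameter could be dropped above: given a snapshot root of the form <snapshottable_dir>/.snapshot/<snapshot_name> and a file under <snapshottable_dir>, it returns the corresponding path under the snapshot root, and null otherwise. A plain-string sketch (the real helper works on org.apache.hadoop.fs.Path; names here are illustrative):

public class SnapshotPathModel {
  /** Maps /dir/sub/f under /dir to /dir/.snapshot/s1/sub/f; null if not under /dir. */
  static String getSnapshotFile(String snapshotRoot, String file) {
    // snapshotRoot is expected to look like <snapshottable_dir>/.snapshot/<name>
    final int i = snapshotRoot.lastIndexOf("/.snapshot/");
    if (i < 0) {
      return null;
    }
    final String snapshottableDir = snapshotRoot.substring(0, i);
    if (!file.startsWith(snapshottableDir + "/")) {
      return null; // file is not under the directory the snapshot covers
    }
    return snapshotRoot + file.substring(snapshottableDir.length());
  }

  public static void main(String[] args) {
    // prints /dir/.snapshot/s1/sub/f
    System.out.println(getSnapshotFile("/dir/.snapshot/s1", "/dir/sub/f"));
    // prints null: /other/f is outside /dir
    System.out.println(getSnapshotFile("/dir/.snapshot/s1", "/other/f"));
  }
}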
@ -464,10 +465,10 @@ public int hashCode() {
    }
  }

  static void dumpTreeRecursively(INode inode) {
    if (INode.LOG.isDebugEnabled()) {
      inode.dumpTreeRecursively(
          new PrintWriter(System.out, true), new StringBuilder(), null);
    }
  public static void dumpTree(String message, MiniDFSCluster cluster
      ) throws UnresolvedLinkException {
    System.out.println("XXX " + message);
    cluster.getNameNode().getNamesystem().getFSDirectory().getINode("/"
        ).dumpTreeRecursively(System.out);
  }
}
@ -33,7 +33,6 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.AfterClass;

@ -55,7 +54,6 @@ public class TestNestedSnapshots {

  private static Configuration conf = new Configuration();
  private static MiniDFSCluster cluster;
  private static FSNamesystem fsn;
  private static DistributedFileSystem hdfs;

  @BeforeClass

@ -63,8 +61,6 @@ public static void setUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
        .build();
    cluster.waitActive();

    fsn = cluster.getNamesystem();
    hdfs = cluster.getFileSystem();
  }

@ -112,8 +108,7 @@ public void testNestedSnapshots() throws Exception {
  }

  private static void print(String message) throws UnresolvedLinkException {
    System.out.println("XXX " + message);
    SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
    SnapshotTestHelper.dumpTree(message, cluster);
  }

  private static void assertFile(Path s1, Path s2, Path file,
@ -0,0 +1,107 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

/** Testing rename with snapshots. */
public class TestRenameWithSnapshots {
  {
    SnapshotTestHelper.disableLogs();
  }

  private static final long SEED = 0;

  private static final short REPL = 3;
  private static final long BLOCKSIZE = 1024;

  private static Configuration conf = new Configuration();
  private static MiniDFSCluster cluster;
  private static FSNamesystem fsn;
  private static FSDirectory fsdir;
  private static DistributedFileSystem hdfs;

  @BeforeClass
  public static void setUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
    cluster.waitActive();

    fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();

    hdfs = cluster.getFileSystem();
  }

  @AfterClass
  public static void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test (timeout=300000)
  public void testRenameWithSnapshot() throws Exception {
    final String dirStr = "/testRenameWithSnapshot";
    final String abcStr = dirStr + "/abc";
    final Path abc = new Path(abcStr);
    hdfs.mkdirs(abc, new FsPermission((short)0777));
    hdfs.allowSnapshot(abcStr);

    final Path foo = new Path(abc, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    hdfs.createSnapshot(abc, "s0");
    final INode fooINode = fsdir.getINode(foo.toString());

    final String xyzStr = dirStr + "/xyz";
    final Path xyz = new Path(xyzStr);
    hdfs.mkdirs(xyz, new FsPermission((short)0777));
    final Path bar = new Path(xyz, "bar");
    hdfs.rename(foo, bar);

    final INode fooRef = fsdir.getINode(
        SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
    Assert.assertTrue(fooRef.isReference());
    Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);

    final INodeReference.WithCount withCount
        = (INodeReference.WithCount)fooRef.asReference().getReferredINode();
    Assert.assertEquals(2, withCount.getReferenceCount());

    final INode barRef = fsdir.getINode(bar.toString());
    Assert.assertTrue(barRef.isReference());

    Assert.assertSame(withCount, barRef.asReference().getReferredINode());
    Assert.assertSame(fooINode, withCount.asReference().getReferredINode());

    hdfs.delete(bar, false);
    Assert.assertEquals(1, withCount.getReferenceCount());
  }
}
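The assertions in testRenameWithSnapshot hinge on one counting rule: the snapshot's WithName reference and the rename destination both point at a single WithCount, and deleting the destination decrements the count while the snapshot copy keeps the referred inode alive. A minimal model of that rule (a hypothetical class, not the HDFS INodeReference.WithCount itself):

public class WithCountModel {
  static class WithCount {
    private int referenceCount = 1; // created with one reference
    int getReferenceCount() { return referenceCount; }
    void incrementReferenceCount() { referenceCount++; }
    void decrementReferenceCount() { referenceCount--; }
  }

  public static void main(String[] args) {
    final WithCount withCount = new WithCount();       // snapshot's WithName ref
    withCount.incrementReferenceCount();               // rename adds the dst ref
    System.out.println(withCount.getReferenceCount()); // 2, as asserted above
    withCount.decrementReferenceCount();               // hdfs.delete(bar) drops dst
    System.out.println(withCount.getReferenceCount()); // 1: snapshot still refers
  }
}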
@ -76,7 +76,7 @@ public class TestSnapshot {
  public static final int DIRECTORY_TREE_LEVEL = 5;

  protected Configuration conf;
  protected MiniDFSCluster cluster;
  protected static MiniDFSCluster cluster;
  protected static FSNamesystem fsn;
  protected static FSDirectory fsdir;
  protected DistributedFileSystem hdfs;

@ -222,7 +222,7 @@ public void testSnapshot() throws Throwable {
      runTestSnapshot();
    } catch(Throwable t) {
      SnapshotTestHelper.LOG.info("FAILED", t);
      SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
      SnapshotTestHelper.dumpTree("FAILED", cluster);
      throw t;
    }
  }

@ -471,7 +471,7 @@ static abstract class FileStatusChange extends Modification {
    @Override
    void loadSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs,
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
            snapshotRoot, file);
        if (snapshotFile != null) {
          if (fs.exists(snapshotFile)) {

@ -501,8 +501,7 @@ void checkSnapshots() throws Exception {
            + "\n\nfile        : " + fsdir.getINode(file.toString()).toDetailString()
            + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();

        System.out.println(s);
        SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
        SnapshotTestHelper.dumpTree(s, cluster);
      }
      assertEquals(s, currentStatus.toString(), originalStatus.toString());
    }

@ -582,7 +581,7 @@ static class FileAppend extends Modification {
    @Override
    void loadSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs,
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
            snapshotRoot, file);
        if (snapshotFile != null) {
          long snapshotFileLen = fs.exists(snapshotFile) ? fs.getFileStatus(

@ -613,9 +612,7 @@ void checkSnapshots() throws Exception {
            + "\noriginalSnapshotFileLen = " + originalSnapshotFileLen
            + "\n\nfile        : " + fsdir.getINode(file.toString()).toDetailString()
            + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();

        System.out.println(s);
        SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
        SnapshotTestHelper.dumpTree(s, cluster);
      }
      assertEquals(s, originalSnapshotFileLen, currentSnapshotFileLen);
      // Read the snapshot file out of the boundary

@ -630,9 +627,7 @@ void checkSnapshots() throws Exception {
            + "\n readLen = " + readLen
            + "\n\nfile        : " + fsdir.getINode(file.toString()).toDetailString()
            + "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();

        System.out.println(s);
        SnapshotTestHelper.dumpTreeRecursively(fsdir.getINode("/"));
        SnapshotTestHelper.dumpTree(s, cluster);
      }
      assertEquals(s, -1, readLen);
      input.close();

@ -703,7 +698,7 @@ static class FileCreation extends Modification {
    @Override
    void loadSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs,
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
            snapshotRoot, file);
        if (snapshotFile != null) {
          FileStatus status =

@ -722,7 +717,7 @@ void modify() throws Exception {
    @Override
    void checkSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(fs,
        Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
            snapshotRoot, file);
        if (snapshotFile != null) {
          boolean computed = fs.exists(snapshotFile);

@ -755,7 +750,7 @@ static class FileDeletion extends Modification {
    @Override
    void loadSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        boolean existence = SnapshotTestHelper.getSnapshotFile(fs,
        boolean existence = SnapshotTestHelper.getSnapshotFile(
            snapshotRoot, file) != null;
        snapshotFileExistenceMap.put(snapshotRoot, existence);
      }

@ -770,7 +765,7 @@ void modify() throws Exception {
    void checkSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        boolean currentSnapshotFileExist = SnapshotTestHelper.getSnapshotFile(
            fs, snapshotRoot, file) != null;
            snapshotRoot, file) != null;
        boolean originalSnapshotFileExist = snapshotFileExistenceMap
            .get(snapshotRoot);
        assertEquals(currentSnapshotFileExist, originalSnapshotFileExist);

@ -809,7 +804,7 @@ class DirCreationOrDeletion extends Modification {
    @Override
    void loadSnapshots() throws Exception {
      for (Path snapshotRoot : snapshotList) {
        Path snapshotDir = SnapshotTestHelper.getSnapshotFile(fs, snapshotRoot,
        Path snapshotDir = SnapshotTestHelper.getSnapshotFile(snapshotRoot,
            changedPath);
        if (snapshotDir != null) {
          FileStatus status = fs.exists(snapshotDir) ? fs