From 12e8ba804f9454d9bb07099e35ce7ef63c0d4e1e Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Fri, 25 Jan 2013 03:09:26 +0000
Subject: [PATCH] HDFS-4429. When the latest snapshot exists,
 INodeFileUnderConstruction should be replaced with INodeFileWithSnapshot but
 not INodeFile.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1438304 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES.HDFS-2802.txt         |   4 +
 .../hdfs/server/namenode/FSNamesystem.java    |  16 ++
 .../hadoop/hdfs/server/namenode/INode.java    |  23 +-
 .../snapshot/INodeDirectoryWithSnapshot.java  |   2 +-
 ...NodeFileUnderConstructionWithSnapshot.java |  17 +-
 .../snapshot/INodeFileWithSnapshot.java       |   2 +-
 ...NodeFileUnderConstructionWithSnapshot.java | 240 ++++++++++++++++++
 .../namenode/snapshot/TestSnapshot.java       |  76 ++++--
 8 files changed, 352 insertions(+), 28 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 0e04ec396d..35e42d521c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -120,3 +120,7 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4436. Change INode.recordModification(..) to return only the current
   inode and remove the updateCircularList parameter from some methods in
   INodeDirectoryWithSnapshot.Diff.  (szetszwo)
+
+  HDFS-4429. When the latest snapshot exists, INodeFileUnderConstruction should
+  be replaced with INodeFileWithSnapshot but not INodeFile.  (Jing Zhao
+  via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9e45f09099..f01f5628ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -178,6 +178,7 @@
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -3293,6 +3294,21 @@ private void finalizeINodeFileUnderConstruction(String src,
       throws IOException, UnresolvedLinkException {
     assert hasWriteLock();
     leaseManager.removeLease(pendingFile.getClientName(), src);
+
+    if (latestSnapshot != null) {
+      if (!(pendingFile instanceof INodeFileUnderConstructionWithSnapshot)) {
+        // replace INodeFileUnderConstruction with
+        // INodeFileUnderConstructionWithSnapshot. This replacement does not
+        // need to be recorded in snapshot.
+        INodeFileUnderConstructionWithSnapshot pendingFileWithSnapshot =
+            new INodeFileUnderConstructionWithSnapshot(pendingFile);
+        dir.replaceINodeFile(src, pendingFile,
+            pendingFileWithSnapshot, null);
+        pendingFile = pendingFileWithSnapshot;
+      }
+      pendingFile = (INodeFileUnderConstruction) pendingFile
+          .recordModification(latestSnapshot);
+    }
 
     // The file is no longer pending.
     // Create permanent INode, update blocks
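The hunk above is the heart of the fix: before finalization, a plain INodeFileUnderConstruction under a snapshottable directory is upgraded to its snapshot-aware counterpart, so that recordModification(latestSnapshot) returns an inode that keeps its snapshot links instead of degrading to a plain INodeFile. A minimal stand-alone sketch of that convert-then-record pattern, using hypothetical Pending/PendingWithSnapshot stubs rather than the real HDFS types:

    // Sketch only: Pending / PendingWithSnapshot are hypothetical stand-ins
    // for INodeFileUnderConstruction(WithSnapshot).
    class Pending {
      Pending recordModification(String latestSnapshot) {
        // a plain pending inode has no snapshot links to preserve
        return this;
      }
    }

    class PendingWithSnapshot extends Pending {
      PendingWithSnapshot(Pending copyFrom) {
        // copy client name, machine, blocks, etc. from copyFrom
      }

      @Override
      Pending recordModification(String latestSnapshot) {
        // snapshot-aware: saves a copy into the snapshot diff, stays current
        return this;
      }
    }

    class FinalizeSketch {
      static Pending finalizeUnderConstruction(Pending pendingFile,
          String latestSnapshot) {
        if (latestSnapshot != null) {
          if (!(pendingFile instanceof PendingWithSnapshot)) {
            // upgrade first; the swap itself is not recorded in the snapshot
            // because both objects describe the same file state
            pendingFile = new PendingWithSnapshot(pendingFile);
          }
          pendingFile = pendingFile.recordModification(latestSnapshot);
        }
        return pendingFile;
      }
    }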
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 96edf12665..e7eb7f008d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileSnapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileUnderConstructionSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -431,8 +432,8 @@ public String toString() {
 
   @VisibleForTesting
   public String getObjectString() {
-    final String s = super.toString();
-    return s.substring(s.lastIndexOf(getClass().getSimpleName()));
+    return getClass().getSimpleName() + "@"
+        + Integer.toHexString(super.hashCode());
   }
 
   @VisibleForTesting
@@ -693,15 +694,21 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
       String blocksInfo = ((INodeFile) this).printBlocksInfo();
       out.print(", blocks=[" + blocksInfo + "]");
     }
-    if (this instanceof INodeFileWithSnapshot) {
-      INodeFileWithSnapshot nodeWithLink = (INodeFileWithSnapshot) this;
-      FileWithSnapshot next = nodeWithLink.getNext();
-      out.print(", next="
-          + (next != null ? next.asINodeFile().getObjectString() : "null"));
-      if (this instanceof INodeFileSnapshot) {
+    if (this instanceof FileWithSnapshot) {
+      if (this instanceof INodeFileSnapshot
+          || this instanceof INodeFileUnderConstructionSnapshot) {
         out.print(", computedSize="
             + ((INodeFileSnapshot) this).computeFileSize(true));
       }
+      FileWithSnapshot nodeWithLink = (FileWithSnapshot) this;
+      FileWithSnapshot next = nodeWithLink.getNext();
+      // An INodeFileWithSnapshot whose next link points to itself is
+      // equivalent to a normal INodeFile
+      if (!(this instanceof INodeFileWithSnapshot &&
+          ((INodeFileWithSnapshot) this).getNext() == this)) {
+        out.print(", next="
+            + (next != null ? next.asINodeFile().getObjectString() : "null"));
+      }
     }
     out.println();
   } else {
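A note on the getObjectString() rewrite above: it now emits the class's simple name plus the identity hash in hex, e.g. INodeFileWithSnapshot@5ca881b5 (value illustrative). That suffix varies from run to run, which is exactly what the compareFile() normalization in TestSnapshot.java, at the end of this patch, strips out before comparing tree dumps. A tiny self-contained demo of that normalization:

    // Demo of the dump-line normalization used by TestSnapshot.compareFile();
    // the "@5ca881b5" hash suffix is an illustrative, run-dependent value.
    public class ObjectStringDemo {
      public static void main(String[] args) {
        String line = "file (INodeFileWithSnapshot@5ca881b5)";
        line = line.replaceAll("INodeFileWithSnapshot", "INodeFile");
        line = line.replaceAll("@[\\dabcdef]+", "");
        System.out.println(line);  // prints: file (INodeFile)
      }
    }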
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index ca4236b9ce..c3d772c50c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -528,7 +528,7 @@ private SnapshotDiff(Snapshot snapshot, INodeDirectory dir) {
       diff.deleted = deletedList;
     }
 
-    public Diff getDiff() {
+    Diff getDiff() {
       return diff;
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
index 890f647b2b..82c15c7a74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java
@@ -39,6 +39,20 @@ public class INodeFileUnderConstructionWithSnapshot
     super(f.asINodeFile(), clientName, clientMachine, clientNode);
   }
 
+  /**
+   * The constructor that creates an
+   * {@link INodeFileUnderConstructionWithSnapshot} based on an
+   * {@link INodeFileUnderConstruction}
+   *
+   * @param child The given {@link INodeFileUnderConstruction} instance
+   */
+  public INodeFileUnderConstructionWithSnapshot(
+      INodeFileUnderConstruction child) {
+    super(child, child.getClientName(), child.getClientMachine(), child
+        .getClientNode());
+    next = this;
+  }
+
   @Override
   protected INodeFileWithSnapshot toINodeFile(final long mtime) {
     assertAllBlocksComplete();
@@ -46,7 +60,8 @@ protected INodeFileWithSnapshot toINodeFile(final long mtime) {
     final INodeFileWithSnapshot f = new INodeFileWithSnapshot(this);
     f.setModificationTime(mtime, null);
     f.setAccessTime(atime, null);
-    Util.replace(this, f);
+    // link f with this
+    this.insertBefore(f);
     return f;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
index ee135cc69c..316e3410ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java
@@ -43,7 +43,7 @@ public INodeFileUnderConstructionWithSnapshot toUnderConstruction(
     final INodeFileUnderConstructionWithSnapshot f =
         new INodeFileUnderConstructionWithSnapshot(this,
             clientName, clientMachine, clientNode);
-    Util.replace(this, f);
+    this.insertBefore(f);
     return f;
   }
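Both Util.replace(this, f) call sites above become this.insertBefore(f): instead of swapping one node for another, the new inode is linked into the circular list that ties the current file to its snapshot copies, and a node whose next pointer is itself is equivalent to a plain INodeFile (as the dumpTreeRecursively change earlier notes). A sketch of that invariant with a hypothetical Node type, not the real FileWithSnapshot API:

    // Hypothetical sketch of the circular "next" list; Node is a stand-in.
    public class CircularNextSketch {
      static class Node {
        final String label;
        Node next = this;  // singleton circle: behaves like a plain INodeFile
        Node(String label) { this.label = label; }

        // Insert n into the circle immediately before this node.
        void insertBefore(Node n) {
          Node prev = this;
          while (prev.next != this) {  // walk to this node's predecessor
            prev = prev.next;
          }
          prev.next = n;
          n.next = this;
        }
      }

      public static void main(String[] args) {
        Node current = new Node("current");
        System.out.println(current.next == current);  // true: standalone node
        Node copy = new Node("snapshot-copy");
        current.insertBefore(copy);
        System.out.println(current.next == copy);     // true: two-node circle
        System.out.println(copy.next == current);     // true
      }
    }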
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
new file mode 100644
index 0000000000..43a922ed90
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
@@ -0,0 +1,240 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.Diff;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test snapshot functionalities during file appending.
+ */
+public class TestINodeFileUnderConstructionWithSnapshot {
+
+  static final long seed = 0;
+  static final short REPLICATION = 3;
+  static final int BLOCKSIZE = 1024;
+
+  private final Path dir = new Path("/TestSnapshot");
+
+  Configuration conf;
+  MiniDFSCluster cluster;
+  FSNamesystem fsn;
+  DistributedFileSystem hdfs;
+  FSDirectory fsdir;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+        .build();
+    cluster.waitActive();
+    fsn = cluster.getNamesystem();
+    fsdir = fsn.getFSDirectory();
+    hdfs = cluster.getFileSystem();
+    hdfs.mkdirs(dir);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Check that the given nodes form a circular linked list
+   */
+  private void checkCircularList(FileWithSnapshot... nodes) {
+    for (int i = 0; i < nodes.length; i++) {
+      FileWithSnapshot next = nodes[i].getNext();
+      FileWithSnapshot expectedNext = nodes[(i + 1) % nodes.length];
+      Assert.assertTrue(next == expectedNext);
+    }
+  }
+
+  /**
+   * Test snapshot after file appending
+   */
+  @Test
+  public void testSnapshotAfterAppending() throws Exception {
+    Path file = new Path(dir, "file");
+    // 1. create snapshot --> create file --> append
+    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
+
+    // check the circular list and corresponding inodes: there should only be a
+    // reference to the current node in the created list
+    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
+    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
+        .getINode(dir.toString());
+    Diff diff = dirNode.getLastSnapshotDiff().getDiff();
+    INode nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
+    assertTrue(fileNode == nodeInCreated);
+    INode nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
+    assertNull(nodeInDeleted);
+
+    // 2. create snapshot --> modify the file --> append
+    hdfs.createSnapshot(dir, "s1");
+    hdfs.setReplication(file, (short) (REPLICATION - 1));
+    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
+
+    // check the circular list and corresponding inodes
+    diff = dirNode.getLastSnapshotDiff().getDiff();
+    fileNode = (INodeFile) fsdir.getINode(file.toString());
+    nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
+    assertTrue(fileNode == nodeInCreated);
+    assertEquals(REPLICATION - 1,
+        ((INodeFile) nodeInCreated).getFileReplication());
+    assertEquals(BLOCKSIZE * 3, ((INodeFile) fileNode).computeFileSize(true));
+    nodeInDeleted = diff.searchDeleted(fileNode.getLocalNameBytes());
+    assertEquals(REPLICATION,
+        ((INodeFile) nodeInDeleted).getFileReplication());
+    assertEquals(BLOCKSIZE * 2,
+        ((INodeFile) nodeInDeleted).computeFileSize(true));
+    checkCircularList((INodeFileWithSnapshot) fileNode,
+        (INodeFileSnapshot) nodeInDeleted);
+
+    // 3. create snapshot --> append
+    hdfs.createSnapshot(dir, "s2");
+    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
+
+    // check the circular list and corresponding inodes
+    diff = dirNode.getLastSnapshotDiff().getDiff();
+    fileNode = (INodeFile) fsdir.getINode(file.toString());
+    nodeInCreated = diff.searchCreated(fileNode.getLocalNameBytes());
+    assertTrue(fileNode == nodeInCreated);
+    assertEquals(REPLICATION - 1,
+        ((INodeFile) nodeInCreated).getFileReplication());
+    assertEquals(BLOCKSIZE * 4, ((INodeFile) fileNode).computeFileSize(true));
+    INode nodeInDeleted2 = diff.searchDeleted(fileNode.getLocalNameBytes());
+    assertEquals(REPLICATION - 1,
+        ((INodeFile) nodeInDeleted2).getFileReplication());
+    assertEquals(BLOCKSIZE * 3,
+        ((INodeFile) nodeInDeleted2).computeFileSize(true));
+    checkCircularList((INodeFileWithSnapshot) fileNode,
+        (INodeFileSnapshot) nodeInDeleted2, (INodeFileSnapshot) nodeInDeleted);
+  }
+
+  private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
+      throws IOException {
+    byte[] toAppend = new byte[length];
+    Random random = new Random();
+    random.nextBytes(toAppend);
+    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
+    out.write(toAppend);
+    return out;
+  }
+
+  /**
+   * Test snapshot during file appending, before the corresponding
+   * {@link FSDataOutputStream} instance closes.
+   */
+  @Test
+  public void testSnapshotWhileAppending() throws Exception {
+    Path file = new Path(dir, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
+
+    // 1. append without closing stream --> create snapshot
+    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
+    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
+    out.close();
+
+    // check: an INodeFileUnderConstructionSnapshot should be stored into s0's
+    // deleted list, with size BLOCKSIZE*2
+    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
+    assertEquals(BLOCKSIZE * 2, ((INodeFile) fileNode).computeFileSize(true));
+    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
+        .getINode(dir.toString());
+    Diff diff = dirNode.getLastSnapshotDiff().getDiff();
+    INode nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
+    assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
+    assertEquals(BLOCKSIZE * 2,
+        ((INodeFile) nodeInDeleted_S0).computeFileSize(true));
+
+    // 2. append without closing stream
+    out = appendFileWithoutClosing(file, BLOCKSIZE);
+    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+
+    // re-check nodeInDeleted_S0
+    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+    diff = dirNode.getLastSnapshotDiff().getDiff();
+    nodeInDeleted_S0 = diff.searchDeleted(fileNode.getLocalNameBytes());
+    assertTrue(nodeInDeleted_S0 instanceof INodeFileUnderConstructionSnapshot);
+    assertEquals(BLOCKSIZE * 2,
+        ((INodeFile) nodeInDeleted_S0).computeFileSize(true));
+
+    // 3. take snapshot --> close stream
+    hdfs.createSnapshot(dir, "s1");
+    out.close();
+
+    // check: an INodeFileUnderConstructionSnapshot with size BLOCKSIZE*3 should
+    // have been stored in s1's deleted list
+    fileNode = (INodeFile) fsdir.getINode(file.toString());
+    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+    diff = dirNode.getLastSnapshotDiff().getDiff();
+    INode nodeInCreated_S1 = diff.searchCreated(fileNode.getLocalNameBytes());
+    assertTrue(fileNode == nodeInCreated_S1);
+    assertTrue(fileNode instanceof INodeFileWithSnapshot);
+    INode nodeInDeleted_S1 = diff.searchDeleted(fileNode.getLocalNameBytes());
+    assertTrue(nodeInDeleted_S1 instanceof INodeFileUnderConstructionSnapshot);
+    assertEquals(BLOCKSIZE * 3,
+        ((INodeFile) nodeInDeleted_S1).computeFileSize(true));
+    // also check the circular linked list
+    checkCircularList((INodeFileWithSnapshot) fileNode,
+        (INodeFileUnderConstructionSnapshot) nodeInDeleted_S1,
+        (INodeFileUnderConstructionSnapshot) nodeInDeleted_S0);
+
+    // 4. modify file --> append without closing stream --> take snapshot -->
+    // close stream
+    hdfs.setReplication(file, (short) (REPLICATION - 1));
+    out = appendFileWithoutClosing(file, BLOCKSIZE);
+    hdfs.createSnapshot(dir, "s2");
+    out.close();
+
+    // re-check the size of nodeInDeleted_S1
+    assertEquals(BLOCKSIZE * 3,
+        ((INodeFile) nodeInDeleted_S1).computeFileSize(true));
+  }
+}
\ No newline at end of file
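The tests above lean on hsync(SyncFlag.UPDATE_LENGTH): it makes the NameNode record the length of data appended through a still-open stream, so a snapshot taken at that point captures the in-progress size. A condensed usage sketch (assumes a running MiniDFSCluster's DistributedFileSystem as in setUp(); the path and sizes are illustrative):

    import java.util.EnumSet;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    // Sketch: why the tests hsync with UPDATE_LENGTH before snapshotting.
    class HsyncBeforeSnapshotSketch {
      static void appendThenSnapshot(DistributedFileSystem hdfs) throws Exception {
        Path dir = new Path("/TestSnapshot");
        Path file = new Path(dir, "file");
        HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
        out.write(new byte[1024]);                     // bytes appended, stream open
        out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); // persist length on the NameNode
        hdfs.createSnapshot(dir, "s0");                // snapshot captures the new length
        out.close();
      }
    }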
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index 93e097456a..945568a6b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -20,6 +20,12 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -42,6 +48,7 @@
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -68,6 +75,9 @@ public class TestSnapshot {
 
   private static Random random = new Random(seed);
 
+  private static String testDir =
+      System.getProperty("test.build.data", "build/test/data");
+
   @Rule
   public ExpectedException exception = ExpectedException.none();
 
@@ -144,13 +154,23 @@ protected TestDirectoryTree.Node[] createSnapshots() throws Exception {
     return nodes;
   }
 
+  private File getDumpTreeFile(String dir, String suffix) {
+    return new File(dir, String.format("dumptree_%s", suffix));
+  }
+
   /**
    * Restart the cluster to check edit log applying and fsimage saving/loading
    */
   private void checkFSImage() throws Exception {
+    File fsnBefore = getDumpTreeFile(testDir, "before");
+    File fsnMiddle = getDumpTreeFile(testDir, "middle");
+    File fsnAfter = getDumpTreeFile(testDir, "after");
+
     String rootDir = "/";
-    StringBuffer fsnStrBefore = fsn.getFSDirectory().getINode(rootDir)
-        .dumpTreeRecursively();
+    PrintWriter out = new PrintWriter(new FileWriter(fsnBefore, false), true);
+    fsn.getFSDirectory().getINode(rootDir)
+        .dumpTreeRecursively(out, new StringBuilder(), null);
+    out.close();
 
     cluster.shutdown();
     cluster = new MiniDFSCluster.Builder(conf).format(false)
@@ -158,11 +178,13 @@ private void checkFSImage() throws Exception {
     cluster.waitActive();
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
-    // later check fsnStrMiddle to see if the edit log is recorded and applied
+    // later check fsnMiddle to see if the edit log is recorded and applied
     // correctly
-    StringBuffer fsnStrMiddle = fsn.getFSDirectory().getINode(rootDir)
-        .dumpTreeRecursively();
-
+    out = new PrintWriter(new FileWriter(fsnMiddle, false), true);
+    fsn.getFSDirectory().getINode(rootDir)
+        .dumpTreeRecursively(out, new StringBuilder(), null);
+    out.close();
+
     // save namespace and restart cluster
     hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
     hdfs.saveNamespace();
@@ -174,18 +196,38 @@ private void checkFSImage() throws Exception {
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
     // dump the namespace loaded from fsimage
-    StringBuffer fsnStrAfter = fsn.getFSDirectory().getINode(rootDir)
-        .dumpTreeRecursively();
+    out = new PrintWriter(new FileWriter(fsnAfter, false), true);
+    fsn.getFSDirectory().getINode(rootDir)
+        .dumpTreeRecursively(out, new StringBuilder(), null);
+    out.close();
 
-    System.out.println("================== Original FSDir ==================");
-    System.out.println(fsnStrBefore.toString());
-    System.out.println("================== FSDir After Applying Edit Logs ==================");
-    System.out.println(fsnStrMiddle.toString());
-    System.out.println("================ FSDir After FSImage Saving/Loading ================");
-    System.out.println(fsnStrAfter.toString());
-    System.out.println("====================================================");
-    assertEquals(fsnStrBefore.toString(), fsnStrMiddle.toString());
-    assertEquals(fsnStrBefore.toString(), fsnStrAfter.toString());
+    compareFile(fsnBefore, fsnMiddle);
+    compareFile(fsnBefore, fsnAfter);
+  }
+
+  /** compare two files' content */
+  private void compareFile(File file1, File file2) throws IOException {
+    BufferedReader reader1 = new BufferedReader(new FileReader(file1));
+    BufferedReader reader2 = new BufferedReader(new FileReader(file2));
+    try {
+      String line1 = "";
+      String line2 = "";
+      while ((line1 = reader1.readLine()) != null
+          && (line2 = reader2.readLine()) != null) {
+        // skip the hashCode part of the object string during the comparison,
+        // also ignore the difference between INodeFile/INodeFileWithSnapshot
+        line1 = line1.replaceAll("INodeFileWithSnapshot", "INodeFile");
+        line2 = line2.replaceAll("INodeFileWithSnapshot", "INodeFile");
+        line1 = line1.replaceAll("@[\\dabcdef]+", "");
+        line2 = line2.replaceAll("@[\\dabcdef]+", "");
+        assertEquals(line1, line2);
+      }
+      Assert.assertNull(reader1.readLine());
+      Assert.assertNull(reader2.readLine());
+    } finally {
+      reader1.close();
+      reader2.close();
+    }
   }
 
   /**