HDFS-5988. Bad fsimage always generated after upgrade. (wang)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1570429 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 2014-02-21 02:48:40 +00:00
parent 772ead791c
commit b00817b54a
4 changed files with 80 additions and 27 deletions

CHANGES.txt

@@ -546,6 +546,8 @@ Release 2.4.0 - UNRELEASED
     HDFS-5982. Need to update snapshot manager when applying editlog for deleting
     a snapshottable directory. (jing9)
 
+    HDFS-5988. Bad fsimage always generated after upgrade. (wang)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

FSImageFormat.java

@@ -694,8 +694,7 @@ public INode loadINodeWithLocalName(boolean isSnapshotINode,
       localName =
           renameReservedComponentOnUpgrade(localName, getLayoutVersion());
       INode inode = loadINode(localName, isSnapshotINode, in, counter);
-      if (updateINodeMap
-          && LayoutVersion.supports(Feature.ADD_INODE_ID, getLayoutVersion())) {
+      if (updateINodeMap) {
         namesystem.dir.addToInodeMap(inode);
       }
       return inode;
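This one-line change is the heart of the fix. Previously, loadINodeWithLocalName only added a loaded inode to the namesystem's inode map when the source image's layout version already supported ADD_INODE_ID, which is precisely the feature an image from an older release lacks during an upgrade. Since the new protobuf fsimage saver serializes inodes from that map, inodes skipped here went missing from the image written after the upgrade. A minimal, self-contained sketch of the failure mode, with hypothetical names (this is a toy model, not Hadoop code):

import java.util.HashMap;
import java.util.Map;

// Toy model of the bug: guarding the map update on the *old* image's layout
// version means an upgrade from a pre-inode-id image registers nothing, and
// a saver that walks the map writes out a truncated image.
class InodeMapSketch {
  static class Inode {
    final long id;
    final String name;
    Inode(long id, String name) { this.id = id; this.name = name; }
  }

  public static void main(String[] args) {
    // Upgrading from an old image: the source layout has no inode-id feature.
    boolean sourceLayoutHasInodeIds = false;
    Map<Long, Inode> inodeMap = new HashMap<Long, Inode>();
    Inode loaded = new Inode(16385L, "dir");

    // Buggy behavior: condition on the old layout, so the map stays empty.
    if (sourceLayoutHasInodeIds) {
      inodeMap.put(loaded.id, loaded);
    }
    System.out.println("inodes the saver would write (buggy): " + inodeMap.size()); // 0

    // Fixed behavior: always register the inode when the caller asks for it.
    inodeMap.put(loaded.id, loaded);
    System.out.println("inodes the saver would write (fixed): " + inodeMap.size()); // 1
  }
}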

LsrPBImage.java

@@ -28,6 +28,8 @@
 import java.util.Comparator;
 import java.util.HashMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;

@@ -63,6 +65,9 @@
  * output of the lsr command.
  */
 final class LsrPBImage {
+
+  private static final Log LOG = LogFactory.getLog(LsrPBImage.class);
+
   private final Configuration conf;
   private final PrintWriter out;
   private String[] stringTable;
@@ -133,6 +138,10 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
   private void list(String parent, long dirId) {
     INode inode = inodes.get(dirId);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Listing directory id " + dirId + " parent '" + parent
+          + "' (INode is " + inode + ")");
+    }
     listINode(parent.isEmpty() ? "/" : parent, inode);
     long[] children = dirmap.get(dirId);
     if (children == null) {
@@ -189,6 +198,9 @@ private long getFileSize(INodeFile f) {
   }
 
   private void loadINodeDirectorySection(InputStream in) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading directory section");
+    }
     while (true) {
       INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
           .parseDelimitedFrom(in);
@@ -205,10 +217,21 @@ private void loadINodeDirectorySection(InputStream in) throws IOException {
         l[i] = refList.get(refId).getReferredId();
       }
       dirmap.put(e.getParent(), l);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Loaded directory (parent " + e.getParent()
+            + ") with " + e.getChildrenCount() + " children and "
+            + e.getRefChildrenCount() + " reference children");
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loaded " + dirmap.size() + " directories");
     }
   }
 
   private void loadINodeReferenceSection(InputStream in) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loading inode reference section");
+    }
     while (true) {
       INodeReferenceSection.INodeReference e = INodeReferenceSection
           .INodeReference.parseDelimitedFrom(in);
@@ -216,24 +239,44 @@ private void loadINodeReferenceSection(InputStream in) throws IOException {
         break;
       }
       refList.add(e);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded inode reference named '" + e.getName()
+            + "' referring to id " + e.getReferredId() + "");
+      }
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Loaded " + refList.size() + " inode references");
     }
   }
 
   private void loadINodeSection(InputStream in) throws IOException {
     INodeSection s = INodeSection.parseDelimitedFrom(in);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Found " + s.getNumInodes() + " inodes in inode section");
+    }
     for (int i = 0; i < s.getNumInodes(); ++i) {
       INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
       inodes.put(p.getId(), p);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded inode id " + p.getId() + " type " + p.getType()
+            + " name '" + p.getName().toStringUtf8() + "'");
+      }
     }
   }
 
   private void loadStringTable(InputStream in) throws IOException {
     StringTableSection s = StringTableSection.parseDelimitedFrom(in);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Found " + s.getNumEntry() + " strings in string section");
+    }
     stringTable = new String[s.getNumEntry() + 1];
     for (int i = 0; i < s.getNumEntry(); ++i) {
       StringTableSection.Entry e = StringTableSection.Entry
           .parseDelimitedFrom(in);
       stringTable[e.getId()] = e.getStr();
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Loaded string " + e.getStr());
+      }
     }
   }
 }
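All of the LsrPBImage additions follow a single guarded-logging idiom: each message is wrapped in an isDebugEnabled()/isTraceEnabled() check so the string concatenation is only paid when that level is active, with DEBUG used for per-section summaries and TRACE for per-record detail. A small sketch of the pattern using the same commons-logging calls (the class and method names here are hypothetical):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

// Sketch of the guarded-logging idiom added throughout LsrPBImage.
class GuardedLoggingSketch {
  private static final Log LOG = LogFactory.getLog(GuardedLoggingSketch.class);

  void loadRecord(long id, String name) {
    // TRACE: per-record detail; the guard keeps the message construction
    // off the hot path when tracing is disabled.
    if (LOG.isTraceEnabled()) {
      LOG.trace("Loaded record id " + id + " name '" + name + "'");
    }
  }

  void finishSection(int count) {
    // DEBUG: one summary line per section.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Loaded " + count + " records");
    }
  }
}

Since TRACE output scales with the number of inodes in the image, keeping per-record messages at TRACE and summaries at DEBUG lets the tool stay quiet by default while still being debuggable against a bad fsimage.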

TestDFSUpgradeFromImage.java

@@ -330,13 +330,14 @@ public void testUpgradeFromCorruptRel22Image() throws IOException {
    * paths to test renaming on upgrade
    */
   @Test
-  public void testUpgradeFromRel2ReservedImage() throws IOException {
+  public void testUpgradeFromRel2ReservedImage() throws Exception {
     unpackStorage(HADOOP2_RESERVED_IMAGE);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
     try {
       cluster =
-          new MiniDFSCluster.Builder(new Configuration())
+          new MiniDFSCluster.Builder(conf)
           .format(false)
           .startupOption(StartupOption.UPGRADE)
           .numDataNodes(0).build();
@@ -355,28 +356,15 @@ public void testUpgradeFromRel2ReservedImage() throws IOException {
         ".snapshot=.user-snapshot," +
         ".reserved=.my-reserved");
     cluster =
-        new MiniDFSCluster.Builder(new Configuration())
+        new MiniDFSCluster.Builder(conf)
         .format(false)
         .startupOption(StartupOption.UPGRADE)
         .numDataNodes(0).build();
-    // Make sure the paths were renamed as expected
     DistributedFileSystem dfs = cluster.getFileSystem();
-    ArrayList<Path> toList = new ArrayList<Path>();
-    ArrayList<String> found = new ArrayList<String>();
-    toList.add(new Path("/"));
-    while (!toList.isEmpty()) {
-      Path p = toList.remove(0);
-      FileStatus[] statuses = dfs.listStatus(p);
-      for (FileStatus status: statuses) {
-        final String path = status.getPath().toUri().getPath();
-        System.out.println("Found path " + path);
-        found.add(path);
-        if (status.isDirectory()) {
-          toList.add(status.getPath());
-        }
-      }
-    }
-    String[] expected = new String[] {
+    // Make sure the paths were renamed as expected
+    // Also check that paths are present after a restart, checks that the
+    // upgraded fsimage has the same state.
+    final String[] expected = new String[] {
       "/edits",
       "/edits/.reserved",
       "/edits/.user-snapshot",
@@ -393,12 +381,33 @@ public void testUpgradeFromRel2ReservedImage() throws IOException {
       "/.my-reserved/edits-touch",
       "/.my-reserved/image-touch"
     };
-    for (String s: expected) {
-      assertTrue("Did not find expected path " + s, found.contains(s));
+    for (int i=0; i<2; i++) {
+      // Restart the second time through this loop
+      if (i==1) {
+        cluster.finalizeCluster(conf);
+        cluster.restartNameNode(true);
+      }
+      ArrayList<Path> toList = new ArrayList<Path>();
+      toList.add(new Path("/"));
+      ArrayList<String> found = new ArrayList<String>();
+      while (!toList.isEmpty()) {
+        Path p = toList.remove(0);
+        FileStatus[] statuses = dfs.listStatus(p);
+        for (FileStatus status: statuses) {
+          final String path = status.getPath().toUri().getPath();
+          System.out.println("Found path " + path);
+          found.add(path);
+          if (status.isDirectory()) {
+            toList.add(status.getPath());
+          }
+        }
+      }
+      for (String s: expected) {
+        assertTrue("Did not find expected path " + s, found.contains(s));
+      }
+      assertEquals("Found an unexpected path while listing filesystem",
+          found.size(), expected.length);
     }
-    assertEquals("Found an unexpected path while listing filesystem",
-        found.size(), expected.length);
   } finally {
     if (cluster != null) {
       cluster.shutdown();
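The restructured test runs its namespace walk twice: the second pass finalizes the upgrade and restarts the NameNode, so the listing is re-verified against the fsimage the upgraded NameNode just wrote, which is exactly what regressed in HDFS-5988. The walk itself is a plain breadth-first traversal; a sketch of it factored into a standalone helper (the helper name is hypothetical, but the FileSystem calls are the ones the test uses inline):

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Breadth-first listing of every path in a filesystem, as the test performs
// before and after the NameNode restart; comparing the two listings checks
// that the saved fsimage round-trips to the same namespace.
class NamespaceWalk {
  static List<String> listAll(FileSystem fs) throws IOException {
    List<String> found = new ArrayList<String>();
    Deque<Path> toList = new ArrayDeque<Path>();
    toList.add(new Path("/"));
    while (!toList.isEmpty()) {
      Path p = toList.remove();
      for (FileStatus status : fs.listStatus(p)) {
        found.add(status.getPath().toUri().getPath());
        if (status.isDirectory()) {
          toList.add(status.getPath());
        }
      }
    }
    return found;
  }
}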