HDFS-7287. The OfflineImageViewer (OIV) can output invalid XML depending on the filename (Ravi Prakash via Colin P. McCabe)

This commit is contained in:
Colin Patrick Mccabe 2014-10-29 12:31:59 -07:00
parent c2575fb046
commit d33e07dc49
4 changed files with 14 additions and 4 deletions

View File

@ -1113,6 +1113,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7128. Decommission slows way down when it gets towards the end.
     (Ming Ma via cnauroth)
+
+    HDFS-7287. The OfflineImageViewer (OIV) can output invalid XML depending on
+    the filename (Ravi Prakash via Colin P. McCabe)

   BREAKDOWN OF HDFS-6584 ARCHIVAL STORAGE

     HDFS-6677. Change INodeFile and FSImage to support storage policy ID.

View File

@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
+import org.apache.hadoop.hdfs.util.XMLUtils;
 import org.apache.hadoop.io.IOUtils;

 import com.google.common.collect.Lists;
@ -410,7 +411,7 @@ private void loadStringTable(InputStream in) throws IOException {
   }

   private PBImageXmlWriter o(final String e, final Object v) {
-    out.print("<" + e + ">" + v + "</" + e + ">");
+    out.print("<" + e + ">" + XMLUtils.mangleXmlString(v.toString()) + "</" + e + ">");
     return this;
   }
 }

View File

@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.util.LinkedList;

+import org.apache.hadoop.hdfs.util.XMLUtils;
 /**
  * An XmlImageVisitor walks over an fsimage structure and writes out
  * an equivalent XML document that contains the fsimage's components.
@ -83,6 +84,6 @@ void visitEnclosingElement(ImageElement element,
   }

   private void writeTag(String tag, String value) throws IOException {
-    write("<" + tag + ">" + value + "</" + tag + ">\n");
+    write("<" + tag + ">" + XMLUtils.mangleXmlString(value) + "</" + tag + ">\n");
   }
 }

View File

@ -23,6 +23,7 @@
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
+import java.io.FileWriter;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.io.RandomAccessFile;
@ -125,6 +126,10 @@ public static void createOriginalFSImage() throws IOException {
       hdfs.mkdirs(emptydir);
       writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

+      // Create a directory whose name should be escaped in XML
+      Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
+      hdfs.mkdirs(invalidXMLDir);
+
       // Get delegation tokens so we log the delegation token op
       Token<?>[] delegationTokens = hdfs
           .addDelegationTokens(TEST_RENEWER, null);
@ -220,7 +225,7 @@ public void testFileDistributionCalculator() throws IOException {
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
     // totalDirs includes root directory, empty directory, and xattr directory
-    assertEquals(NUM_DIRS + 3, totalDirs);
+    assertEquals(NUM_DIRS + 4, totalDirs);
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@ -272,7 +277,7 @@ public void testWebImageViewer() throws Exception {
       // verify the number of directories
       FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-      assertEquals(NUM_DIRS + 2, statuses.length); // contains empty and xattr directory
+      assertEquals(NUM_DIRS + 3, statuses.length); // contains empty and xattr directory
       // verify the number of files in the directory
       statuses = webhdfs.listStatus(new Path("/dir0"));