HDFS-13744. OIV tool should better handle control characters present in file or directory names. Contributed by Zsolt Venczel.

commit 410dd3faa5
parent 3dc2988a37
Sean Mackrory, 2018-09-07 12:34:31 -06:00
2 changed files with 32 additions and 3 deletions

PBImageDelimitedTextWriter.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.text.StringEscapeUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
@@ -71,9 +73,19 @@ private void append(StringBuffer buffer, long field) {
     buffer.append(field);
   }
 
+  static final String CRLF = StringUtils.CR + StringUtils.LF;
+
   private void append(StringBuffer buffer, String field) {
     buffer.append(delimiter);
-    buffer.append(field);
+
+    String escapedField = StringEscapeUtils.escapeCsv(field);
+    if (escapedField.contains(CRLF)) {
+      escapedField = escapedField.replace(CRLF, "%x0D%x0A");
+    } else if (escapedField.contains(StringUtils.LF)) {
+      escapedField = escapedField.replace(StringUtils.LF, "%x0A");
+    }
+
+    buffer.append(escapedField);
   }
 
   @Override
@@ -82,7 +94,7 @@ public String getEntry(String parent, INode inode) {
     String inodeName = inode.getName().toStringUtf8();
     Path path = new Path(parent.isEmpty() ? "/" : parent,
         inodeName.isEmpty() ? "/" : inodeName);
-    buffer.append(path.toString());
+    append(buffer, path.toString());
     PermissionStatus p = null;
     boolean isDir = false;
     boolean hasAcl = false;
@@ -136,7 +148,7 @@ public String getEntry(String parent, INode inode) {
     append(buffer, dirString + p.getPermission().toString() + aclString);
     append(buffer, p.getUserName());
     append(buffer, p.getGroupName());
-    return buffer.toString();
+    return buffer.substring(1);
   }
 
   @Override
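
For illustration only (not part of the patch): a minimal, runnable sketch of what the new append() escaping produces for names containing line breaks, assuming commons-lang3 and commons-text are on the classpath as imported above. The class and method names (EscapeDemo, escape) are made up for this demo.

    import org.apache.commons.lang3.StringUtils;
    import org.apache.commons.text.StringEscapeUtils;

    public class EscapeDemo {
      // Same token scheme as the writer above: CR LF -> %x0D%x0A, lone LF -> %x0A.
      static final String CRLF = StringUtils.CR + StringUtils.LF;

      static String escape(String field) {
        // escapeCsv wraps the value in double quotes because it contains a line break.
        String escaped = StringEscapeUtils.escapeCsv(field);
        if (escaped.contains(CRLF)) {
          escaped = escaped.replace(CRLF, "%x0D%x0A");
        } else if (escaped.contains(StringUtils.LF)) {
          escaped = escaped.replace(StringUtils.LF, "%x0A");
        }
        return escaped;
      }

      public static void main(String[] args) {
        // Prints: "/dirContainingNewLineChar%x0Ahere"   (quotes included)
        System.out.println(escape("/dirContainingNewLineChar\nhere"));
        // Prints: "/dirContainingNewLineChar%x0D%x0Ahere"
        System.out.println(escape("/dirContainingNewLineChar\r\nhere"));
      }
    }

The printed values match the keys the test below adds to writtenFiles.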

TestOfflineImageViewer.java

@@ -27,6 +27,8 @@
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
@@ -209,6 +211,21 @@ public static void createOriginalFSImage() throws IOException {
       writtenFiles.put(entityRefXMLDir.toString(),
           hdfs.getFileStatus(entityRefXMLDir));
 
+      //Create directories with new line characters
+      Path newLFDir = new Path("/dirContainingNewLineChar"
+          + StringUtils.LF + "here");
+      hdfs.mkdirs(newLFDir);
+      dirCount++;
+      writtenFiles.put("\"/dirContainingNewLineChar%x0Ahere\"",
+          hdfs.getFileStatus(newLFDir));
+
+      Path newCRLFDir = new Path("/dirContainingNewLineChar"
+          + PBImageDelimitedTextWriter.CRLF + "here");
+      hdfs.mkdirs(newCRLFDir);
+      dirCount++;
+      writtenFiles.put("\"/dirContainingNewLineChar%x0D%x0Ahere\"",
+          hdfs.getFileStatus(newCRLFDir));
+
       //Create a directory with sticky bits
       Path stickyBitDir = new Path("/stickyBit");
       hdfs.mkdirs(stickyBitDir);
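
A closing note on the writer change above, for illustration only: since the path now goes through append(), which always writes the delimiter before the field, every row starts with one extra delimiter that getEntry() trims with buffer.substring(1) instead of returning buffer.toString(). A rough sketch of that assembly; the RowDemo class, the "," delimiter, and the column values are made up for this demo (the Delimited processor's real delimiter is configurable).

    public class RowDemo {
      public static void main(String[] args) {
        StringBuffer buffer = new StringBuffer();
        String delimiter = ",";  // assumed delimiter, for the demo only
        // Every field, including the path, is written as "delimiter first, then value".
        buffer.append(delimiter).append("\"/dirContainingNewLineChar%x0Ahere\"");
        buffer.append(delimiter).append("drwxr-xr-x"); // illustrative permission column
        buffer.append(delimiter).append("hdfs");       // illustrative owner column
        // substring(1) strips the delimiter that precedes the first field.
        System.out.println(buffer.substring(1));
        // Prints: "/dirContainingNewLineChar%x0Ahere",drwxr-xr-x,hdfs
      }
    }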