HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)

Gera Shegalov 2015-02-12 04:32:43 -08:00
parent 67ed59348d
commit cbb492578e
4 changed files with 53 additions and 15 deletions
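
In short: DFSck used to hand the raw command-line path straight to its DistributedFileSystem check, so a path that crosses file systems (a viewfs mount point or a cross-FS symlink) was rejected even when it ultimately lands in HDFS. The patch resolves the path first via FileSystem#resolvePath. A minimal sketch of the idea, with a hypothetical viewfs URI and a preconfigured mount table assumed:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only, not part of this commit; "viewfs://cluster/mount/data" is a
// hypothetical path whose mount table entry points at an HDFS directory.
public class ResolveSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path p = new Path("viewfs://cluster/mount/data");
    FileSystem fs = p.getFileSystem(conf);  // ViewFileSystem for this URI
    Path resolved = fs.resolvePath(p);      // e.g. hdfs://nn0:8020/data
    // The resolved path belongs to the backing DistributedFileSystem,
    // which is what fsck needs in order to locate the owning namenode.
    System.out.println(resolved);
  }
}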

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
     (Takanobu Asanuma via szetszwo)
 
+    HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
+    (gera)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -225,6 +225,14 @@ private Integer listCorruptFileBlocks(String dir, String baseUrl)
     return errCode;
   }
 
+  private Path getResolvedPath(String dir) throws IOException {
+    Configuration conf = getConf();
+    Path dirPath = new Path(dir);
+    FileSystem fs = dirPath.getFileSystem(conf);
+    return fs.resolvePath(dirPath);
+  }
+
   /**
    * Derive the namenode http address from the current file system,
    * either default or as set by "-fs" in the generic options.
@@ -236,19 +244,12 @@ private URI getCurrentNamenodeAddress(Path target) throws IOException {
     Configuration conf = getConf();
 
     //get the filesystem object to verify it is an HDFS system
-    final FileSystem fs;
-    try {
-      fs = target.getFileSystem(conf);
-    } catch (IOException ioe) {
-      System.err.println("FileSystem is inaccessible due to:\n"
-          + StringUtils.stringifyException(ioe));
-      return null;
-    }
+    final FileSystem fs = target.getFileSystem(conf);
     if (!(fs instanceof DistributedFileSystem)) {
       System.err.println("FileSystem is " + fs.getUri());
       return null;
     }
 
     return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
         DFSUtil.getHttpClientScheme(conf));
   }
@@ -303,8 +304,16 @@ else if (args[idx].equals("-list-corruptfileblocks")) {
       dir = "/";
     }
 
-    final Path dirpath = new Path(dir);
-    final URI namenodeAddress = getCurrentNamenodeAddress(dirpath);
+    Path dirpath = null;
+    URI namenodeAddress = null;
+    try {
+      dirpath = getResolvedPath(dir);
+      namenodeAddress = getCurrentNamenodeAddress(dirpath);
+    } catch (IOException ioe) {
+      System.err.println("FileSystem is inaccessible due to:\n"
+          + StringUtils.stringifyException(ioe));
+    }
+
     if (namenodeAddress == null) {
       //Error message already output in {@link #getCurrentNamenodeAddress()}
       System.err.println("DFSck exiting.");
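
Note that the error handling that used to live inside getCurrentNamenodeAddress now sits in the caller, so a failure in either getResolvedPath or the namenode lookup takes the same route: the stack trace is printed, namenodeAddress stays null, and the existing "DFSck exiting." bail-out fires. The upshot is that a client whose default file system is viewfs can run fsck against a mount-point path (e.g. hdfs fsck /mount/nn_0/file, a hypothetical path) and have it resolved to the backing hdfs:// URI before the DistributedFileSystem check.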

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -211,10 +211,16 @@ private void verifyAuditLogs() throws IOException {
     try {
       // Audit log should contain one getfileinfo and one fsck
       reader = new BufferedReader(new FileReader(auditLogFile));
-      String line = reader.readLine();
-      assertNotNull(line);
-      assertTrue("Expected getfileinfo event not found in audit log",
-          getfileinfoPattern.matcher(line).matches());
+      String line;
+
+      // one extra getfileinfo stems from resolving the path
+      //
+      for (int i = 0; i < 2; i++) {
+        line = reader.readLine();
+        assertNotNull(line);
+        assertTrue("Expected getfileinfo event not found in audit log",
+            getfileinfoPattern.matcher(line).matches());
+      }
       line = reader.readLine();
       assertNotNull(line);
       assertTrue("Expected fsck event not found in audit log", fsckPattern

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsckWithMultipleNameNodes.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.net.URI;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
@@ -26,6 +27,8 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -119,6 +122,23 @@ private void runTest(final int nNameNodes, final int nDataNodes,
       LOG.info("result=" + result);
       Assert.assertTrue(result.contains("Status: HEALTHY"));
     }
+
+    // Test viewfs
+    //
+    LOG.info("RUN_TEST 3");
+    final String[] vurls = new String[nNameNodes];
+    for (int i = 0; i < vurls.length; i++) {
+      String link = "/mount/nn_" + i + FILE_NAME;
+      ConfigUtil.addLink(conf, link, new URI(urls[i]));
+      vurls[i] = "viewfs:" + link;
+    }
+
+    for(int i = 0; i < vurls.length; i++) {
+      LOG.info("vurls[" + i + "]=" + vurls[i]);
+      final String result = TestFsck.runFsck(conf, 0, false, vurls[i]);
+      LOG.info("result=" + result);
+      Assert.assertTrue(result.contains("Status: HEALTHY"));
+    }
   } finally {
     cluster.shutdown();
   }
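
For context, ConfigUtil.addLink only records a mount-table entry in the Configuration; under the standard viewfs key layout (mount table name "default"), each call above is roughly equivalent to the line below, so a viewfs:/mount/nn_i path fans out to the i-th cluster and fsck is expected to report HEALTHY through the mount point exactly as it does against the direct hdfs:// URLs.

// Illustrative only: roughly what ConfigUtil.addLink(conf, link, new URI(urls[i]))
// writes; the key layout is assumed from the viewfs mount-table scheme.
conf.set("fs.viewfs.mounttable.default.link." + link, urls[i]);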