HDFS-10976. Report erasure coding policy of EC files in Fsck. Contributed by Wei-Chiu Chuang.
commit 5e83a21cb6
parent 3fbf4cd5da
@@ -55,6 +55,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -540,11 +541,20 @@ private void collectFileSummary(String path, HdfsFileStatus file, Result res,
     res.totalFiles++;
     res.totalSize += fileLen;
     res.totalBlocks += blocks.locatedBlockCount();
+    String redundancyPolicy;
+    ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
+    if (ecPolicy == null) { // a replicated file
+      redundancyPolicy = "replicated: replication=" +
+          file.getReplication() + ",";
+    } else {
+      redundancyPolicy = "erasure-coded: policy=" + ecPolicy.getName() + ",";
+    }
+
     if (showOpenFiles && isOpen) {
-      out.print(path + " " + fileLen + " bytes, " +
+      out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
           blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
     } else if (showFiles) {
-      out.print(path + " " + fileLen + " bytes, " +
+      out.print(path + " " + fileLen + " bytes, " + redundancyPolicy + " " +
           blocks.locatedBlockCount() + " block(s): ");
     } else if (showprogress) {
       out.print('.');
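
The redundancy prefix added above is derived entirely from the HdfsFileStatus returned by the NameNode, so the decision can be illustrated in isolation. A minimal sketch of the same logic (the helper name describeRedundancy and its enclosing class are hypothetical, not part of the patch):

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class RedundancySummary {
  /**
   * Mirrors the logic added to collectFileSummary above: a null
   * ErasureCodingPolicy marks a replicated file; otherwise the file is
   * erasure-coded and its policy name is reported.
   */
  static String describeRedundancy(HdfsFileStatus file) {
    ErasureCodingPolicy ecPolicy = file.getErasureCodingPolicy();
    if (ecPolicy == null) { // a replicated file
      return "replicated: replication=" + file.getReplication() + ",";
    }
    return "erasure-coded: policy=" + ecPolicy.getName() + ",";
  }
}

Given this prefix, a per-file fsck line has the form <path> <length> bytes, replicated: replication=<n>, <count> block(s): for replicated files, or <path> <length> bytes, erasure-coded: policy=<policy name>, <count> block(s): for EC files; the test below matches on exactly these policy= and replication= fragments.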
@@ -1700,9 +1700,21 @@ public void testECFsck() throws Exception {
     // restart the cluster; bring up namenode but not the data nodes
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(0).format(false).build();
-    outStr = runFsck(conf, 1, true, "/");
+    outStr = runFsck(conf, 1, true, "/", "-files", "-blocks");
     // expect the result is corrupt
     assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    String[] outLines = outStr.split("\\r?\\n");
+    for (String line: outLines) {
+      if (line.contains(largeFilePath.toString())) {
+        final HdfsFileStatus file = cluster.getNameNode().getRpcServer().
+            getFileInfo(largeFilePath.toString());
+        assertTrue(line.contains("policy=" +
+            file.getErasureCodingPolicy().getName()));
+      } else if (line.contains(replFilePath.toString())) {
+        assertTrue(line.contains("replication=" + cluster.getFileSystem().
+            getFileStatus(replFilePath).getReplication()));
+      }
+    }
     System.out.println(outStr);
   }
 
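
For completeness, the same check can be reproduced outside the MiniDFSCluster harness by running fsck programmatically, which is roughly what the test's runFsck helper does under the hood. A rough sketch, not part of the patch, using the standard DFSck tool and ToolRunner against a running cluster (the scan for the erasure-coded: policy= prefix assumes the change above is in place):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckPolicyCheck {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes, true);
    // Run fsck with -files -blocks so per-file lines (including the new
    // redundancy prefix) are printed, as in the test above.
    int ret = ToolRunner.run(new DFSck(new HdfsConfiguration(), out),
        new String[] {"/", "-files", "-blocks"});
    String output = bytes.toString();
    for (String line : output.split("\\r?\\n")) {
      if (line.contains("erasure-coded: policy=")) {
        System.out.println("EC file reported: " + line);
      }
    }
    System.exit(ret);
  }
}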