HDFS-15009. FSCK -list-corruptfileblocks return Invalid Entries. Contributed by hemanthboyina.
This commit is contained in:
parent a2dadac790
commit 6b2d6d4aaf
@@ -24,7 +24,7 @@
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_MOUNT_TABLE_CACHE_ENABLE_DEFAULT;
-import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.isParentEntry;
+import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;

 import java.io.IOException;
 import java.util.Collection;
@@ -27,7 +27,6 @@
 import java.util.EnumSet;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -224,25 +223,6 @@ public static ActiveNamenodeResolver newActiveNamenodeResolver(
     return newInstance(conf, null, null, clazz);
   }

-  /**
-   * Check if the given path is the child of parent path.
-   * @param path Path to be check.
-   * @param parent Parent path.
-   * @return True if parent path is parent entry for given path.
-   */
-  public static boolean isParentEntry(final String path, final String parent) {
-    if (!path.startsWith(parent)) {
-      return false;
-    }
-
-    if (path.equals(parent)) {
-      return true;
-    }
-
-    return path.charAt(parent.length()) == Path.SEPARATOR_CHAR
-        || parent.equals(Path.SEPARATOR);
-  }
-
   /**
    * Add the the number of children for an existing HdfsFileStatus object.
    * @param dirStatus HdfsfileStatus object.
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;

+import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -217,7 +219,7 @@ private List<RemoteLocation> getValidQuotaLocations(String path)
       boolean isChildPath = false;

       for (RemoteLocation d : dests) {
-        if (FederationUtil.isParentEntry(loc.getDest(), d.getDest())) {
+        if (isParentEntry(loc.getDest(), d.getDest())) {
           isChildPath = true;
           break;
         }
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;

-import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.isParentEntry;
+import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;

 import java.util.HashSet;
 import java.util.Map.Entry;
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.store.impl;

+import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Iterator;
@@ -25,7 +27,6 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
 import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
 import org.apache.hadoop.hdfs.server.federation.router.RouterPermissionChecker;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
@@ -140,7 +141,7 @@ public GetMountTableEntriesResponse getMountTableEntries(
     while (it.hasNext()) {
       MountTable record = it.next();
       String srcPath = record.getSourcePath();
-      if (!FederationUtil.isParentEntry(srcPath, reqSrcPath)) {
+      if (!isParentEntry(srcPath, reqSrcPath)) {
         it.remove();
       } else if (pc != null) {
         // do the READ permission check
@@ -1816,4 +1816,22 @@ public static EnumSet<HdfsFileStatus.Flags> getFlags(
     return flags;
   }

+  /**
+   * Check if the given path is the child of parent path.
+   * @param path Path to be check.
+   * @param parent Parent path.
+   * @return True if parent path is parent entry for given path.
+   */
+  public static boolean isParentEntry(final String path, final String parent) {
+    if (!path.startsWith(parent)) {
+      return false;
+    }
+
+    if (path.equals(parent)) {
+      return true;
+    }
+
+    return path.charAt(parent.length()) == Path.SEPARATOR_CHAR
+        || parent.equals(Path.SEPARATOR);
+  }
 }
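For context only (not part of the diff): DFSUtil.isParentEntry, as added above, treats a prefix as a parent entry only when the match ends exactly at a path separator, when the two paths are equal, or when the parent is the root. A minimal sketch of that behavior, using a hypothetical driver class and illustrative example paths:

    import org.apache.hadoop.hdfs.DFSUtil;

    // Hypothetical illustration class, not part of the commit.
    public class IsParentEntryExample {
      public static void main(String[] args) {
        System.out.println(DFSUtil.isParentEntry("/data/file1", "/data"));      // true: prefix ends at a separator
        System.out.println(DFSUtil.isParentEntry("/data", "/data"));            // true: identical paths
        System.out.println(DFSUtil.isParentEntry("/data/file1", "/"));          // true: root is a parent of everything
        System.out.println(DFSUtil.isParentEntry("/database/file1", "/data"));  // false: prefix match without a separator
      }
    }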
@@ -89,6 +89,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;

 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
@@ -5665,7 +5666,7 @@ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
         skip++;
         if (inode != null) {
           String src = inode.getFullPathName();
-          if (src.startsWith(path)){
+          if (isParentEntry(src, path)) {
             corruptFiles.add(new CorruptFileBlockInfo(src, blk));
             count++;
             if (count >= maxCorruptFileBlocksReturn)
@@ -94,7 +94,8 @@
  * This class provides rudimentary checking of DFS volumes for errors and
  * sub-optimal conditions.
  * <p>The tool scans all files and directories, starting from an indicated
- * root path. The following abnormal conditions are detected and handled:</p>
+ * root path and its descendants. The following abnormal conditions are
+ * detected and handled:</p>
  * <ul>
  * <li>files with blocks that are completely missing from all datanodes.<br>
  * In this case the tool can perform one of the following actions:
@@ -1171,8 +1171,14 @@ public void testFsckListCorruptFilesBlocks() throws Exception {
     outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
     System.out.println("3. good fsck out: " + outStr);
     assertTrue(outStr.contains("has 0 CORRUPT files"));
-    util.cleanup(fs, "/corruptData");
     util.cleanup(fs, "/goodData");
+
+    // validate if a directory have any invalid entries
+    util.createFiles(fs, "/corruptDa");
+    outStr = runFsck(conf, 0, true, "/corruptDa", "-list-corruptfileblocks");
+    assertTrue(outStr.contains("has 0 CORRUPT files"));
+    util.cleanup(fs, "/corruptData");
+    util.cleanup(fs, "/corruptDa");
   }

   /**
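The added test case exercises the boundary behind HDFS-15009: with corrupt files only under /corruptData, an fsck -list-corruptfileblocks query against /corruptDa previously matched them through a plain String.startsWith prefix check and reported entries for a path that contains none of them. A hedged before/after sketch of the two checks, using a hypothetical class and variable names that do not appear in FSNamesystem:

    import org.apache.hadoop.hdfs.DFSUtil;

    // Illustrative only; the values mirror the scenario covered by the new test.
    public class FsckPrefixCheckExample {
      public static void main(String[] args) {
        String src = "/corruptData/file1"; // full path of a file that has a corrupt block
        String queried = "/corruptDa";     // path handed to fsck -list-corruptfileblocks

        boolean before = src.startsWith(queried);             // true: the old check reported an invalid entry
        boolean after = DFSUtil.isParentEntry(src, queried);   // false: the fixed check filters it out
        System.out.println("before=" + before + ", after=" + after);
      }
    }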