HDFS-14908. LeaseManager should check parent-child relationship when filter open files. Contributed by Jinglun.

Committed by Inigo Goiri on 2019-12-16 18:41:45 -08:00
parent 578bd101a6
commit 24080666e5
4 changed files with 71 additions and 9 deletions
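
Background: before this change, both FSNamesystem#getFilesBlockingDecom and
LeaseManager#getUnderConstructionFiles filtered open files with a plain
String#startsWith(path) prefix test, so a filter path of "/base" also matched
entries under the sibling directory "/base-open". The patch switches both call
sites to DFSUtil.isParentEntry, which accepts only the path itself and its true
descendants. As a rough illustration of that kind of parent/child check (a
minimal sketch, not the actual DFSUtil source; the class and method names below
are invented for the example):

    // Illustration only: the kind of check the patch switches to.
    class ParentEntryCheckSketch {
      /** True if fullPath equals parent or lies underneath it. */
      static boolean isUnder(String fullPath, String parent) {
        if (parent.equals("/")) {
          return true;                    // treat everything as under the root
        }
        if (!fullPath.startsWith(parent)) {
          return false;                   // not even a string prefix
        }
        // Accept "/base" itself and children such as "/base/f1";
        // reject prefix-only siblings such as "/base-open".
        return fullPath.length() == parent.length()
            || fullPath.charAt(parent.length()) == '/';
      }
    }

With such a check, a file like /base-open/open-1-0 is no longer reported for
the filter /base, which is exactly what the new test at the bottom of this
commit pins down.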

FSNamesystem.java

@@ -1826,16 +1826,17 @@ BatchedListEntries<OpenFileEntry> listOpenFiles(long prevId,
checkSuperuserPrivilege();
checkOperation(OperationCategory.READ);
BatchedListEntries<OpenFileEntry> batchedListEntries;
+ String normalizedPath = new Path(path).toString(); // normalize path.
try {
readLock();
try {
checkOperation(OperationCategory.READ);
if (openFilesTypes.contains(OpenFilesType.ALL_OPEN_FILES)) {
batchedListEntries = leaseManager.getUnderConstructionFiles(prevId,
- path);
+ normalizedPath);
} else {
if (openFilesTypes.contains(OpenFilesType.BLOCKING_DECOMMISSION)) {
- batchedListEntries = getFilesBlockingDecom(prevId, path);
+ batchedListEntries = getFilesBlockingDecom(prevId, normalizedPath);
} else {
throw new IllegalArgumentException("Unknown OpenFileType: "
+ openFilesTypes);
@@ -1874,7 +1875,7 @@ public BatchedListEntries<OpenFileEntry> getFilesBlockingDecom(long prevId,
String fullPathName = inodeFile.getFullPathName();
if (org.apache.commons.lang3.StringUtils.isEmpty(path)
- || fullPathName.startsWith(path)) {
+ || DFSUtil.isParentEntry(fullPathName, path)) {
openFileEntries.add(new OpenFileEntry(inodeFile.getId(),
inodeFile.getFullPathName(),
inodeFile.getFileUnderConstructionFeature().getClientName(),
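
A note on the new normalizedPath line above: org.apache.hadoop.fs.Path
normalizes its string form, in particular dropping a trailing separator, so a
filter passed as "/base/" is compared as "/base" by the time it reaches the
parent-entry check. A small sketch of that (assumed) Path#toString behavior,
not part of the patch itself:

    import org.apache.hadoop.fs.Path;

    public class PathNormalizeSketch {
      public static void main(String[] args) {
        // Both print "/base"; this is why the new test can call
        // verifyOpenFiles(openFiles, "/base/") and still see the files
        // created under /base.
        System.out.println(new Path("/base/"));
        System.out.println(new Path("/base"));
      }
    }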

LeaseManager.java

@@ -42,6 +42,7 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
@@ -315,7 +316,8 @@ public BatchedListEntries<OpenFileEntry> getUnderConstructionFiles(
}
fullPathName = inodeFile.getFullPathName();
- if (StringUtils.isEmpty(path) || fullPathName.startsWith(path)) {
+ if (StringUtils.isEmpty(path) ||
+ DFSUtil.isParentEntry(fullPathName, path)) {
openFileEntries.add(new OpenFileEntry(inodeFile.getId(), fullPathName,
inodeFile.getFileUnderConstructionFeature().getClientName(),
inodeFile.getFileUnderConstructionFeature().getClientMachine()));
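
For context, getUnderConstructionFiles is reached from the client through
ClientProtocol#listOpenFiles (NameNodeRpcServer delegates to FSNamesystem,
which takes the lease-manager path shown above). From user code the filter is
typically exercised via HdfsAdmin#listOpenFiles or
hdfs dfsadmin -listOpenFiles -path <path>. A hedged usage sketch (the NameNode
URI and the filter path are placeholders; check the exact HdfsAdmin signature
in your Hadoop version):

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
    import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;

    public class ListOpenFilesUnderPath {
      public static void main(String[] args) throws Exception {
        HdfsAdmin admin =
            new HdfsAdmin(new URI("hdfs://namenode:8020"), new Configuration());
        // With this fix, only files at or below /base are returned; a sibling
        // directory such as /base-open no longer matches the filter.
        RemoteIterator<OpenFileEntry> it =
            admin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES), "/base");
        while (it.hasNext()) {
          OpenFileEntry entry = it.next();
          System.out.println(entry.getFilePath()
              + " (client " + entry.getClientName() + ")");
        }
      }
    }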

DFSTestUtil.java

@@ -2392,13 +2392,32 @@ public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
}
}
+ /**
+ * Create open files under root path.
+ * @param fs the filesystem.
+ * @param filePrefix the prefix of the files.
+ * @param numFilesToCreate the number of files to create.
+ */
public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
String filePrefix, int numFilesToCreate) throws IOException {
+ return createOpenFiles(fs, new Path("/"), filePrefix, numFilesToCreate);
+ }
+ /**
+ * Create open files.
+ * @param fs the filesystem.
+ * @param baseDir the base path of the files.
+ * @param filePrefix the prefix of the files.
+ * @param numFilesToCreate the number of files to create.
+ */
+ public static Map<Path, FSDataOutputStream> createOpenFiles(FileSystem fs,
+ Path baseDir, String filePrefix, int numFilesToCreate)
+ throws IOException {
final Map<Path, FSDataOutputStream> filesCreated = new HashMap<>();
final byte[] buffer = new byte[(int) (1024 * 1.75)];
final Random rand = new Random(0xFEED0BACL);
for (int i = 0; i < numFilesToCreate; i++) {
- Path file = new Path("/" + filePrefix + "-" + i);
+ Path file = new Path(baseDir, filePrefix + "-" + i);
FSDataOutputStream stm = fs.create(file, true, 1024, (short) 1, 1024);
rand.nextBytes(buffer);
stm.write(buffer);
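
The only behavioral change in the existing helper is how each file path is
built: the old code hard-coded the root ("/" + filePrefix + "-" + i), while the
new overload resolves the name against an arbitrary baseDir with the
Path(Path, String) constructor. A tiny sketch of the difference (illustration
only):

    import org.apache.hadoop.fs.Path;

    public class OpenFilePathSketch {
      public static void main(String[] args) {
        Path baseDir = new Path("/base");
        for (int i = 0; i < 2; i++) {
          // Old form: always under the root.
          System.out.println(new Path("/" + "open-1" + "-" + i)); // /open-1-0, /open-1-1
          // New form: resolved against the given base directory.
          System.out.println(new Path(baseDir, "open-1-" + i));   // /base/open-1-0, ...
        }
      }
    }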

TestListOpenFiles.java

@@ -157,13 +157,22 @@ private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles,
remainingFiles.size() == 0);
}
+ /**
+ * Verify all open files.
+ */
private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles)
throws IOException {
- verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
- OpenFilesIterator.FILTER_PATH_DEFAULT);
+ verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+ }
+ /**
+ * Verify open files with specified filter path.
+ */
+ private void verifyOpenFiles(Map<Path, FSDataOutputStream> openFiles,
+ String path) throws IOException {
+ verifyOpenFiles(openFiles, EnumSet.of(OpenFilesType.ALL_OPEN_FILES), path);
verifyOpenFiles(new HashMap<>(),
- EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
- OpenFilesIterator.FILTER_PATH_DEFAULT);
+ EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION), path);
}
private Set<Path> createFiles(FileSystem fileSystem, String fileNamePrefix,
@@ -255,4 +264,35 @@ public void run() {
}
}
}
+ @Test(timeout = 120000)
+ public void testListOpenFilesWithFilterPath() throws IOException {
+ HashMap<Path, FSDataOutputStream> openFiles = new HashMap<>();
+ createFiles(fs, "closed", 10);
+ verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+ BatchedEntries<OpenFileEntry> openFileEntryBatchedEntries = nnRpc
+ .listOpenFiles(0, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ assertTrue("Open files list should be empty!",
+ openFileEntryBatchedEntries.size() == 0);
+ BatchedEntries<OpenFileEntry> openFilesBlockingDecomEntries = nnRpc
+ .listOpenFiles(0, EnumSet.of(OpenFilesType.BLOCKING_DECOMMISSION),
+ OpenFilesIterator.FILTER_PATH_DEFAULT);
+ assertTrue("Open files list blocking decommission should be empty!",
+ openFilesBlockingDecomEntries.size() == 0);
+ openFiles.putAll(
+ DFSTestUtil.createOpenFiles(fs, new Path("/base"), "open-1", 1));
+ Map<Path, FSDataOutputStream> baseOpen =
+ DFSTestUtil.createOpenFiles(fs, new Path("/base-open"), "open-1", 1);
+ verifyOpenFiles(openFiles, "/base");
+ verifyOpenFiles(openFiles, "/base/");
+ openFiles.putAll(baseOpen);
+ while (openFiles.size() > 0) {
+ DFSTestUtil.closeOpenFiles(openFiles, 1);
+ verifyOpenFiles(openFiles, OpenFilesIterator.FILTER_PATH_DEFAULT);
+ }
+ }
}