HDFS-16861. RBF. Truncate API always fails when dirs use AllResolver order on Router (#5184)
Co-authored-by: xiezhineng <xiezhineng@corp.netease.com>
Reviewed-by: Inigo Goiri <inigoiri@apache.org>
Signed-off-by: Tao Li <tomscut@apache.org>
commit d25c1be517
parent 049d1762bd
@@ -702,8 +702,9 @@ public boolean truncate(String src, long newLength, String clientName)
     RemoteMethod method = new RemoteMethod("truncate",
         new Class<?>[] {String.class, long.class, String.class},
         new RemoteParam(), newLength, clientName);
+    // Truncate can return true/false, so don't expect a result
     return rpcClient.invokeSequential(locations, method, Boolean.class,
-        Boolean.TRUE);
+        null);
   }
 
   @Override
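Why the one-word change matters: invokeSequential was told to expect Boolean.TRUE from the remote truncate call, but ClientProtocol#truncate legitimately returns false when the new length is not at a block boundary and block recovery is needed. On a multi-destination (AllResolver) mount the file exists in only one subcluster, so a false answer there made the Router move on to subclusters that do not have the file, and the overall truncate always failed. Passing null as the expected result accepts the first successful response, whether true or false.

A minimal sketch of that expected-result behavior, for illustration only (this is not the actual RouterRpcClient.invokeSequential implementation, just the shape of the check that made Boolean.TRUE problematic):

import java.io.IOException;
import java.util.List;

public class SequentialInvokeSketch {

  /** A call that can be retried against different subcluster locations. */
  interface RemoteCall {
    Object run(String location) throws IOException;
  }

  /**
   * Try each location in order. With expectedResult == null, the first
   * successful response is returned as-is. With expectedResult set to
   * Boolean.TRUE, a legitimate false from truncate (block recovery pending)
   * pushes the loop on to the next location, which for AllResolver mounts
   * does not have the file, so the call ends up failing.
   */
  static Object invokeSequential(List<String> locations, RemoteCall call,
      Object expectedResult) throws IOException {
    IOException lastException = null;
    Object firstResult = null;
    for (String location : locations) {
      try {
        Object result = call.run(location);
        if (expectedResult == null || expectedResult.equals(result)) {
          return result;
        }
        if (firstResult == null) {
          firstResult = result;
        }
      } catch (IOException e) {
        lastException = e;
      }
    }
    if (lastException != null) {
      throw lastException;
    }
    return firstResult;
  }
}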
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
@@ -46,6 +47,7 @@
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -191,6 +193,18 @@ private void testAll(final String path) throws Exception {
     assertDirsEverywhere(path, 9);
     assertFilesDistributed(path, 15);
 
+    // Test truncate
+    String testTruncateFile = path + "/dir2/dir22/dir220/file-truncate.txt";
+    createTestFile(routerFs, testTruncateFile);
+    Path testTruncateFilePath = new Path(testTruncateFile);
+    routerFs.truncate(testTruncateFilePath, 10);
+    TestFileTruncate.checkBlockRecovery(testTruncateFilePath,
+        (DistributedFileSystem) routerFs);
+    assertEquals("Truncate file fails", 10,
+        routerFs.getFileStatus(testTruncateFilePath).getLen());
+    assertDirsEverywhere(path, 9);
+    assertFilesDistributed(path, 16);
+
     // Removing a directory should remove it from every subcluster
     routerFs.delete(new Path(path + "/dir2/dir22/dir220"), true);
     assertDirsEverywhere(path, 8);
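The added test exercises truncate through the Router on a mount point that spans every subcluster: it creates a file, truncates it to 10 bytes, waits for block recovery with TestFileTruncate.checkBlockRecovery, and then asserts the reported length along with the usual directory and file distribution checks. Outside the test harness, the same pattern looks roughly like the sketch below; the router URI and path are placeholders, and the polling loop stands in for checkBlockRecovery, which waits for the truncate's block recovery to complete:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RouterTruncateClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder Router RPC endpoint (8888 is the default router RPC port);
    // point this at your own federation's router address.
    FileSystem fs = FileSystem.get(URI.create("hdfs://router-host:8888"), conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Placeholder path under a multi-destination (AllResolver) mount point.
    Path file = new Path("/mount-all/dir/file-truncate.txt");

    // truncate() returns true if the file could be truncated in place and
    // false if block recovery is needed to trim the last block.
    boolean done = dfs.truncate(file, 10);
    while (!done && !dfs.isFileClosed(file)) {
      // Wait for block recovery, similar in spirit to
      // TestFileTruncate.checkBlockRecovery in the test above.
      Thread.sleep(100);
    }

    System.out.println("Length after truncate: "
        + dfs.getFileStatus(file).getLen());
    dfs.close();
  }
}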