HADOOP-18668. Path capability probe for truncate is only honored by RawLocalFileSystem (#5492)
This commit is contained in:
parent 9a8287c36f
commit 0dbe1d3284
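The hunks below add CommonPathCapabilities.FS_TRUNCATE to the capability switch of the affected FileSystem implementations and extend the truncate tests to probe the capability before truncating. As a minimal usage sketch (not part of this commit; the class name, path, and length are hypothetical), a client can guard a truncate call with the same probe:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateProbeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/example.txt");   // hypothetical path
    try (FileSystem fs = file.getFileSystem(conf)) {
      // Probe the capability before calling truncate(); after this change the
      // probe is honored by the HDFS/WebHDFS/HttpFS/ViewFS implementations as
      // well, not only RawLocalFileSystem.
      if (fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE)) {
        // truncate() returns true when the file is immediately ready for reuse.
        boolean ready = fs.truncate(file, 0L);
        System.out.println("truncate issued, ready=" + ready);
      } else {
        System.out.println("truncate not supported by scheme " + fs.getScheme());
      }
    }
  }
}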
@@ -53,6 +53,7 @@ public static Optional<Boolean> hasPathCapability(final Path path,
     case CommonPathCapabilities.FS_SNAPSHOTS:
     case CommonPathCapabilities.FS_STORAGEPOLICY:
     case CommonPathCapabilities.FS_XATTRS:
+    case CommonPathCapabilities.FS_TRUNCATE:
       return Optional.of(true);
     case CommonPathCapabilities.FS_SYMLINKS:
       return Optional.of(FileSystem.areSymlinksEnabled());
@@ -1646,6 +1646,7 @@ public boolean hasPathCapability(final Path path, final String capability)
     case CommonPathCapabilities.FS_SNAPSHOTS:
     case CommonPathCapabilities.FS_STORAGEPOLICY:
     case CommonPathCapabilities.FS_XATTRS:
+    case CommonPathCapabilities.FS_TRUNCATE:
       return true;
     case CommonPathCapabilities.FS_SYMLINKS:
       return false;
@@ -22,6 +22,7 @@
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
@@ -302,9 +303,17 @@ private void testTruncate() throws Exception {
       AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
 
       fs.close();
+      assertPathCapabilityForTruncate(file);
     }
   }
 
+  private void assertPathCapabilityForTruncate(Path file) throws Exception {
+    FileSystem fs = this.getHttpFSFileSystem();
+    assertTrue("HttpFS/WebHdfs/SWebHdfs support truncate",
+        fs.hasPathCapability(file, CommonPathCapabilities.FS_TRUNCATE));
+    fs.close();
+  }
+
   private void testConcat() throws Exception {
     Configuration config = getProxiedFSConf();
     config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
@@ -22,6 +22,7 @@
 
 import java.util.function.Supplier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
@@ -103,6 +104,8 @@ public void testTruncateWithViewFileSystem()
     out.writeBytes("drtatedasfdasfgdfas");
     out.close();
     int newLength = 10;
+    assertTrue("ViewFS supports truncate",
+        fsView.hasPathCapability(filePath, CommonPathCapabilities.FS_TRUNCATE));
     boolean isReady = fsView.truncate(filePath, newLength);
     if (!isReady) {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
@@ -191,4 +192,21 @@ public void testQuota() throws IOException {
       }
     }
   }
+
+  @Test
+  public void testPathCapabilities() throws IOException {
+    Configuration conf = getTestConfiguration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
+      URI defaultUri = URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
+      conf.set("fs.viewfs.mounttable." + defaultUri.getHost() + ".linkFallback",
+          defaultUri.toString());
+      try (ViewDistributedFileSystem fileSystem = (ViewDistributedFileSystem) FileSystem.get(
+          conf)) {
+        final Path testFile = new Path("/test");
+        assertTrue("ViewDfs supports truncate",
+            fileSystem.hasPathCapability(testFile, CommonPathCapabilities.FS_TRUNCATE));
+      }
+    }
+  }
+
 }
@@ -33,6 +33,7 @@
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
 
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.ipc.RemoteException;
@@ -143,6 +144,8 @@ public void testBasicTruncate() throws IOException {
       writeContents(contents, fileLength, p);
 
       int newLength = fileLength - toTruncate;
+      assertTrue("DFS supports truncate",
+          fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
       boolean isReady = fs.truncate(p, newLength);
       LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
           + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
@@ -176,6 +179,8 @@ public void testMultipleTruncate() throws IOException {
 
     for(int n = data.length; n > 0; ) {
       final int newLength = ThreadLocalRandom.current().nextInt(n);
+      assertTrue("DFS supports truncate",
+          fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
       final boolean isReady = fs.truncate(p, newLength);
       LOG.info("newLength=" + newLength + ", isReady=" + isReady);
       assertEquals("File must be closed for truncating at the block boundary",
@@ -209,6 +214,8 @@ public void testSnapshotTruncateThenDeleteSnapshot() throws IOException {
     final int newLength = data.length - 1;
     assert newLength % BLOCK_SIZE != 0 :
         " newLength must not be multiple of BLOCK_SIZE";
+    assertTrue("DFS supports truncate",
+        fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
     final boolean isReady = fs.truncate(p, newLength);
     LOG.info("newLength=" + newLength + ", isReady=" + isReady);
     assertEquals("File must be closed for truncating at the block boundary",
@@ -29,6 +29,7 @@
 import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -160,6 +161,8 @@ public void testTruncate() throws Exception {
     if (nnRestCsrf && !clientRestCsrf) {
       expectException();
     }
+    assertTrue("WebHdfs supports truncate",
+        webhdfs.hasPathCapability(FILE, CommonPathCapabilities.FS_TRUNCATE));
     assertTrue(webhdfs.truncate(FILE, 0L));
   }
 