HDFS-8332. DFS client API calls should check filesystem closed. Contributed by Rakesh R.

Uma Maheswara Rao G 2015-05-08 12:26:47 +05:30
parent ef3d66d462
commit e16f4b7f70
5 changed files with 247 additions and 7 deletions
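For context, every method touched below now begins with a call to DFSClient#checkOpen(). A simplified sketch of that pre-existing guard (the clientRunning flag is a volatile field on DFSClient, cleared by close(); the "Filesystem closed" message is exactly what the new tests assert on):

  // Simplified sketch of DFSClient#checkOpen(), not the verbatim source.
  void checkOpen() throws IOException {
    if (!clientRunning) {
      // close() has already run; fail fast before any NameNode RPC.
      throw new IOException("Filesystem closed");
    }
  }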

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -661,6 +661,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun()
     (Sanghyun Yun via vinayakumarb)
+    HDFS-8332. DFS client API calls should check filesystem closed
+    (Rakesh R via umamahesh)
 
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -638,6 +638,7 @@ public void closeOutputStreams(boolean abort) {
    * @see ClientProtocol#getPreferredBlockSize(String)
    */
   public long getBlockSize(String f) throws IOException {
+    checkOpen();
     TraceScope scope = getPathTraceScope("getBlockSize", f);
     try {
       return namenode.getPreferredBlockSize(f);
@@ -654,6 +655,7 @@ public long getBlockSize(String f) throws IOException {
    * @see ClientProtocol#getServerDefaults()
    */
   public FsServerDefaults getServerDefaults() throws IOException {
+    checkOpen();
     long now = Time.monotonicNow();
     if ((serverDefaults == null) ||
         (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD)) {
@@ -845,6 +847,7 @@ public boolean isManaged(Token<?> token) throws IOException {
    * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
    */
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+    checkOpen();
     namenode.reportBadBlocks(blocks);
   }
@@ -918,6 +921,7 @@ boolean recoverLease(String src) throws IOException {
    */
   public BlockLocation[] getBlockLocations(String src, long start,
       long length) throws IOException, UnresolvedLinkException {
+    checkOpen();
     TraceScope scope = getPathTraceScope("getBlockLocations", src);
     try {
       LocatedBlocks blocks = getLocatedBlocks(src, start, length);
@@ -952,6 +956,7 @@ public BlockLocation[] getBlockLocations(String src, long start,
   public BlockStorageLocation[] getBlockStorageLocations(
       List<BlockLocation> blockLocations) throws IOException,
       UnsupportedOperationException, InvalidBlockTokenException {
+    checkOpen();
     if (!getConf().isHdfsBlocksMetadataEnabled()) {
       throw new UnsupportedOperationException("Datanode-side support for " +
           "getVolumeBlockLocations() must also be enabled in the client " +
@@ -1418,6 +1423,7 @@ public DFSOutputStream primitiveCreate(String src,
    */
   public void createSymlink(String target, String link, boolean createParent)
       throws IOException {
+    checkOpen();
     TraceScope scope = getPathTraceScope("createSymlink", target);
     try {
       final FsPermission dirPerm = applyUMask(null);
@@ -1540,6 +1546,7 @@ private DFSOutputStream append(String src, int buffersize,
    */
   public boolean setReplication(String src, short replication)
       throws IOException {
+    checkOpen();
     TraceScope scope = getPathTraceScope("setReplication", src);
     try {
       return namenode.setReplication(src, replication);
@@ -1563,6 +1570,7 @@ public boolean setReplication(String src, short replication)
    */
   public void setStoragePolicy(String src, String policyName)
       throws IOException {
+    checkOpen();
     TraceScope scope = getPathTraceScope("setStoragePolicy", src);
     try {
       namenode.setStoragePolicy(src, policyName);
@@ -1582,6 +1590,7 @@ public void setStoragePolicy(String src, String policyName)
    * @return All the existing storage policies
    */
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
     try {
       return namenode.getStoragePolicies();
@@ -2232,6 +2241,7 @@ public DatanodeStorageReport[] getDatanodeStorageReport(
    * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
    */
   public boolean setSafeMode(SafeModeAction action) throws IOException {
+    checkOpen();
     return setSafeMode(action, false);
   }
@@ -2434,6 +2444,7 @@ public void removeCacheDirective(long id)
   public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
       CacheDirectiveInfo filter) throws IOException {
+    checkOpen();
     return new CacheDirectiveIterator(namenode, filter, traceSampler);
   }
@@ -2474,6 +2485,7 @@ public void removeCachePool(String poolName) throws IOException {
   }
 
   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
+    checkOpen();
     return new CachePoolIterator(namenode, traceSampler);
   }
@@ -2483,6 +2495,7 @@ public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
    * @see ClientProtocol#saveNamespace(long, long)
    */
   boolean saveNamespace(long timeWindow, long txGap) throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("saveNamespace", traceSampler);
     try {
       return namenode.saveNamespace(timeWindow, txGap);
@@ -2500,6 +2513,7 @@ boolean saveNamespace(long timeWindow, long txGap) throws IOException {
    * @see ClientProtocol#rollEdits()
    */
   long rollEdits() throws AccessControlException, IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("rollEdits", traceSampler);
     try {
       return namenode.rollEdits();
@@ -2522,6 +2536,7 @@ ExtendedBlock getPreviousBlock(long fileId) {
    */
   boolean restoreFailedStorage(String arg)
       throws AccessControlException, IOException{
+    checkOpen();
     TraceScope scope = Trace.startSpan("restoreFailedStorage", traceSampler);
     try {
       return namenode.restoreFailedStorage(arg);
@@ -2538,6 +2553,7 @@ boolean restoreFailedStorage(String arg)
    * @see ClientProtocol#refreshNodes()
    */
   public void refreshNodes() throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("refreshNodes", traceSampler);
     try {
       namenode.refreshNodes();
@@ -2552,6 +2568,7 @@ public void refreshNodes() throws IOException {
    * @see ClientProtocol#metaSave(String)
    */
   public void metaSave(String pathname) throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("metaSave", traceSampler);
     try {
       namenode.metaSave(pathname);
@@ -2569,6 +2586,7 @@ public void metaSave(String pathname) throws IOException {
    * @see ClientProtocol#setBalancerBandwidth(long)
    */
   public void setBalancerBandwidth(long bandwidth) throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
     try {
       namenode.setBalancerBandwidth(bandwidth);
@@ -2581,6 +2599,7 @@ public void setBalancerBandwidth(long bandwidth) throws IOException {
    * @see ClientProtocol#finalizeUpgrade()
    */
   public void finalizeUpgrade() throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("finalizeUpgrade", traceSampler);
     try {
       namenode.finalizeUpgrade();
@@ -2590,6 +2609,7 @@ public void finalizeUpgrade() throws IOException {
   }
 
   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
+    checkOpen();
     TraceScope scope = Trace.startSpan("rollingUpgrade", traceSampler);
     try {
       return namenode.rollingUpgrade(action);
@@ -2675,6 +2695,7 @@ public boolean primitiveMkdir(String src, FsPermission absPermission,
    * @see ClientProtocol#getContentSummary(String)
    */
   ContentSummary getContentSummary(String src) throws IOException {
+    checkOpen();
     TraceScope scope = getPathTraceScope("getContentSummary", src);
     try {
       return namenode.getContentSummary(src);
@@ -2693,6 +2714,7 @@ ContentSummary getContentSummary(String src) throws IOException {
    */
   void setQuota(String src, long namespaceQuota, long storagespaceQuota)
       throws IOException {
+    checkOpen();
     // sanity check
     if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
          namespaceQuota != HdfsConstants.QUOTA_RESET) ||
@@ -2726,6 +2748,7 @@ void setQuota(String src, long namespaceQuota, long storagespaceQuota)
    */
   void setQuotaByStorageType(String src, StorageType type, long quota)
       throws IOException {
+    checkOpen();
     if (quota <= 0 && quota != HdfsConstants.QUOTA_DONT_SET &&
         quota != HdfsConstants.QUOTA_RESET) {
       throw new IllegalArgumentException("Invalid values for quota :" +
@@ -3071,11 +3094,13 @@ public void checkAccess(String src, FsAction mode) throws IOException {
   }
 
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
+    checkOpen();
     return new DFSInotifyEventInputStream(traceSampler, namenode);
   }
 
   public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
       throws IOException {
+    checkOpen();
     return new DFSInotifyEventInputStream(traceSampler, namenode, lastReadTxid);
   }
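With these guards in place, a call on a closed client fails fast instead of going out to the NameNode. A minimal caller-side sketch of the new behavior (hypothetical snippet; the tests below verify exactly this for every guarded API):

  DistributedFileSystem fs = cluster.getFileSystem();
  DFSClient client = fs.getClient();
  fs.close();
  try {
    client.getStoragePolicies();   // any guarded call
  } catch (IOException e) {
    // message contains "Filesystem closed", thrown by checkOpen()
  }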

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -62,11 +62,15 @@
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -160,25 +164,176 @@ public void testDFSClose() throws Exception {
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      FileSystem fileSys = cluster.getFileSystem();
+      DistributedFileSystem fileSys = cluster.getFileSystem();
 
       // create two files, leaving them open
       fileSys.create(new Path("/test/dfsclose/file-0"));
       fileSys.create(new Path("/test/dfsclose/file-1"));
 
       // create another file, close it, and read it, so
       // the client gets a socket in its SocketCache
       Path p = new Path("/non-empty-file");
       DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
       DFSTestUtil.readFile(fileSys, p);
 
       fileSys.close();
+      DFSClient dfsClient = fileSys.getClient();
+      verifyOpsUsingClosedClient(dfsClient);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
 
+  private void verifyOpsUsingClosedClient(DFSClient dfsClient) {
+    Path p = new Path("/non-empty-file");
+    try {
+      dfsClient.getBlockSize(p.getName());
+      fail("getBlockSize using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getServerDefaults();
+      fail("getServerDefaults using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.reportBadBlocks(new LocatedBlock[0]);
+      fail("reportBadBlocks using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getBlockLocations(p.getName(), 0, 1);
+      fail("getBlockLocations using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getBlockStorageLocations(new ArrayList<BlockLocation>());
+      fail("getBlockStorageLocations using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.createSymlink("target", "link", true);
+      fail("createSymlink using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getLinkTarget(p.getName());
+      fail("getLinkTarget using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.setReplication(p.getName(), (short) 3);
+      fail("setReplication using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.setStoragePolicy(p.getName(),
+          HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+      fail("setStoragePolicy using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getStoragePolicies();
+      fail("getStoragePolicies using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      fail("setSafeMode using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.refreshNodes();
+      fail("refreshNodes using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.metaSave(p.getName());
+      fail("metaSave using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.setBalancerBandwidth(1000L);
+      fail("setBalancerBandwidth using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.finalizeUpgrade();
+      fail("finalizeUpgrade using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.rollingUpgrade(RollingUpgradeAction.QUERY);
+      fail("rollingUpgrade using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getInotifyEventStream();
+      fail("getInotifyEventStream using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getInotifyEventStream(100L);
+      fail("getInotifyEventStream using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.saveNamespace(1000L, 200L);
+      fail("saveNamespace using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.rollEdits();
+      fail("rollEdits using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.restoreFailedStorage("");
+      fail("restoreFailedStorage using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.getContentSummary(p.getName());
+      fail("getContentSummary using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.setQuota(p.getName(), 1000L, 500L);
+      fail("setQuota using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfsClient.setQuotaByStorageType(p.getName(), StorageType.DISK, 500L);
+      fail("setQuotaByStorageType using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+  }
+
   @Test
   public void testDFSCloseOrdering() throws Exception {
     DistributedFileSystem fs = new MyDistributedFileSystem();

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgradeRollback.java

@@ -228,6 +228,7 @@ public void testRollbackWithHAQJM() throws Exception {
       dfs.mkdirs(bar);
       dfs.close();
 
+      dfs = dfsCluster.getFileSystem(0);
       TestRollingUpgrade.queryForPreparation(dfs);
 
       // If the query returns true, both active and the standby NN should have

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -59,7 +59,6 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -296,6 +295,35 @@ public void testBasicPoolOperations() throws Exception {
     info = new CachePoolInfo("pool2");
     dfs.addCachePool(info);
+
+    // Perform cache pool operations using a closed file system.
+    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster
+        .getNewFileSystemInstance(0);
+    dfs1.close();
+    try {
+      dfs1.listCachePools();
+      fail("listCachePools using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfs1.addCachePool(info);
+      fail("addCachePool using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfs1.modifyCachePool(info);
+      fail("modifyCachePool using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfs1.removeCachePool(poolName);
+      fail("removeCachePool using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
   }
 
   @Test(timeout=60000)
@@ -538,6 +566,35 @@ public void testAddRemoveDirectives() throws Exception {
     dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
         directive).setId(id).setReplication((short)2).build());
     dfs.removeCacheDirective(id);
+
+    // Perform cache directive operations using a closed file system.
+    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster
+        .getNewFileSystemInstance(0);
+    dfs1.close();
+    try {
+      dfs1.listCacheDirectives(null);
+      fail("listCacheDirectives using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfs1.addCacheDirective(alpha);
+      fail("addCacheDirective using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfs1.modifyCacheDirective(alpha);
+      fail("modifyCacheDirective using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
+    try {
+      dfs1.removeCacheDirective(alphaId);
+      fail("removeCacheDirective using a closed filesystem!");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
+    }
   }
 
   @Test(timeout=60000)