From 018893e81ec1c43e6c79c77adec92c2edfb20cab Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Tue, 17 Mar 2015 15:32:34 +0530
Subject: [PATCH] HDFS-5356. MiniDFSCluster should close all open FileSystems
 when shutdown() (Contributed by Rakesh R)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../apache/hadoop/hdfs/MiniDFSCluster.java    | 32 ++++++++++++++++---
 .../apache/hadoop/hdfs/TestFileCreation.java  |  4 +--
 .../snapshot/TestRenameWithSnapshots.java     |  4 +--
 4 files changed, 35 insertions(+), 8 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ad3e880f2f..bbe1f026fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -327,6 +327,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-5356. MiniDFSCluster should close all open FileSystems when shutdown()
+    (Rakesh R via vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 9208ed2d0c..a6cc71ffcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -60,6 +60,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -118,6 +119,7 @@
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
@@ -523,7 +525,8 @@ public void setDnArgs(String ... args) {
   private boolean federation;
   private boolean checkExitOnShutdown = true;
   protected final int storagesPerDatanode;
-
+  private Set<FileSystem> fileSystems = Sets.newHashSet();
+
   /**
    * A unique instance identifier for the cluster. This
    * is used to disambiguate HA filesystems in the case where
@@ -1705,6 +1708,13 @@ public void shutdown() {
    * Shutdown all the nodes in the cluster.
    */
   public void shutdown(boolean deleteDfsDir) {
+    shutdown(deleteDfsDir, true);
+  }
+
+  /**
+   * Shutdown all the nodes in the cluster.
+   */
+  public void shutdown(boolean deleteDfsDir, boolean closeFileSystem) {
     LOG.info("Shutting down the Mini HDFS Cluster");
     if (checkExitOnShutdown) {
       if (ExitUtil.terminateCalled()) {
@@ -1714,6 +1724,16 @@ public void shutdown(boolean deleteDfsDir) {
         throw new AssertionError("Test resulted in an unexpected exit");
       }
     }
+    if (closeFileSystem) {
+      for (FileSystem fs : fileSystems) {
+        try {
+          fs.close();
+        } catch (IOException ioe) {
+          LOG.warn("Exception while closing file system", ioe);
+        }
+      }
+      fileSystems.clear();
+    }
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : nameNodes) {
       if (nnInfo == null) continue;
@@ -2144,8 +2164,10 @@ public DistributedFileSystem getFileSystem() throws IOException {
    * Get a client handle to the DFS cluster for the namenode at given index.
    */
   public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
-    return (DistributedFileSystem)FileSystem.get(getURI(nnIndex),
-        nameNodes[nnIndex].conf);
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
+        getURI(nnIndex), nameNodes[nnIndex].conf);
+    fileSystems.add(dfs);
+    return dfs;
   }
 
   /**
@@ -2153,7 +2175,9 @@ public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
    * This simulating different threads working on different FileSystem instances.
    */
   public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException {
-    return FileSystem.newInstance(getURI(nnIndex), nameNodes[nnIndex].conf);
+    FileSystem dfs = FileSystem.newInstance(getURI(nnIndex), nameNodes[nnIndex].conf);
+    fileSystems.add(dfs);
+    return dfs;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index a0129da1f4..e1c547be6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -675,7 +675,7 @@ public void testFileCreationNamenodeRestart() throws IOException {
 
     // restart cluster with the same namenode port as before.
     // This ensures that leases are persisted in fsimage.
-    cluster.shutdown();
+    cluster.shutdown(false, false);
     try {
       Thread.sleep(2*MAX_IDLE_TIME);
     } catch (InterruptedException e) {
@@ -687,7 +687,7 @@ public void testFileCreationNamenodeRestart() throws IOException {
 
     // restart cluster yet again. This triggers the code to read in
     // persistent leases from fsimage.
-    cluster.shutdown();
+    cluster.shutdown(false, false);
     try {
       Thread.sleep(5000);
     } catch (InterruptedException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index a215beed29..09bd2dcf3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -519,8 +519,8 @@ private void restartClusterAndCheckImage(boolean compareQuota)
     File fsnAfter = new File(testDir, "dumptree_after");
 
     SnapshotTestHelper.dumpTree2File(fsdir, fsnBefore);
-
-    cluster.shutdown();
+
+    cluster.shutdown(false, false);
     cluster = new MiniDFSCluster.Builder(conf).format(false)
         .numDataNodes(REPL).build();
     cluster.waitActive();
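
For reference, a minimal usage sketch (not part of the patch) of the new shutdown(deleteDfsDir, closeFileSystem) overload. The class name, main() driver and mkdirs() activity are hypothetical; MiniDFSCluster.Builder, waitActive(), getFileSystem(int) and shutdown(boolean, boolean) come from the change above.

// Usage sketch only -- not part of this patch. Class name and the mkdirs()
// activity are hypothetical; the MiniDFSCluster APIs are from the change above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterShutdownExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Every FileSystem handed out here is now tracked by the cluster.
      DistributedFileSystem dfs = cluster.getFileSystem(0);
      dfs.mkdirs(new Path("/example"));

      // Restart the cluster but keep client FileSystem instances open,
      // as the updated tests above do.
      cluster.shutdown(false, false);
      cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(1).build();
      cluster.waitActive();
    } finally {
      // shutdown(boolean) now delegates to shutdown(deleteDfsDir, true), so the
      // final teardown closes all tracked FileSystems and clears the set.
      cluster.shutdown(false);
    }
  }
}

Passing closeFileSystem=false preserves the old behaviour for tests that restart the cluster while relying on previously obtained client instances (for example to verify that leases are persisted in fsimage), which is why TestFileCreation and TestRenameWithSnapshots switch to cluster.shutdown(false, false) in the hunks above.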