From ef591b1d6a08f08358b19763a874de6010227307 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Fri, 3 Apr 2015 16:34:23 -0700
Subject: [PATCH] HDFS-8051. FsVolumeList#addVolume should release volume
 reference if not put it into BlockScanner. (Lei (Eddy) Xu via Colin P.
 McCabe)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt       |  3 +++
 .../datanode/fsdataset/impl/FsVolumeList.java     |  5 +++++
 .../fsdataset/impl/TestFsVolumeList.java          | 19 +++++++++++++++++++
 3 files changed, 27 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d399a4f5b..6fafec8ee9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1376,6 +1376,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7996. After swapping a volume, BlockReceiver reports
     ReplicaNotFoundException (Lei (Eddy) Xu via Colin P. McCabe)
 
+    HDFS-8051. FsVolumeList#addVolume should release volume reference if not
+    put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 4fddfb9e64..d87595cece 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Time;
@@ -292,6 +293,10 @@ void addVolume(FsVolumeReference ref) {
     }
     if (blockScanner != null) {
       blockScanner.addVolumeScanner(ref);
+    } else {
+      // If the volume is not put into a volume scanner, it does not need to
+      // hold the reference.
+      IOUtils.cleanup(FsDatasetImpl.LOG, ref);
     }
     // If the volume is used to replace a failed volume, it needs to reset the
     // volume failure info for this volume.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 46189ba6df..eccff896bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -35,6 +35,7 @@ import java.util.List;
 
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 
 public class TestFsVolumeList {
@@ -101,4 +102,22 @@ public void testCheckDirsWithClosedVolume() throws IOException {
     // checkDirs() should ignore the 2nd volume since it is closed.
     volumeList.checkDirs();
   }
+
+  @Test
+  public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
+    FsVolumeList volumeList = new FsVolumeList(
+        Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DEFAULT);
+    FsVolumeReference ref = volume.obtainReference();
+    volumeList.addVolume(ref);
+    try {
+      ref.close();
+      fail("Should throw exception because the reference is closed in " +
+          "VolumeList#addVolume().");
+    } catch (IllegalStateException e) {
+    }
+  }
 }
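
The sketch below is not part of the patch; it is a self-contained illustration of the ownership rule the change enforces, using hypothetical Volume/Reference classes rather than the real FsVolumeImpl/FsVolumeReference API. Obtaining a reference bumps a counter, closing it releases the counter exactly once, and a second close() fails with IllegalStateException, which is the behavior the new unit test relies on once addVolume() has already released the reference it was handed.

```java
import java.io.Closeable;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Hypothetical stand-ins (not Hadoop classes) showing the pattern the fix
 * follows: whoever keeps a reference must close it exactly once, and a
 * reference that is not handed off must be released immediately.
 */
public class VolumeRefSketch {

  /** Simplified reference-counted volume. */
  static class Volume {
    private final AtomicInteger refCount = new AtomicInteger();

    Reference obtainReference() {
      refCount.incrementAndGet();
      return new Reference(this);
    }

    int references() {
      return refCount.get();
    }
  }

  /** Simplified reference handle; closing twice is treated as a bug. */
  static class Reference implements Closeable {
    private Volume volume;

    Reference(Volume volume) {
      this.volume = volume;
    }

    @Override
    public void close() {
      if (volume == null) {
        throw new IllegalStateException("reference is already closed");
      }
      volume.refCount.decrementAndGet();
      volume = null;
    }
  }

  /**
   * Mirrors the fixed addVolume() logic: if a scanner takes the reference it
   * becomes the owner; otherwise the reference is released on the spot so
   * the volume's count does not leak.
   */
  static void addVolume(Reference ref, boolean scannerPresent) {
    if (!scannerPresent) {
      ref.close(); // the patch does this via IOUtils.cleanup(FsDatasetImpl.LOG, ref)
    }
    // When a scanner is present it keeps the reference and closes it later.
  }

  public static void main(String[] args) {
    Volume volume = new Volume();
    Reference ref = volume.obtainReference();
    addVolume(ref, false);
    System.out.println("references after addVolume: " + volume.references()); // prints 0

    try {
      ref.close(); // same double close the new unit test performs
    } catch (IllegalStateException e) {
      System.out.println("second close rejected: " + e.getMessage());
    }
  }
}
```

In the patch itself the release goes through IOUtils.cleanup(FsDatasetImpl.LOG, ref), so any exception from closing the reference is logged against the dataset log instead of propagating out of addVolume().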