HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli)

Switches FSImage.getCheckpointDirs()/getCheckpointEditsDirs() from
Configuration.getStringCollection() to getTrimmedStringCollection() so
that whitespace (including newlines) around the configured
dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
values is ignored, and adds a regression test.

Review fixes folded into the patch (added lines only; hunk line counts
are unchanged so the patch still applies):
 - the first two test assertion messages now reference
   DFS_NAMENODE_CHECKPOINT_DIR_KEY — they wrongly named
   DFS_NAMENODE_NAME_DIR_KEY while asserting on the checkpoint dirs;
 - the test's finally block null-checks `secondary` so that a failure
   in startSecondaryNameNode()/doCheckpoint() is not masked by an NPE.

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0dceb8c713..e3ec5a464b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -509,6 +509,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3256. HDFS considers blocks under-replicated if topology script is
     configured with only 1 rack. (atm)
 
+    HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index a9bf5c7066..70d184d914 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -1076,7 +1076,8 @@ synchronized public void close() throws IOException {
    */
   static Collection<URI> getCheckpointDirs(Configuration conf,
       String defaultValue) {
-    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     if (dirNames.size() == 0 && defaultValue != null) {
       dirNames.add(defaultValue);
     }
@@ -1085,8 +1086,8 @@ static Collection<URI> getCheckpointDirs(Configuration conf,
 
   static List<URI> getCheckpointEditsDirs(Configuration conf,
       String defaultName) {
-    Collection<String> dirNames = 
-      conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     if (dirNames.size() == 0 && defaultName != null) {
       dirNames.add(defaultName);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
index 5a188152c6..70f5b57785 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
@@ -490,4 +490,46 @@ public void testNameEditsConfigsFailure() throws IOException {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
+   * should tolerate white space between values.
+   */
+  @Test
+  public void testCheckPointDirsAreTrimmed() throws Exception {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    File checkpointNameDir1 = new File(base_dir, "chkptName1");
+    File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
+    File checkpointNameDir2 = new File(base_dir, "chkptName2");
+    File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
+    File nameDir = new File(base_dir, "name1");
+    String whiteSpace = " \n \n ";
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
+    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
+        + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
+        + checkpointNameDir2.getPath() + whiteSpace);
+    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+        whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
+        + checkpointEditsDir2.getPath() + whiteSpace);
+    cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
+        .numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY + " must be trimmed ",
+          checkpointNameDir1.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY + " must be trimmed ",
+          checkpointNameDir2.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+          + " must be trimmed ", checkpointEditsDir1.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+          + " must be trimmed ", checkpointEditsDir2.exists());
+    } finally {
+      if (secondary != null) { secondary.shutdown(); }
+      cluster.shutdown();
+    }
+  }
 }