diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2ea4cf3971..54f197bb22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -525,6 +525,8 @@ Branch-2 ( Unreleased changes )
     HDFS-3608. fuse_dfs: detect changes in UID ticket cache. (Colin Patrick
     McCabe via atm)
 
+    HDFS-3709. TestStartup tests still binding to the ephemeral port. (eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 90fa4d475f..28e22aa4be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -443,16 +443,15 @@ public void testImageChecksum() throws Exception {
 
   private void testImageChecksum(boolean compress) throws Exception {
     MiniDFSCluster cluster = null;
-    Configuration conf = new HdfsConfiguration();
     if (compress) {
-      conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
+      config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, true);
     }
 
     try {
       LOG.info("\n===========================================\n" +
               "Starting empty cluster");
 
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(0)
         .format(true)
         .build();
@@ -479,7 +478,7 @@ private void testImageChecksum(boolean compress) throws Exception {
       LOG.info("\n===========================================\n" +
              "Starting same cluster after simulated crash");
       try {
-        cluster = new MiniDFSCluster.Builder(conf)
+        cluster = new MiniDFSCluster.Builder(config)
           .numDataNodes(0)
           .format(false)
           .build();
@@ -507,19 +506,18 @@ public void testNNRestart() throws IOException, InterruptedException {
     FileSystem localFileSys;
     Path hostsFile;
     Path excludeFile;
-    Configuration conf = new HdfsConfiguration();
     int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
     // Set up the hosts/exclude files.
-    localFileSys = FileSystem.getLocal(conf);
+    localFileSys = FileSystem.getLocal(config);
     Path workingDir = localFileSys.getWorkingDirectory();
     Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
     hostsFile = new Path(dir, "hosts");
     excludeFile = new Path(dir, "exclude");
 
     // Setup conf
-    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     writeConfigFile(localFileSys, excludeFile, null);
-    conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
+    config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     // write into hosts file
     ArrayList<String>list = new ArrayList<String>();
     byte b[] = {127, 0, 0, 1};
@@ -529,7 +527,7 @@ public void testNNRestart() throws IOException, InterruptedException {
     int numDatanodes = 1;
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf)
+      cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(numDatanodes).setupHostsFile(true).build();
       cluster.waitActive();