diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f896544118..4af15d9d59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2516,6 +2516,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9572. Prevent DataNode log spam if a client connects on the data
     transfer port but sends no data. (cnauroth)
 
+    HDFS-9571. Fix ASF Licence warnings in Jenkins reports
+    (Brahma Reddy Battula via cnauroth)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index ec5895b9a3..78ae8b18a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -63,17 +63,24 @@ public void setUp() {
    */
   @Test(timeout=100000)
   public void testClusterWithoutSystemProperties() throws Throwable {
+    String oldPrp = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
     System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
-    Configuration conf = new HdfsConfiguration();
-    File testDataCluster1 = new File(testDataPath, CLUSTER_1);
-    String c1Path = testDataCluster1.getAbsolutePath();
-    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster = null;
     try {
+      Configuration conf = new HdfsConfiguration();
+      File testDataCluster1 = new File(testDataPath, CLUSTER_1);
+      String c1Path = testDataCluster1.getAbsolutePath();
+      conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       assertEquals(new File(c1Path + "/data"),
           new File(cluster.getDataDirectory()));
     } finally {
-      cluster.shutdown();
+      if (oldPrp != null) {
+        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, oldPrp);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 95f6cd8e5e..ea994a2bb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -387,8 +387,8 @@ public void testNodeDecomissionRespectsRackPolicy() throws Exception {
 
     // Configure an excludes file
     FileSystem localFileSys = FileSystem.getLocal(conf);
-    Path workingDir = localFileSys.getWorkingDirectory();
-    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+    Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
+    Path dir = new Path(workingDir, "temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
     Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
@@ -440,8 +440,8 @@ public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
 
     // Configure an excludes file
     FileSystem localFileSys = FileSystem.getLocal(conf);
-    Path workingDir = localFileSys.getWorkingDirectory();
-    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+    Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
+    Path dir = new Path(workingDir, "temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
     Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index a93cc2a562..08ad38b903 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -83,8 +83,8 @@ public void testHostsExcludeInUI() throws Exception {
 
     // Configure an excludes file
     FileSystem localFileSys = FileSystem.getLocal(conf);
-    Path workingDir = localFileSys.getWorkingDirectory();
-    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+    Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
+    Path dir = new Path(workingDir, "temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
     Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));
@@ -143,8 +143,8 @@ public void testHostsIncludeForDeadCount() throws Exception {
 
     // Configure an excludes file
     FileSystem localFileSys = FileSystem.getLocal(conf);
-    Path workingDir = localFileSys.getWorkingDirectory();
-    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+    Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
+    Path dir = new Path(workingDir, "temp/decommission");
     Path excludeFile = new Path(dir, "exclude");
     Path includeFile = new Path(dir, "include");
     assertTrue(localFileSys.mkdirs(dir));