diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ec4674ba1c..4b6b0db5df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -950,6 +950,11 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6534. Fix build on macosx: HDFS parts (Binglin Chang via aw)
 
+    HDFS-7111. TestSafeMode assumes Unix line endings in safe mode tip.
+    (cnauroth)
+
+    HDFS-7127. TestLeaseRecovery leaks MiniDFSCluster instances. (cnauroth)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
index 3328d8e80e..b84989f876 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import org.junit.After;
 import org.junit.Test;
 
 public class TestLeaseRecovery {
@@ -48,6 +49,15 @@ public class TestLeaseRecovery {
   static final short REPLICATION_NUM = (short)3;
   private static final long LEASE_PERIOD = 300L;
 
+  private MiniDFSCluster cluster;
+
+  @After
+  public void shutdown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
   static void checkMetaInfo(ExtendedBlock b, DataNode dn
       ) throws IOException {
     TestInterDatanodeProtocol.checkMetaInfo(b, dn);
@@ -82,79 +92,71 @@ public void testBlockSynchronization() throws Exception {
     final int ORG_FILE_SIZE = 3000;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    MiniDFSCluster cluster = null;
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+    cluster.waitActive();
 
-    try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
-      cluster.waitActive();
+    //create a file
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    String filestr = "/foo";
+    Path filepath = new Path(filestr);
+    DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
+    assertTrue(dfs.exists(filepath));
+    DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
 
-      //create a file
-      DistributedFileSystem dfs = cluster.getFileSystem();
-      String filestr = "/foo";
-      Path filepath = new Path(filestr);
-      DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
-      assertTrue(dfs.exists(filepath));
-      DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
+    //get block info for the last block
+    LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
+        dfs.dfs.getNamenode(), filestr);
+    DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
+    assertEquals(REPLICATION_NUM, datanodeinfos.length);
 
-      //get block info for the last block
-      LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
-          dfs.dfs.getNamenode(), filestr);
-      DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
-      assertEquals(REPLICATION_NUM, datanodeinfos.length);
-
-      //connect to data nodes
-      DataNode[] datanodes = new DataNode[REPLICATION_NUM];
-      for(int i = 0; i < REPLICATION_NUM; i++) {
-        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
-        assertTrue(datanodes[i] != null);
-      }
-
-      //verify Block Info
-      ExtendedBlock lastblock = locatedblock.getBlock();
-      DataNode.LOG.info("newblocks=" + lastblock);
-      for(int i = 0; i < REPLICATION_NUM; i++) {
-        checkMetaInfo(lastblock, datanodes[i]);
-      }
-
-
-      DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
-      cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
-
-      // expire lease to trigger block recovery.
-      waitLeaseRecovery(cluster);
-
-      Block[] updatedmetainfo = new Block[REPLICATION_NUM];
-      long oldSize = lastblock.getNumBytes();
-      lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
-          dfs.dfs.getNamenode(), filestr).getBlock();
-      long currentGS = lastblock.getGenerationStamp();
-      for(int i = 0; i < REPLICATION_NUM; i++) {
-        updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
-            lastblock.getBlockPoolId(), lastblock.getBlockId());
-        assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
-        assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
-        assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
-      }
-
-      // verify that lease recovery does not occur when namenode is in safemode
-      System.out.println("Testing that lease recovery cannot happen during safemode.");
-      filestr = "/foo.safemode";
-      filepath = new Path(filestr);
-      dfs.create(filepath, (short)1);
-      cluster.getNameNodeRpc().setSafeMode(
-          HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
-      assertTrue(dfs.dfs.exists(filestr));
-      DFSTestUtil.waitReplication(dfs, filepath, (short)1);
-      waitLeaseRecovery(cluster);
-      // verify that we still cannot recover the lease
-      LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
-      assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
-      cluster.getNameNodeRpc().setSafeMode(
-          HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
+    //connect to data nodes
+    DataNode[] datanodes = new DataNode[REPLICATION_NUM];
+    for(int i = 0; i < REPLICATION_NUM; i++) {
+      datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
+      assertTrue(datanodes[i] != null);
     }
-    finally {
-      if (cluster != null) {cluster.shutdown();}
+
+    //verify Block Info
+    ExtendedBlock lastblock = locatedblock.getBlock();
+    DataNode.LOG.info("newblocks=" + lastblock);
+    for(int i = 0; i < REPLICATION_NUM; i++) {
+      checkMetaInfo(lastblock, datanodes[i]);
     }
+
+    DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
+    cluster.getNameNodeRpc().append(filestr, dfs.dfs.clientName);
+
+    // expire lease to trigger block recovery.
+    waitLeaseRecovery(cluster);
+
+    Block[] updatedmetainfo = new Block[REPLICATION_NUM];
+    long oldSize = lastblock.getNumBytes();
+    lastblock = TestInterDatanodeProtocol.getLastLocatedBlock(
+        dfs.dfs.getNamenode(), filestr).getBlock();
+    long currentGS = lastblock.getGenerationStamp();
+    for(int i = 0; i < REPLICATION_NUM; i++) {
+      updatedmetainfo[i] = DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(
+          lastblock.getBlockPoolId(), lastblock.getBlockId());
+      assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
+      assertEquals(oldSize, updatedmetainfo[i].getNumBytes());
+      assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
+    }
+
+    // verify that lease recovery does not occur when namenode is in safemode
+    System.out.println("Testing that lease recovery cannot happen during safemode.");
+    filestr = "/foo.safemode";
+    filepath = new Path(filestr);
+    dfs.create(filepath, (short)1);
+    cluster.getNameNodeRpc().setSafeMode(
+        HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
+    assertTrue(dfs.dfs.exists(filestr));
+    DFSTestUtil.waitReplication(dfs, filepath, (short)1);
+    waitLeaseRecovery(cluster);
+    // verify that we still cannot recover the lease
+    LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
+    assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
+    cluster.getNameNodeRpc().setSafeMode(
+        HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
   }
 
   /**
@@ -166,8 +168,7 @@ public void testBlockRecoveryWithLessMetafile() throws Exception {
     Configuration conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     Path file = new Path("/testRecoveryFile");
     DistributedFileSystem dfs = cluster.getFileSystem();
     FSDataOutputStream out = dfs.create(file);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 3db66f52c9..5c78dbe304 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -66,6 +66,7 @@ public class TestSafeMode {
   public static final Log LOG = LogFactory.getLog(TestSafeMode.class);
   private static final Path TEST_PATH = new Path("/test");
   private static final int BLOCK_SIZE = 1024;
+  private static final String NEWLINE = System.getProperty("line.separator");
   Configuration conf;
   MiniDFSCluster cluster;
   FileSystem fs;
@@ -196,7 +197,7 @@ public void testInitializeReplQueuesEarly() throws Exception {
 
     String status = nn.getNamesystem().getSafemode();
     assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
-        "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" +
+        "15 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
         "The number of live datanodes 0 has reached the minimum number 0. " +
         "Safe mode will be turned off automatically once the thresholds " +
        "have been reached.", status);
@@ -448,9 +449,9 @@ public void testDatanodeThreshold() throws IOException {
 
     String tipMsg = cluster.getNamesystem().getSafemode();
     assertTrue("Safemode tip message doesn't look right: " + tipMsg,
-        tipMsg.contains("The number of live datanodes 0 needs an additional " +
-            "1 live datanodes to reach the minimum number 1.\n" +
-            "Safe mode will be turned off automatically"));
+        tipMsg.contains("The number of live datanodes 0 needs an additional " +
+            "1 live datanodes to reach the minimum number 1." +
+            NEWLINE + "Safe mode will be turned off automatically"));
 
     // Start a datanode
     cluster.startDataNodes(conf, 1, true, null, null);
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5176304aed..d53e7484e7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -372,6 +372,11 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-6104. TestJobHistoryParsing.testPartialJob fails in branch-2
     (Mit Desai via jlowe)
 
+    MAPREDUCE-6109. Fix minor typo in distcp -p usage text (Charles Lamb
+    via aw)
+
+    MAPREDUCE-6093. minor distcp doc edits (Charles Lamb via aw)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
index 3e8de4f567..447e515452 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
@@ -119,13 +119,13 @@ $H3 Basic Usage
 $H3 Update and Overwrite
 
   `-update` is used to copy files from source that don't exist at the target
-  or differ than the target version. `-overwrite` overwrites target-files that
+  or differ from the target version. `-overwrite` overwrites target-files that
   exist at the target.
 
-  Update and Overwrite options warrant special attention, since their handling
-  of source-paths varies from the defaults in a very subtle manner. Consider a
-  copy from `/source/first/` and `/source/second/` to `/target/`, where the
-  source paths have the following contents:
+  The Update and Overwrite options warrant special attention since their
+  handling of source-paths varies from the defaults in a very subtle manner.
+  Consider a copy from `/source/first/` and `/source/second/` to `/target/`,
+  where the source paths have the following contents:
 
     hdfs://nn1:8020/source/first/1
     hdfs://nn1:8020/source/first/2
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index 9b85997ef9..d263f8273d 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -54,7 +54,7 @@ public enum DistCpOptionSwitch {
           "and timestamps. " +
           "raw.* xattrs are preserved when both the source and destination " +
           "paths are in the /.reserved/raw hierarchy (HDFS only). raw.* xattr" +
-          "preservation is independent of the -p flag." +
+          "preservation is independent of the -p flag. " +
           "Refer to the DistCp documentation for more details.")),
 
   /**