From 9a153334ac5a83a49a44ead02466453f3127120f Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Tue, 24 Jan 2012 01:00:51 +0000 Subject: [PATCH 1/3] MAPREDUCE-3681. Fixed computation of queue's usedCapacity. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1235103 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 + .../scheduler/capacity/LeafQueue.java | 23 ++-- .../scheduler/capacity/ParentQueue.java | 23 ++-- .../scheduler/capacity/TestParentQueue.java | 114 ++++++++---------- 4 files changed, 83 insertions(+), 79 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index c0c75d072d..799ce495ec 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -538,6 +538,8 @@ Release 0.23.1 - Unreleased MAPREDUCE-3646. Remove redundant URL info from "mapred job" output. (Jonathan Eagles via mahadev) + MAPREDUCE-3681. Fixed computation of queue's usedCapacity. (acmurthy) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 5f1314113e..b8d89878ec 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -272,9 +272,9 @@ private synchronized void setupQueueConfigs( "maxActiveApplicationsPerUser = " + maxActiveApplicationsPerUser + " 
[= (int)(maxActiveApplications * (userLimit / 100.0f) * userLimitFactor) ]" + "\n" + "utilization = " + utilization + - " [= usedResourcesMemory / queueLimit ]" + "\n" + + " [= usedResourcesMemory / (clusterResourceMemory * absoluteCapacity)]" + "\n" + "usedCapacity = " + usedCapacity + - " [= usedResourcesMemory / (clusterResourceMemory * capacity) ]" + "\n" + + " [= usedResourcesMemory / (clusterResourceMemory * parent.absoluteCapacity)]" + "\n" + "maxAMResourcePercent = " + maxAMResourcePercent + " [= configuredMaximumAMResourcePercent ]" + "\n" + "minimumAllocationFactor = " + minimumAllocationFactor + @@ -502,9 +502,14 @@ public synchronized QueueInfo getQueueInfo( } public String toString() { - return queueName + ":" + capacity + ":" + absoluteCapacity + ":" + - getUsedCapacity() + ":" + getUtilization() + ":" + - getNumApplications() + ":" + getNumContainers(); + return queueName + ": " + + "capacity=" + capacity + ", " + + "absoluteCapacity=" + absoluteCapacity + ", " + + "usedResources=" + usedResources.getMemory() + "MB, " + + "usedCapacity=" + getUsedCapacity() + ", " + + "utilization=" + getUtilization() + ", " + + "numApps=" + getNumApplications() + ", " + + "numContainers=" + getNumContainers(); } private synchronized User getUser(String userName) { @@ -1316,11 +1321,11 @@ public synchronized void updateClusterResource(Resource clusterResource) { } private synchronized void updateResource(Resource clusterResource) { - float queueLimit = clusterResource.getMemory() * absoluteCapacity; + float queueLimit = clusterResource.getMemory() * absoluteCapacity; setUtilization(usedResources.getMemory() / queueLimit); - setUsedCapacity( - usedResources.getMemory() / (clusterResource.getMemory() * capacity)); - + setUsedCapacity(usedResources.getMemory() + / (clusterResource.getMemory() * parent.getAbsoluteCapacity())); + Resource resourceLimit = Resources.createResource(roundUp((int)queueLimit)); metrics.setAvailableResourcesToQueue( diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 4fea3227c1..41ef854847 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -333,10 +333,15 @@ public synchronized List getQueueUserAclInfo( } public String toString() { - return queueName + ":" + capacity + ":" + absoluteCapacity + ":" + - getUsedCapacity() + ":" + getUtilization() + ":" + - getNumApplications() + ":" + getNumContainers() + ":" + - childQueues.size() + " child-queues"; + return queueName + ": " + + "numChildQueue= " + childQueues.size() + ", " + + "capacity=" + capacity + ", " + + "absoluteCapacity=" + absoluteCapacity + ", " + + "usedResources=" + usedResources.getMemory() + "MB, " + + "usedCapacity=" + getUsedCapacity() + ", " + + "utilization=" + getUtilization() + ", " + + "numApps=" + getNumApplications() + ", " + + "numContainers=" + getNumContainers(); } @Override @@ -688,11 +693,13 @@ public synchronized void updateClusterResource(Resource clusterResource) { } private synchronized void updateResource(Resource clusterResource) { - float queueLimit = clusterResource.getMemory() * absoluteCapacity; + float queueLimit = clusterResource.getMemory() * absoluteCapacity; + float parentAbsoluteCapacity = + (rootQueue) ? 
1.0f : parent.getAbsoluteCapacity(); setUtilization(usedResources.getMemory() / queueLimit); - setUsedCapacity( - usedResources.getMemory() / (clusterResource.getMemory() * capacity)); - + setUsedCapacity(usedResources.getMemory() + / (clusterResource.getMemory() * parentAbsoluteCapacity)); + Resource resourceLimit = Resources.createResource((int)queueLimit); metrics.setAvailableResourcesToQueue( diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index bbfd503b4d..6bddf87eeb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -138,12 +138,34 @@ public CSAssignment answer(InvocationOnMock invocation) throws Throwable { when(queue).assignContainers(eq(clusterResource), eq(node)); } + private float computeQueueUsedCapacity(CSQueue queue, + int expectedMemory, Resource clusterResource) { + return ( + ((float)expectedMemory / clusterResource.getMemory()) * + queue.getParent().getAbsoluteCapacity() + ); + } + private float computeQueueUtilization(CSQueue queue, int expectedMemory, Resource clusterResource) { return (expectedMemory / (clusterResource.getMemory() * queue.getAbsoluteCapacity())); } + final static float DELTA = 0.0001f; + private void verifyQueueMetrics(CSQueue queue, + int expectedMemory, Resource clusterResource) { + assertEquals( + computeQueueUtilization(queue, 
expectedMemory, clusterResource), + queue.getUtilization(), + DELTA); + assertEquals( + computeQueueUsedCapacity(queue, expectedMemory, clusterResource), + queue.getUsedCapacity(), + DELTA); + + } + @Test public void testSingleLevelQueues() throws Exception { // Setup queue configs @@ -173,15 +195,13 @@ public void testSingleLevelQueues() throws Exception { // Start testing LeafQueue a = (LeafQueue)queues.get(A); LeafQueue b = (LeafQueue)queues.get(B); - final float delta = 0.0001f; // Simulate B returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB); stubQueueAllocation(b, clusterResource, node_0, 1*GB); root.assignContainers(clusterResource, node_0); - assertEquals(0.0f, a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 1*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 0*GB, clusterResource); + verifyQueueMetrics(b, 1*GB, clusterResource); // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G stubQueueAllocation(a, clusterResource, node_1, 2*GB); @@ -192,10 +212,8 @@ public void testSingleLevelQueues() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 2*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 2*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 2*GB, clusterResource); + verifyQueueMetrics(b, 2*GB, clusterResource); // Now, B should get the scheduling opportunity // since A has 2/6G while B has 2/14G @@ -207,10 +225,8 @@ public void testSingleLevelQueues() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 3*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 4*GB, clusterResource), - 
b.getUtilization(), delta); + verifyQueueMetrics(a, 3*GB, clusterResource); + verifyQueueMetrics(b, 4*GB, clusterResource); // Now, B should still get the scheduling opportunity // since A has 3/6G while B has 4/14G @@ -222,10 +238,8 @@ public void testSingleLevelQueues() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 3*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 8*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 3*GB, clusterResource); + verifyQueueMetrics(b, 8*GB, clusterResource); // Now, A should get the scheduling opportunity // since A has 3/6G while B has 8/14G @@ -237,10 +251,8 @@ public void testSingleLevelQueues() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 4*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 9*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 4*GB, clusterResource); + verifyQueueMetrics(b, 9*GB, clusterResource); } private static final String C = "c"; @@ -323,22 +335,16 @@ public void testMultiLevelQueues() throws Exception { CSQueue b2 = queues.get(B2); CSQueue b3 = queues.get(B3); - final float delta = 0.0001f; - // Simulate C returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB); stubQueueAllocation(b, clusterResource, node_0, 0*GB); stubQueueAllocation(c, clusterResource, node_0, 1*GB); stubQueueAllocation(d, clusterResource, node_0, 0*GB); root.assignContainers(clusterResource, node_0); - assertEquals(computeQueueUtilization(a, 0*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 0*GB, clusterResource), - b.getUtilization(), delta); - 
assertEquals(computeQueueUtilization(c, 1*GB, clusterResource), - c.getUtilization(), delta); - assertEquals(computeQueueUtilization(d, 0*GB, clusterResource), - d.getUtilization(), delta); + verifyQueueMetrics(a, 0*GB, clusterResource); + verifyQueueMetrics(b, 0*GB, clusterResource); + verifyQueueMetrics(c, 1*GB, clusterResource); + verifyQueueMetrics(d, 0*GB, clusterResource); reset(a); reset(b); reset(c); // Now get B2 to allocate @@ -347,12 +353,9 @@ public void testMultiLevelQueues() throws Exception { stubQueueAllocation(b2, clusterResource, node_1, 4*GB); stubQueueAllocation(c, clusterResource, node_1, 0*GB); root.assignContainers(clusterResource, node_1); - assertEquals(computeQueueUtilization(a, 0*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 4*GB, clusterResource), - b.getUtilization(), delta); - assertEquals(computeQueueUtilization(c, 1*GB, clusterResource), - c.getUtilization(), delta); + verifyQueueMetrics(a, 0*GB, clusterResource); + verifyQueueMetrics(b, 4*GB, clusterResource); + verifyQueueMetrics(c, 1*GB, clusterResource); reset(a); reset(b); reset(c); // Now get both A1, C & B3 to allocate in right order @@ -368,12 +371,9 @@ public void testMultiLevelQueues() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 1*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 6*GB, clusterResource), - b.getUtilization(), delta); - assertEquals(computeQueueUtilization(c, 3*GB, clusterResource), - c.getUtilization(), delta); + verifyQueueMetrics(a, 1*GB, clusterResource); + verifyQueueMetrics(b, 6*GB, clusterResource); + verifyQueueMetrics(c, 3*GB, clusterResource); reset(a); reset(b); reset(c); // Now verify max-capacity @@ -399,16 +399,12 @@ public void testMultiLevelQueues() throws Exception { any(SchedulerNode.class)); 
allocationOrder.verify(c).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 3*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 8*GB, clusterResource), - b.getUtilization(), delta); - assertEquals(computeQueueUtilization(c, 4*GB, clusterResource), - c.getUtilization(), delta); + verifyQueueMetrics(a, 3*GB, clusterResource); + verifyQueueMetrics(b, 8*GB, clusterResource); + verifyQueueMetrics(c, 4*GB, clusterResource); reset(a); reset(b); reset(c); - } - + @Test public void testOffSwitchScheduling() throws Exception { // Setup queue configs @@ -438,15 +434,13 @@ public void testOffSwitchScheduling() throws Exception { // Start testing LeafQueue a = (LeafQueue)queues.get(A); LeafQueue b = (LeafQueue)queues.get(B); - final float delta = 0.0001f; // Simulate B returning a container on node_0 stubQueueAllocation(a, clusterResource, node_0, 0*GB, NodeType.OFF_SWITCH); stubQueueAllocation(b, clusterResource, node_0, 1*GB, NodeType.OFF_SWITCH); root.assignContainers(clusterResource, node_0); - assertEquals(0.0f, a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 1*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 0*GB, clusterResource); + verifyQueueMetrics(b, 1*GB, clusterResource); // Now, A should get the scheduling opportunity since A=0G/6G, B=1G/14G // also, B gets a scheduling opportunity since A allocates RACK_LOCAL @@ -458,10 +452,8 @@ public void testOffSwitchScheduling() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(b).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 2*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 2*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 2*GB, clusterResource); + verifyQueueMetrics(b, 2*GB, clusterResource); // Now, B should get the 
scheduling opportunity // since A has 2/6G while B has 2/14G, @@ -474,10 +466,8 @@ public void testOffSwitchScheduling() throws Exception { any(SchedulerNode.class)); allocationOrder.verify(a).assignContainers(eq(clusterResource), any(SchedulerNode.class)); - assertEquals(computeQueueUtilization(a, 2*GB, clusterResource), - a.getUtilization(), delta); - assertEquals(computeQueueUtilization(b, 4*GB, clusterResource), - b.getUtilization(), delta); + verifyQueueMetrics(a, 2*GB, clusterResource); + verifyQueueMetrics(b, 4*GB, clusterResource); } From 5dfe62d845424e688c144fc10446ff6392e54ae4 Mon Sep 17 00:00:00 2001 From: Eli Collins Date: Tue, 24 Jan 2012 06:05:52 +0000 Subject: [PATCH 2/3] HDFS-2397. Undeprecate SecondaryNameNode. Contributed by Eli Collins git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1235135 13f79535-47bb-0310-9956-ffa450edef68 --- .../content/xdocs/commands_manual.xml | 5 ---- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../content/xdocs/hdfs_user_guide.xml | 20 +++++++------- .../server/namenode/SecondaryNameNode.java | 1 - .../hdfs/server/namenode/TestCheckpoint.java | 26 ------------------- .../server/namenode/TestNameEditsConfigs.java | 4 --- .../server/namenode/TestSecondaryWebUi.java | 1 - .../hdfs/server/namenode/TestStartup.java | 2 -- .../server/namenode/TestStorageRestore.java | 2 -- .../src/site/apt/ClusterSetup.apt.vm | 20 +++++++------- 10 files changed, 21 insertions(+), 62 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml index b3f25af40e..19df2fe9cf 100644 --- a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml +++ b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/commands_manual.xml @@ -753,11 +753,6 @@
secondarynamenode - - The Secondary NameNode has been deprecated. Instead, consider using the - Checkpoint Node or - Backup Node. -

Runs the HDFS secondary namenode. See Secondary NameNode diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7511d5959e..d355bf4f80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -276,6 +276,8 @@ Release 0.23.1 - UNRELEASED HDFS-2818. Fix a missing space issue in HDFS webapps' title tags. (Devaraj K via harsh) + HDFS-2397. Undeprecate SecondaryNameNode (eli) + OPTIMIZATIONS HDFS-2130. Switch default checksum to CRC32C. (todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml index 0d3ed89c7f..4d2c6dd073 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml @@ -112,17 +112,18 @@ problems.

  • - Secondary NameNode (deprecated): performs periodic checkpoints of the + Secondary NameNode: performs periodic checkpoints of the namespace and helps keep the size of file containing log of HDFS modifications within certain limits at the NameNode. - Replaced by Checkpoint node.
  • +
  • Checkpoint node: performs periodic checkpoints of the namespace and helps minimize the size of the log stored at the NameNode containing changes to the HDFS. - Replaces the role previously filled by the Secondary NameNode. - NameNode allows multiple Checkpoint nodes simultaneously, + Replaces the role previously filled by the Secondary NameNode, + though it is not yet battle hardened. + The NameNode allows multiple Checkpoint nodes simultaneously, as long as there are no Backup nodes registered with the system.
  • @@ -132,6 +133,7 @@ which is always in sync with the active NameNode namespace state. Only one Backup node may be registered with the NameNode at once.
  • + @@ -234,12 +236,6 @@
    Secondary NameNode - - The Secondary NameNode has been deprecated. - Instead, consider using the - Checkpoint Node or - Backup Node. -

    The NameNode stores modifications to the file system as a log appended to a native file system file, edits. @@ -287,7 +283,9 @@ secondarynamenode.

    -
    Checkpoint Node +
    + +
    Checkpoint Node

    NameNode persists its namespace using two files: fsimage, which is the latest checkpoint of the namespace and edits, a journal (log) of changes to the namespace since the checkpoint. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index d84b1a2dd0..f605f61752 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -87,7 +87,6 @@ * primary NameNode. * **********************************************************/ -@Deprecated // use BackupNode with -checkpoint argument instead. @InterfaceAudience.Private public class SecondaryNameNode implements Runnable { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 1717f6c0b2..52da95725a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -204,7 +204,6 @@ public void testWriteTransactionIdHandlesIOE() throws Exception { /* * Simulate namenode crashing after rolling edit log. 
*/ - @SuppressWarnings("deprecation") public void testSecondaryNamenodeError1() throws IOException { LOG.info("Starting testSecondaryNamenodeError1"); @@ -266,7 +265,6 @@ public void testSecondaryNamenodeError1() /* * Simulate a namenode crash after uploading new image */ - @SuppressWarnings("deprecation") public void testSecondaryNamenodeError2() throws IOException { LOG.info("Starting testSecondaryNamenodeError2"); Configuration conf = new HdfsConfiguration(); @@ -325,7 +323,6 @@ public void testSecondaryNamenodeError2() throws IOException { /* * Simulate a secondary namenode crash after rolling the edit log. */ - @SuppressWarnings("deprecation") public void testSecondaryNamenodeError3() throws IOException { LOG.info("Starting testSecondaryNamenodeError3"); Configuration conf = new HdfsConfiguration(); @@ -395,7 +392,6 @@ public void testSecondaryNamenodeError3() throws IOException { * back to the name-node. * Used to truncate primary fsimage file. */ - @SuppressWarnings("deprecation") public void testSecondaryFailsToReturnImage() throws IOException { LOG.info("Starting testSecondaryFailsToReturnImage"); Configuration conf = new HdfsConfiguration(); @@ -472,7 +468,6 @@ public void testNameNodeImageSendFailWrongDigest() * @param errorType the ErrorSimulator type to trigger * @param exceptionSubstring an expected substring of the triggered exception */ - @SuppressWarnings("deprecation") private void doSendFailTest(int errorType, String exceptionSubstring) throws IOException { Configuration conf = new HdfsConfiguration(); @@ -587,7 +582,6 @@ public void testSeparateEditsDirLocking() throws IOException { /** * Test that the SecondaryNameNode properly locks its storage directories. 
*/ - @SuppressWarnings("deprecation") public void testSecondaryNameNodeLocking() throws Exception { // Start a primary NN so that the secondary will start successfully Configuration conf = new HdfsConfiguration(); @@ -680,7 +674,6 @@ private static void assertClusterStartFailsWhenDirLocked( * 2. if the NN does not contain an image, importing a checkpoint * succeeds and re-saves the image */ - @SuppressWarnings("deprecation") public void testImportCheckpoint() throws Exception { Configuration conf = new HdfsConfiguration(); Path testPath = new Path("/testfile"); @@ -761,16 +754,12 @@ private static void removeAndRecreateDir(File dir) throws IOException { throw new IOException("Cannot create directory " + dir); } - // This deprecation suppress warning does not work due to known Java bug: - // http://bugs.sun.com/view_bug.do?bug_id=6460147 - @SuppressWarnings("deprecation") SecondaryNameNode startSecondaryNameNode(Configuration conf ) throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0"); return new SecondaryNameNode(conf); } - @SuppressWarnings("deprecation") SecondaryNameNode startSecondaryNameNode(Configuration conf, int index) throws IOException { Configuration snnConf = new Configuration(conf); @@ -783,7 +772,6 @@ SecondaryNameNode startSecondaryNameNode(Configuration conf, int index) /** * Tests checkpoint in HDFS. 
*/ - @SuppressWarnings("deprecation") public void testCheckpoint() throws IOException { Path file1 = new Path("checkpoint.dat"); Path file2 = new Path("checkpoint2.dat"); @@ -1010,7 +998,6 @@ public void testCheckpointSignature() throws IOException { * - it then fails again for the same reason * - it then tries to checkpoint a third time */ - @SuppressWarnings("deprecation") public void testCheckpointAfterTwoFailedUploads() throws IOException { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; @@ -1065,7 +1052,6 @@ public void testCheckpointAfterTwoFailedUploads() throws IOException { * * @throws IOException */ - @SuppressWarnings("deprecation") public void testMultipleSecondaryNamenodes() throws IOException { Configuration conf = new HdfsConfiguration(); String nameserviceId1 = "ns1"; @@ -1115,7 +1101,6 @@ public void testMultipleSecondaryNamenodes() throws IOException { * Test that the secondary doesn't have to re-download image * if it hasn't changed. */ - @SuppressWarnings("deprecation") public void testSecondaryImageDownload() throws IOException { LOG.info("Starting testSecondaryImageDownload"); Configuration conf = new HdfsConfiguration(); @@ -1198,7 +1183,6 @@ public void testSecondaryImageDownload() throws IOException { * It verifies that this works even though the earlier-txid checkpoint gets * uploaded after the later-txid checkpoint. */ - @SuppressWarnings("deprecation") public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { Configuration conf = new HdfsConfiguration(); @@ -1284,7 +1268,6 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { * It verifies that one of the two gets an error that it's uploading a * duplicate checkpoint, and the other one succeeds. 
*/ - @SuppressWarnings("deprecation") public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception { Configuration conf = new HdfsConfiguration(); @@ -1383,7 +1366,6 @@ protected Object passThrough(InvocationOnMock invocation) throws Throwable { * is running. The secondary should shut itself down if if talks to a NN * with the wrong namespace. */ - @SuppressWarnings("deprecation") public void testReformatNNBetweenCheckpoints() throws IOException { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; @@ -1638,7 +1620,6 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { /** * Test that the 2NN triggers a checkpoint after the configurable interval */ - @SuppressWarnings("deprecation") public void testCheckpointTriggerOnTxnCount() throws Exception { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; @@ -1692,7 +1673,6 @@ public Boolean get() { * logs that connect the 2NN's old checkpoint to the current txid * get archived. Then, the 2NN tries to checkpoint again. */ - @SuppressWarnings("deprecation") public void testSecondaryHasVeryOutOfDateImage() throws IOException { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; @@ -1730,7 +1710,6 @@ public void testSecondaryHasVeryOutOfDateImage() throws IOException { } } - @SuppressWarnings("deprecation") public void testCommandLineParsing() throws ParseException { SecondaryNameNode.CommandLineOpts opts = new SecondaryNameNode.CommandLineOpts(); @@ -1765,7 +1744,6 @@ public void testCommandLineParsing() throws ParseException { } catch (ParseException e) {} } - @SuppressWarnings("deprecation") private void cleanup(SecondaryNameNode snn) { if (snn != null) { try { @@ -1781,7 +1759,6 @@ private void cleanup(SecondaryNameNode snn) { * Assert that if any two files have the same name across the 2NNs * and NN, they should have the same content too. 
*/ - @SuppressWarnings("deprecation") private void assertParallelFilesInvariant(MiniDFSCluster cluster, ImmutableList secondaries) throws Exception { List allCurrentDirs = Lists.newArrayList(); @@ -1793,7 +1770,6 @@ private void assertParallelFilesInvariant(MiniDFSCluster cluster, ImmutableSet.of("VERSION")); } - @SuppressWarnings("deprecation") private List getCheckpointCurrentDirs(SecondaryNameNode secondary) { List ret = Lists.newArrayList(); for (URI u : secondary.getCheckpointDirs()) { @@ -1803,7 +1779,6 @@ private List getCheckpointCurrentDirs(SecondaryNameNode secondary) { return ret; } - @SuppressWarnings("deprecation") private CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) { CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());; secondary1.setFSImage(spy); @@ -1813,7 +1788,6 @@ private CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) { /** * A utility class to perform a checkpoint in a different thread. */ - @SuppressWarnings("deprecation") private static class DoCheckpointThread extends Thread { private final SecondaryNameNode snn; private volatile Throwable thrown = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index 577eab0898..5a188152c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -120,9 +120,6 @@ private void cleanupFile(FileSystem fileSys, Path name) assertTrue(!fileSys.exists(name)); } - // This deprecation suppress warning does not work due to known Java bug: - // http://bugs.sun.com/view_bug.do?bug_id=6460147 - @SuppressWarnings("deprecation") SecondaryNameNode 
startSecondaryNameNode(Configuration conf ) throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0"); @@ -142,7 +139,6 @@ SecondaryNameNode startSecondaryNameNode(Configuration conf * sure we are reading proper edits and image. * @throws Exception */ - @SuppressWarnings("deprecation") @Test public void testNameEditsConfigs() throws Exception { Path file1 = new Path("TestNameEditsConfigs1"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java index b11e26c064..16b2ac6c65 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryWebUi.java @@ -30,7 +30,6 @@ public class TestSecondaryWebUi { - @SuppressWarnings("deprecation") @Test public void testSecondaryWebUi() throws IOException { Configuration conf = new Configuration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index ceb6261db4..9233009628 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -121,7 +121,6 @@ public void tearDown() throws Exception { * start MiniDFScluster, create a file (to create edits) and do a checkpoint * @throws IOException */ - @SuppressWarnings("deprecation") public void createCheckPoint() throws IOException { LOG.info("--starting mini cluster"); // manage dirs parameter set to false @@ -303,7 +302,6 @@ public void testChkpointStartup1() throws IOException{ * 
secondary node copies fsimage and edits into correct separate directories. * @throws IOException */ - @SuppressWarnings("deprecation") public void testSNNStartup() throws IOException{ //setUpConfig(); LOG.info("--starting SecondNN startup test"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index f3a4638f10..412ab4a461 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -155,7 +155,6 @@ private void printStorages(FSImage image) { * 7. run doCheckpoint * 8. verify that all the image and edits files are the same. */ - @SuppressWarnings("deprecation") @Test public void testStorageRestore() throws Exception { int numDatanodes = 0; @@ -312,7 +311,6 @@ public void testDfsAdminCmd() throws Exception { * then try to perform a checkpoint. The NN should not serve up the image or * edits from the restored (empty) dir. 
*/ - @SuppressWarnings("deprecation") @Test public void testMultipleSecondaryCheckpoint() throws IOException { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm index 8fe515babe..0290c23b8a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm @@ -95,7 +95,7 @@ Hadoop MapReduce Next Generation - Cluster Setup *--------------------------------------+--------------------------------------+ | DataNode | HADOOP_DATANODE_OPTS | *--------------------------------------+--------------------------------------+ -| Backup NameNode | HADOOP_SECONDARYNAMENODE_OPTS | +| Secondary NameNode | HADOOP_SECONDARYNAMENODE_OPTS | *--------------------------------------+--------------------------------------+ | ResourceManager | YARN_RESOURCEMANAGER_OPTS | *--------------------------------------+--------------------------------------+ @@ -537,15 +537,15 @@ Hadoop MapReduce Next Generation - Cluster Setup It's recommended to have them share a Unix group, for e.g. <<>>. 
-*--------------------------------------+--------------------------------------+ -|| User:Group || Daemons | -*--------------------------------------+--------------------------------------+ -| hdfs:hadoop | NameNode, Backup NameNode, DataNode | -*--------------------------------------+--------------------------------------+ -| yarn:hadoop | ResourceManager, NodeManager | -*--------------------------------------+--------------------------------------+ -| mapred:hadoop | MapReduce JobHistory Server | -*--------------------------------------+--------------------------------------+ +*--------------------------------------+----------------------------------------------------------------------+ +|| User:Group || Daemons | +*--------------------------------------+----------------------------------------------------------------------+ +| hdfs:hadoop | NameNode, Secondary NameNode, Checkpoint Node, Backup Node, DataNode | +*--------------------------------------+----------------------------------------------------------------------+ +| yarn:hadoop | ResourceManager, NodeManager | +*--------------------------------------+----------------------------------------------------------------------+ +| mapred:hadoop | MapReduce JobHistory Server | +*--------------------------------------+----------------------------------------------------------------------+ * <<>> From 7c6988391710b4a12aa2fdb30510bec2adbf3312 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Tue, 24 Jan 2012 06:10:28 +0000 Subject: [PATCH 3/3] HDFS-442. 
dfsthroughput in test jar throws NPE (harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1235137 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d355bf4f80..7d5ea8cd22 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -361,6 +361,8 @@ Release 0.23.1 - UNRELEASED HDFS-2822. processMisReplicatedBlock incorrectly identifies under-construction blocks as under-replicated. (todd) + HDFS-442. dfsthroughput in test jar throws NPE (harsh) + Release 0.23.0 - 2011-11-01 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java index 77ac2652fd..db06c0d2ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java @@ -193,6 +193,10 @@ public int run(String[] args) throws IOException { BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024); String localDir = conf.get("mapred.temp.dir"); + if (localDir == null) { + localDir = conf.get("hadoop.tmp.dir"); + conf.set("mapred.temp.dir", localDir); + } dir = new LocalDirAllocator("mapred.temp.dir"); System.setProperty("test.build.data", localDir);