From fab9bc58ec03ea81cd5ce8a8746a4ee588f7bb08 Mon Sep 17 00:00:00 2001 From: cnauroth Date: Fri, 5 Sep 2014 11:03:58 -0700 Subject: [PATCH 01/18] HDFS-6979. hdfs.dll not produce .pdb files. Contributed by Chris Nauroth. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ hadoop-hdfs-project/hadoop-hdfs/pom.xml | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5a087d2770..680af55ce2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -605,6 +605,8 @@ Release 2.6.0 - UNRELEASED HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'. (Xiaoyu Yao via Arpit Agarwal) + HDFS-6979. hdfs.dll not produce .pdb files. (cnauroth) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. HDFS CLI admin tool for creating & deleting an diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 2c4ddf6437..ecdd1aeac4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -415,11 +415,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - + - + @@ -437,7 +437,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - + From b051327ab6a01774e1dad59e1e547dd16f603789 Mon Sep 17 00:00:00 2001 From: cnauroth Date: Fri, 5 Sep 2014 11:07:41 -0700 Subject: [PATCH 02/18] HDFS-6979. Fix minor error in CHANGES.txt. Contributed by Chris Nauroth. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 680af55ce2..7b8917b87e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -605,7 +605,7 @@ Release 2.6.0 - UNRELEASED HDFS-6831. Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'. (Xiaoyu Yao via Arpit Agarwal) - HDFS-6979. hdfs.dll not produce .pdb files. (cnauroth) + HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth) BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS From 9609b7303a98c8eff676c5a086b08b1ca9ab777c Mon Sep 17 00:00:00 2001 From: arp Date: Fri, 5 Sep 2014 11:08:03 -0700 Subject: [PATCH 03/18] HDFS-6862. Add missing timeout annotations to tests. (Contributed by Xiaoyu Yao) --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/TestHDFSServerPorts.java | 24 +++---- .../TestValidateConfigurationSettings.java | 16 ++--- .../ha/TestDelegationTokensWithHA.java | 63 ++++++++----------- .../server/namenode/ha/TestHAMetrics.java | 10 +-- .../namenode/ha/TestHAStateTransitions.java | 52 +++++++-------- .../namenode/ha/TestStandbyCheckpoints.java | 54 +++++++--------- 7 files changed, 97 insertions(+), 125 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7b8917b87e..0772ea6ae4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -607,6 +607,9 @@ Release 2.6.0 - UNRELEASED HDFS-6979. hdfs.dll does not produce .pdb files. (cnauroth) + HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via + Arpit Agarwal) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. 
HDFS CLI admin tool for creating & deleting an diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 59d1615025..ce8a4e75d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -17,14 +17,6 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import java.io.File; -import java.io.IOException; -import java.net.UnknownHostException; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -39,6 +31,14 @@ import org.apache.hadoop.net.DNS; import org.apache.hadoop.test.PathUtils; import org.junit.Test; +import java.io.File; +import java.io.IOException; +import java.net.UnknownHostException; + +import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + /** * This test checks correctness of port usage by hdfs components: * NameNode, DataNode, SecondaryNamenode and BackupNode. @@ -245,7 +245,7 @@ public class TestHDFSServerPorts { return true; } - @Test + @Test(timeout = 300000) public void testNameNodePorts() throws Exception { runTestNameNodePorts(false); runTestNameNodePorts(true); @@ -296,7 +296,7 @@ public class TestHDFSServerPorts { /** * Verify datanode port usage. */ - @Test + @Test(timeout = 300000) public void testDataNodePorts() throws Exception { NameNode nn = null; try { @@ -332,7 +332,7 @@ public class TestHDFSServerPorts { /** * Verify secondary namenode port usage. */ - @Test + @Test(timeout = 300000) public void testSecondaryNodePorts() throws Exception { NameNode nn = null; try { @@ -361,7 +361,7 @@ public class TestHDFSServerPorts { /** * Verify BackupNode port usage. 
*/ - @Test + @Test(timeout = 300000) public void testBackupNodePorts() throws Exception { NameNode nn = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java index 9221653a80..0cf1fed81e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestValidateConfigurationSettings.java @@ -17,11 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import java.io.File; -import java.io.IOException; -import java.net.BindException; -import java.util.Random; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; @@ -33,6 +28,11 @@ import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Test; +import java.io.File; +import java.io.IOException; +import java.net.BindException; +import java.util.Random; + /** * This class tests the validation of the configuration object when passed * to the NameNode @@ -49,7 +49,7 @@ public class TestValidateConfigurationSettings { * an exception * is thrown when trying to re-use the same port */ - @Test(expected = BindException.class) + @Test(expected = BindException.class, timeout = 300000) public void testThatMatchingRPCandHttpPortsThrowException() throws IOException { @@ -79,7 +79,7 @@ public class TestValidateConfigurationSettings { * Tests setting the rpc port to a different as the web port that an * exception is NOT thrown */ - @Test + @Test(timeout = 300000) public void testThatDifferentRPCandHttpPortsAreOK() throws IOException { @@ -117,7 +117,7 @@ public class TestValidateConfigurationSettings { * HDFS-3013: NameNode format command doesn't pick up * dfs.namenode.name.dir.NameServiceId configuration. 
*/ - @Test + @Test(timeout = 300000) public void testGenericKeysForNameNodeFormat() throws IOException { Configuration conf = new HdfsConfiguration(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index b2cc9197aa..33b5350222 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -17,27 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.URI; -import java.security.PrivilegedExceptionAction; -import java.util.Collection; -import java.util.HashSet; -import java.util.Map; - -import javax.servlet.http.HttpServletResponse; -import javax.ws.rs.core.Response; - +import com.google.common.base.Joiner; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -45,11 +25,7 @@ import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.HAUtil; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; @@ -75,7 +51,20 @@ import org.junit.Test; import org.mockito.internal.util.reflection.Whitebox; import org.mortbay.util.ajax.JSON; -import com.google.common.base.Joiner; +import javax.servlet.http.HttpServletResponse; +import javax.ws.rs.core.Response; +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.URI; +import java.security.PrivilegedExceptionAction; +import java.util.Collection; +import java.util.HashSet; +import java.util.Map; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.mock; /** * Test case for client support of delegation tokens in an HA cluster. @@ -128,8 +117,8 @@ public class TestDelegationTokensWithHA { cluster.shutdown(); } } - - @Test + + @Test(timeout = 300000) public void testDelegationTokenDFSApi() throws Exception { final Token token = getDelegationToken(fs, "JobTracker"); @@ -192,7 +181,7 @@ public class TestDelegationTokensWithHA { * Test if correct exception (StandbyException or RetriableException) can be * thrown during the NN failover. 
*/ - @Test + @Test(timeout = 300000) public void testDelegationTokenDuringNNFailover() throws Exception { EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer(); // stop the editLogTailer of nn1 @@ -260,7 +249,7 @@ public class TestDelegationTokensWithHA { doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL); } - @Test + @Test(timeout = 300000) public void testDelegationTokenWithDoAs() throws Exception { final Token token = getDelegationToken(fs, "JobTracker"); @@ -291,8 +280,8 @@ public class TestDelegationTokensWithHA { } }); } - - @Test + + @Test(timeout = 300000) public void testHAUtilClonesDelegationTokens() throws Exception { final Token token = getDelegationToken(fs, "JobTracker"); @@ -354,7 +343,7 @@ public class TestDelegationTokensWithHA { * exception if the URI is a logical URI. This bug fails the combination of * ha + mapred + security. */ - @Test + @Test(timeout = 300000) public void testDFSGetCanonicalServiceName() throws Exception { URI hAUri = HATestUtil.getLogicalUri(cluster); String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri, @@ -368,8 +357,8 @@ public class TestDelegationTokensWithHA { token.renew(dfs.getConf()); token.cancel(dfs.getConf()); } - - @Test + + @Test(timeout = 300000) public void testHdfsGetCanonicalServiceName() throws Exception { Configuration conf = dfs.getConf(); URI haUri = HATestUtil.getLogicalUri(cluster); @@ -390,7 +379,7 @@ public class TestDelegationTokensWithHA { * password. (HDFS-6475). With StandbyException, the client can failover to try * activeNN. */ - @Test + @Test(timeout = 300000) public void testDelegationTokenStandbyNNAppearFirst() throws Exception { // make nn0 the standby NN, and nn1 the active NN cluster.transitionToStandby(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java index cc85c83b3d..1cd76f48fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAMetrics.java @@ -17,9 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -33,14 +30,17 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.io.IOUtils; import org.junit.Test; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + /** * Make sure HA-related metrics are updated and reported appropriately. 
*/ public class TestHAMetrics { private static final Log LOG = LogFactory.getLog(TestHAMetrics.class); - - @Test + + @Test(timeout = 300000) public void testHAMetrics() throws Exception { Configuration conf = new Configuration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java index e33d807634..f7474b84e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java @@ -17,20 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.net.URI; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.locks.ReentrantReadWriteLock; - +import com.google.common.util.concurrent.Uninterruptibles; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -40,13 +27,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HAUtil; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.*; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -66,7 +47,16 @@ import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; -import com.google.common.util.concurrent.Uninterruptibles; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.URI; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import static org.junit.Assert.*; /** * Tests state transition from active->standby, and manual failover @@ -92,7 +82,7 @@ public class TestHAStateTransitions { * active and standby mode, making sure it doesn't * double-play any edits. */ - @Test + @Test(timeout = 300000) public void testTransitionActiveToStandby() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) @@ -148,7 +138,7 @@ public class TestHAStateTransitions { * Test that transitioning a service to the state that it is already * in is a nop, specifically, an exception is not thrown. 
*/ - @Test + @Test(timeout = 300000) public void testTransitionToCurrentStateIsANop() throws Exception { Configuration conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1L); @@ -220,7 +210,7 @@ public class TestHAStateTransitions { /** * Tests manual failover back and forth between two NameNodes. */ - @Test + @Test(timeout = 300000) public void testManualFailoverAndFailback() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) @@ -346,7 +336,7 @@ public class TestHAStateTransitions { /** * Test that delegation tokens continue to work after the failover. */ - @Test + @Test(timeout = 300000) public void testDelegationTokensAfterFailover() throws IOException { Configuration conf = new Configuration(); conf.setBoolean( @@ -383,7 +373,7 @@ public class TestHAStateTransitions { * Tests manual failover back and forth between two NameNodes * for federation cluster with two namespaces. */ - @Test + @Test(timeout = 300000) public void testManualFailoverFailbackFederationHA() throws Exception { Configuration conf = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) @@ -403,12 +393,12 @@ public class TestHAStateTransitions { } } - @Test + @Test(timeout = 300000) public void testFailoverWithEmptyInProgressEditLog() throws Exception { testFailoverAfterCrashDuringLogRoll(false); } - - @Test + + @Test(timeout = 300000) public void testFailoverWithEmptyInProgressEditLogWithHeader() throws Exception { testFailoverAfterCrashDuringLogRoll(true); @@ -570,7 +560,7 @@ public class TestHAStateTransitions { * by virtue of the fact that it wouldn't work properly if the proxies * returned were not for the correct NNs. */ - @Test + @Test(timeout = 300000) public void testIsAtLeastOneActive() throws Exception { MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) .nnTopology(MiniDFSNNTopology.simpleHATopology()) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index e9b91249c4..b00f91647d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -17,23 +17,11 @@ */ package org.apache.hadoop.hdfs.server.namenode.ha; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.File; -import java.io.IOException; -import java.io.OutputStream; -import java.net.BindException; -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.net.URI; -import java.net.URL; -import java.util.List; -import java.util.Random; - +import com.google.common.base.Supplier; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import com.google.common.io.Files; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -43,14 +31,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import 
org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; -import org.apache.hadoop.hdfs.server.namenode.FSImage; -import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.namenode.JournalSet; -import org.apache.hadoop.hdfs.server.namenode.NNStorage; +import org.apache.hadoop.hdfs.server.namenode.*; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.io.compress.CompressionOutputStream; @@ -64,11 +46,19 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import com.google.common.io.Files; +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.net.BindException; +import java.net.URI; +import java.net.URL; +import java.util.List; +import java.util.Random; + +import static org.junit.Assert.*; public class TestStandbyCheckpoints { private static final int NUM_DIRS_IN_LOG = 200000; @@ -143,7 +133,7 @@ public class TestStandbyCheckpoints { } } - @Test + @Test(timeout = 300000) public void testSBNCheckpoints() throws Exception { JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1); @@ -185,7 +175,7 @@ public class TestStandbyCheckpoints { * checkpoint for the given txid, but this should not cause * an abort, etc. */ - @Test + @Test(timeout = 300000) public void testBothNodesInStandbyState() throws Exception { doEdits(0, 10); @@ -216,7 +206,7 @@ public class TestStandbyCheckpoints { * same txid, which is a no-op. This test makes sure this doesn't * cause any problem. */ - @Test + @Test(timeout = 300000) public void testCheckpointWhenNoNewTransactionsHappened() throws Exception { // Checkpoint as fast as we can, in a tight loop. From 71269f70971dc7aa7bcb5e78b19cb3f04fdaa2f4 Mon Sep 17 00:00:00 2001 From: arp Date: Fri, 5 Sep 2014 11:14:10 -0700 Subject: [PATCH 04/18] HDFS-6998. warning message 'ssl.client.truststore.location has not been set' gets printed for hftp command. 
(Contributed by Xiaoyu Yao) --- .../apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java index aabb815db3..4b81e17095 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java @@ -212,7 +212,7 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory { LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation); trustManagers = new TrustManager[]{trustManager}; } else { - LOG.warn("The property '" + locationProperty + "' has not been set, " + + LOG.debug("The property '" + locationProperty + "' has not been set, " + "no TrustStore will be loaded"); trustManagers = null; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0772ea6ae4..5c4aeea234 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -610,6 +610,9 @@ Release 2.6.0 - UNRELEASED HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via Arpit Agarwal) + HDFS-6998. warning message 'ssl.client.truststore.location has not been + set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. HDFS CLI admin tool for creating & deleting an From 7a62515c8628430a163415e42c9526a123db213c Mon Sep 17 00:00:00 2001 From: Allen Wittenauer Date: Fri, 5 Sep 2014 11:31:49 -0700 Subject: [PATCH 05/18] HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked in bin/hdfs (aw) --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/bin/hadoop-functions.sh | 4 ++-- hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 15 +++++++-------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d38fae955b..afd1cc7937 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -330,6 +330,9 @@ Trunk (Unreleased) HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw) + HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked + in bin/hdfs (aw) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index d430188cbf..1677cc06bf 100644 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -644,9 +644,9 @@ function hadoop_verify_secure_prereq # this. # ${EUID} comes from the shell itself! - if [[ "${EUID}" -ne 0 ]] || [[ -n "${HADOOP_SECURE_COMMAND}" ]]; then + if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then hadoop_error "ERROR: You must be a privileged in order to run a secure serice." 
- return 1 + exit 1 else return 0 fi diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index 6872a0eb1a..2300dbfcc5 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -225,14 +225,13 @@ esac if [[ -n "${secure_service}" ]]; then HADOOP_SECURE_USER="${secure_user}" - if hadoop_verify_secure_prereq; then - hadoop_setup_secure_service - priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out" - priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err" - priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid" - daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" - daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid" - fi + hadoop_verify_secure_prereq + hadoop_setup_secure_service + priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.out" + priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.err" + priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${COMMAND-$HOSTNAME}.pid" + daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" + daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${COMMAND}.pid" else daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}-${HOSTNAME}.out" daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${COMMAND}.pid" From 0571b4561bad7e0230920e52d3758a3658fcf20d Mon Sep 17 00:00:00 2001 From: Karthik Kambatla Date: Fri, 5 Sep 2014 13:08:59 -0700 Subject: [PATCH 06/18] HADOOP-11065. Rat check should exclude **/build/**. (kasha) --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ pom.xml | 1 + 2 files changed, 3 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index afd1cc7937..d20bf086d3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -777,6 +777,8 @@ Release 2.5.1 - UNRELEASED HADOOP-11001. Fix test-patch to work with the git repo. (kasha) + HADOOP-11065. Rat check should exclude "**/build/**". (kasha) + Release 2.5.0 - 2014-08-11 INCOMPATIBLE CHANGES diff --git a/pom.xml b/pom.xml index a4f824102c..5cc30c24be 100644 --- a/pom.xml +++ b/pom.xml @@ -324,6 +324,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs .gitignore .git/** .idea/** + **/build/** From 21c0cdeec1034b18ad3a2d5b71941a84bcea5ebe Mon Sep 17 00:00:00 2001 From: arp Date: Fri, 5 Sep 2014 11:18:20 -0700 Subject: [PATCH 07/18] HADOOP-11067 [HDFS-6998]. Fix CHANGES.txt --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index d20bf086d3..88804cdd03 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -760,6 +760,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11063. KMS cannot deploy on Windows, because class names are too long. (cnauroth) + HADOOP-11067. 
warning message 'ssl.client.truststore.location has not + been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5c4aeea234..0772ea6ae4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -610,9 +610,6 @@ Release 2.6.0 - UNRELEASED HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via Arpit Agarwal) - HDFS-6998. warning message 'ssl.client.truststore.location has not been - set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal) - BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. HDFS CLI admin tool for creating & deleting an From e6420fec0af9b8d4f424098688ae4926ff527fcf Mon Sep 17 00:00:00 2001 From: Jonathan Eagles Date: Fri, 5 Sep 2014 19:42:40 -0500 Subject: [PATCH 08/18] YARN-2508. Cross Origin configuration parameters prefix are not honored (Mit Desai via jeagles) --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../webapp/CrossOriginFilterInitializer.java | 12 +++++++++++- .../webapp/TestCrossOriginFilterInitializer.java | 7 ++----- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 1a5ea07914..34a206af6e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -181,6 +181,9 @@ Release 2.6.0 - UNRELEASED YARN-2511. Allowed all origins by default when CrossOriginFilter is enabled. (Jonathan Eagles via zjshen) + YARN-2508. Cross Origin configuration parameters prefix are not honored + (Mit Desai via jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java index 69e0188137..148cc631ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilterInitializer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.timeline.webapp; +import java.util.HashMap; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -37,6 +38,15 @@ public class CrossOriginFilterInitializer extends FilterInitializer { } static Map getFilterParameters(Configuration conf) { - return conf.getValByRegex(PREFIX); + Map filterParams = + new HashMap(); + for (Map.Entry entry : conf.getValByRegex(PREFIX) + .entrySet()) { + String name = entry.getKey(); + String value = entry.getValue(); + name = name.substring(PREFIX.length()); + filterParams.put(name, value); + } + return filterParams; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java index 3199aac508..cf26368acd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilterInitializer.java @@ -42,11 +42,8 @@ public class TestCrossOriginFilterInitializer { CrossOriginFilterInitializer.getFilterParameters(conf); // retrieve values - String rootvalue = - filterParameters.get(CrossOriginFilterInitializer.PREFIX + "rootparam"); - String nestedvalue = - filterParameters.get(CrossOriginFilterInitializer.PREFIX - + "nested.param"); + String rootvalue = filterParameters.get("rootparam"); + String nestedvalue = filterParameters.get("nested.param"); String outofscopeparam = filterParameters.get("outofscopeparam"); // verify expected values are in place From 71c8d735f5038e3b516947f12180d7568b6979dc Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 5 Sep 2014 14:09:22 -0700 Subject: [PATCH 09/18] HADOOP-11070. Create MiniKMS for testing. (tucu) --- .../hadoop-common/CHANGES.txt | 2 + hadoop-common-project/hadoop-kms/pom.xml | 4 +- .../hadoop/crypto/key/kms/server/MiniKMS.java | 197 ++++++++++++++++++ .../hadoop/crypto/key/kms/server/TestKMS.java | 82 +------- 4 files changed, 211 insertions(+), 74 deletions(-) create mode 100644 hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 88804cdd03..9aef131ef8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -507,6 +507,8 @@ Release 2.6.0 - UNRELEASED HADOOP-11060. Create a CryptoCodec test that verifies interoperability between the JCE and OpenSSL implementations. (hitliuyi via tucu) + HADOOP-11070. Create MiniKMS for testing. (tucu) + OPTIMIZATIONS HADOOP-10838. Byte array native checksumming. (James Thomas via todd) diff --git a/hadoop-common-project/hadoop-kms/pom.xml b/hadoop-common-project/hadoop-kms/pom.xml index 3bb97c56b7..629ffdac70 100644 --- a/hadoop-common-project/hadoop-kms/pom.xml +++ b/hadoop-common-project/hadoop-kms/pom.xml @@ -222,9 +222,9 @@ - + - + diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java new file mode 100644 index 0000000000..5a6d4c5701 --- /dev/null +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java @@ -0,0 +1,197 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.crypto.key.kms.server; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.mortbay.jetty.Connector; +import org.mortbay.jetty.Server; +import org.mortbay.jetty.security.SslSocketConnector; +import org.mortbay.jetty.webapp.WebAppContext; + +import java.io.File; +import java.io.FileWriter; +import java.io.Writer; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.ServerSocket; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; + +public class MiniKMS { + + private static Server createJettyServer(String keyStore, String password) { + try { + boolean ssl = keyStore != null; + InetAddress localhost = InetAddress.getByName("localhost"); + String host = "localhost"; + ServerSocket ss = new ServerSocket(0, 50, localhost); + int port = ss.getLocalPort(); + ss.close(); + Server server = new Server(0); + if (!ssl) { + server.getConnectors()[0].setHost(host); + server.getConnectors()[0].setPort(port); + } else { + SslSocketConnector c = new SslSocketConnector(); + c.setHost(host); + c.setPort(port); + c.setNeedClientAuth(false); + c.setKeystore(keyStore); + c.setKeystoreType("jks"); + c.setKeyPassword(password); + server.setConnectors(new Connector[]{c}); + } + return server; + } catch (Exception ex) { + throw new RuntimeException("Could not start embedded servlet container, " + + ex.getMessage(), ex); + } + } + + private static URL getJettyURL(Server server) { + boolean ssl = server.getConnectors()[0].getClass() + == SslSocketConnector.class; + try { + String scheme = (ssl) ? 
"https" : "http"; + return new URL(scheme + "://" + + server.getConnectors()[0].getHost() + ":" + + server.getConnectors()[0].getPort()); + } catch (MalformedURLException ex) { + throw new RuntimeException("It should never happen, " + ex.getMessage(), + ex); + } + } + + public static class Builder { + private File kmsConfDir; + private String log4jConfFile; + private File keyStoreFile; + private String keyStorePassword; + + public Builder() { + kmsConfDir = new File("target/test-classes").getAbsoluteFile(); + log4jConfFile = "kms-log4j.properties"; + } + + public Builder setKmsConfDir(File confDir) { + Preconditions.checkNotNull(confDir, "KMS conf dir is NULL"); + Preconditions.checkArgument(confDir.exists(), + "KMS conf dir does not exist"); + kmsConfDir = confDir; + return this; + } + + public Builder setLog4jConfFile(String log4jConfFile) { + Preconditions.checkNotNull(log4jConfFile, "log4jconf file is NULL"); + this.log4jConfFile = log4jConfFile; + return this; + } + + public Builder setSslConf(File keyStoreFile, String keyStorePassword) { + Preconditions.checkNotNull(keyStoreFile, "keystore file is NULL"); + Preconditions.checkNotNull(keyStorePassword, "keystore password is NULL"); + Preconditions.checkArgument(keyStoreFile.exists(), + "keystore file does not exist"); + this.keyStoreFile = keyStoreFile; + this.keyStorePassword = keyStorePassword; + return this; + } + + public MiniKMS build() { + Preconditions.checkArgument(kmsConfDir.exists(), + "KMS conf dir does not exist"); + return new MiniKMS(kmsConfDir.getAbsolutePath(), log4jConfFile, + (keyStoreFile != null) ? keyStoreFile.getAbsolutePath() : null, + keyStorePassword); + } + } + + private String kmsConfDir; + private String log4jConfFile; + private String keyStore; + private String keyStorePassword; + private Server jetty; + private URL kmsURL; + + public MiniKMS(String kmsConfDir, String log4ConfFile, String keyStore, + String password) { + this.kmsConfDir = kmsConfDir; + this.log4jConfFile = log4ConfFile; + this.keyStore = keyStore; + this.keyStorePassword = password; + } + + public void start() throws Exception { + System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, kmsConfDir); + File aclsFile = new File(kmsConfDir, "kms-acls.xml"); + if (!aclsFile.exists()) { + Configuration acls = new Configuration(false); + Writer writer = new FileWriter(aclsFile); + acls.writeXml(writer); + writer.close(); + } + File coreFile = new File(kmsConfDir, "core-site.xml"); + if (!coreFile.exists()) { + Configuration core = new Configuration(); + Writer writer = new FileWriter(coreFile); + core.writeXml(writer); + writer.close(); + } + File kmsFile = new File(kmsConfDir, "kms-site.xml"); + if (!kmsFile.exists()) { + Configuration kms = new Configuration(false); + kms.set("hadoop.security.key.provider.path", + "jceks://file@" + kmsConfDir + "/kms.keystore"); + kms.set("hadoop.kms.authentication.type", "simple"); + Writer writer = new FileWriter(kmsFile); + kms.writeXml(writer); + writer.close(); + } + System.setProperty("log4j.configuration", log4jConfFile); + jetty = createJettyServer(keyStore, keyStorePassword); + ClassLoader cl = Thread.currentThread().getContextClassLoader(); + URL url = cl.getResource("kms-webapp"); + if (url == null) { + throw new RuntimeException( + "Could not find kms-webapp/ dir in test classpath"); + } + WebAppContext context = new WebAppContext(url.getPath(), "/kms"); + jetty.addHandler(context); + jetty.start(); + kmsURL = new URL(getJettyURL(jetty), "kms"); + } + + public URL getKMSUrl() { + return kmsURL; + } + + 
public void stop() { + if (jetty != null && jetty.isRunning()) { + try { + jetty.stop(); + jetty = null; + } catch (Exception ex) { + throw new RuntimeException("Could not stop MiniKMS embedded Jetty, " + + ex.getMessage(), ex); + } + } + } + +} diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index 52f6354cea..f381fa099c 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -36,10 +36,6 @@ import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import org.mortbay.jetty.Connector; -import org.mortbay.jetty.Server; -import org.mortbay.jetty.security.SslSocketConnector; -import org.mortbay.jetty.webapp.WebAppContext; import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosPrincipal; @@ -52,7 +48,6 @@ import java.io.IOException; import java.io.Writer; import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.MalformedURLException; import java.net.ServerSocket; import java.net.SocketTimeoutException; import java.net.URI; @@ -91,49 +86,6 @@ public class TestKMS { return file; } - public static Server createJettyServer(String keyStore, String password) { - try { - boolean ssl = keyStore != null; - InetAddress localhost = InetAddress.getByName("localhost"); - String host = "localhost"; - ServerSocket ss = new ServerSocket(0, 50, localhost); - int port = ss.getLocalPort(); - ss.close(); - Server server = new Server(0); - if (!ssl) { - server.getConnectors()[0].setHost(host); - server.getConnectors()[0].setPort(port); - } else { - SslSocketConnector c = new SslSocketConnector(); - c.setHost(host); - c.setPort(port); - c.setNeedClientAuth(false); - c.setKeystore(keyStore); - c.setKeystoreType("jks"); - c.setKeyPassword(password); - server.setConnectors(new Connector[]{c}); - } - return server; - } catch (Exception ex) { - throw new RuntimeException("Could not start embedded servlet container, " - + ex.getMessage(), ex); - } - } - - public static URL getJettyURL(Server server) { - boolean ssl = server.getConnectors()[0].getClass() - == SslSocketConnector.class; - try { - String scheme = (ssl) ? 
"https" : "http"; - return new URL(scheme + "://" + - server.getConnectors()[0].getHost() + ":" + - server.getConnectors()[0].getPort()); - } catch (MalformedURLException ex) { - throw new RuntimeException("It should never happen, " + ex.getMessage(), - ex); - } - } - public static abstract class KMSCallable implements Callable { private URL kmsUrl; @@ -144,33 +96,19 @@ public class TestKMS { protected void runServer(String keystore, String password, File confDir, KMSCallable callable) throws Exception { - System.setProperty(KMSConfiguration.KMS_CONFIG_DIR, - confDir.getAbsolutePath()); - System.setProperty("log4j.configuration", "log4j.properties"); - Server jetty = createJettyServer(keystore, password); + MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir) + .setLog4jConfFile("log4j.properties"); + if (keystore != null) { + miniKMSBuilder.setSslConf(new File(keystore), password); + } + MiniKMS miniKMS = miniKMSBuilder.build(); + miniKMS.start(); try { - ClassLoader cl = Thread.currentThread().getContextClassLoader(); - URL url = cl.getResource("webapp"); - if (url == null) { - throw new RuntimeException( - "Could not find webapp/ dir in test classpath"); - } - WebAppContext context = new WebAppContext(url.getPath(), "/kms"); - jetty.addHandler(context); - jetty.start(); - url = new URL(getJettyURL(jetty), "kms"); - System.out.println("Test KMS running at: " + url); - callable.kmsUrl = url; + System.out.println("Test KMS running at: " + miniKMS.getKMSUrl()); + callable.kmsUrl = miniKMS.getKMSUrl(); callable.call(); } finally { - if (jetty != null && jetty.isRunning()) { - try { - jetty.stop(); - } catch (Exception ex) { - throw new RuntimeException("Could not stop embedded Jetty, " + - ex.getMessage(), ex); - } - } + miniKMS.stop(); } } From 0f3c19c1bb9e341d8aed132ba3eb9e7fc7588306 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 5 Sep 2014 10:04:07 -0700 Subject: [PATCH 10/18] HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to determine if in proxyuser mode or not. (tucu) --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/crypto/key/kms/KMSClientProvider.java | 6 +++--- .../org/apache/hadoop/crypto/key/kms/server/TestKMS.java | 6 +++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9aef131ef8..c77fddc789 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -765,6 +765,9 @@ Release 2.6.0 - UNRELEASED HADOOP-11067. warning message 'ssl.client.truststore.location has not been set' gets printed for hftp command. (Xiaoyu Yao via Arpit Agarwal) + HADOOP-11069. KMSClientProvider should use getAuthenticationMethod() to + determine if in proxyuser mode or not. 
(tucu) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index a4e336c402..acbe096e26 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -385,9 +385,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension, // if current UGI is different from UGI at constructor time, behave as // proxyuser UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser(); - final String doAsUser = - (loginUgi.getShortUserName().equals(currentUgi.getShortUserName())) - ? null : currentUgi.getShortUserName(); + final String doAsUser = (currentUgi.getAuthenticationMethod() == + UserGroupInformation.AuthenticationMethod.PROXY) + ? currentUgi.getShortUserName() : null; // creating the HTTP connection using the current UGI at constructor time conn = loginUgi.doAs(new PrivilegedExceptionAction() { diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java index f381fa099c..b921c84dc2 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java @@ -1157,7 +1157,7 @@ public class TestKMS { final URI uri = createKMSUri(getKMSUrl()); // proxyuser client using kerberos credentials - UserGroupInformation clientUgi = UserGroupInformation. + final UserGroupInformation clientUgi = UserGroupInformation. loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath()); clientUgi.doAs(new PrivilegedExceptionAction() { @Override @@ -1167,7 +1167,7 @@ public class TestKMS { // authorized proxyuser UserGroupInformation fooUgi = - UserGroupInformation.createRemoteUser("foo"); + UserGroupInformation.createProxyUser("foo", clientUgi); fooUgi.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { @@ -1179,7 +1179,7 @@ public class TestKMS { // unauthorized proxyuser UserGroupInformation foo1Ugi = - UserGroupInformation.createRemoteUser("foo1"); + UserGroupInformation.createProxyUser("foo1", clientUgi); foo1Ugi.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { From 3b35f81603bbfae119762b50bcb46de70a421368 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Fri, 5 Sep 2014 22:33:48 -0700 Subject: [PATCH 11/18] HDFS-6986. DistributedFileSystem must get delegation tokens from configured KeyProvider. (zhz via tucu) --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../org/apache/hadoop/hdfs/DFSClient.java | 4 ++ .../hadoop/hdfs/DistributedFileSystem.java | 24 +++++++++++ .../hadoop/hdfs/TestEncryptionZones.java | 43 +++++++++++++++++++ 4 files changed, 74 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0772ea6ae4..333bdceb3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -711,6 +711,9 @@ Release 2.6.0 - UNRELEASED HDFS-6714. 
TestBlocksScheduledCounter#testBlocksScheduledCounter should shutdown cluster (vinayakumarb) + HDFS-6986. DistributedFileSystem must get delegation tokens from configured + KeyProvider. (zhz via tucu) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 8daf912431..e4215f0602 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -3084,4 +3084,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, DFSHedgedReadMetrics getHedgedReadMetrics() { return HEDGED_READ_METRIC; } + + public KeyProviderCryptoExtension getKeyProvider() { + return provider; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index bf7d62ebbd..dbdf5c1874 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -84,8 +84,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; +import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -1946,6 +1948,28 @@ public class DistributedFileSystem extends FileSystem { }.resolve(this, absF); } + @Override + public Token[] addDelegationTokens( + final String renewer, Credentials credentials) throws IOException { + Token[] tokens = super.addDelegationTokens(renewer, credentials); + if (dfs.getKeyProvider() != null) { + KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension = + KeyProviderDelegationTokenExtension. + createKeyProviderDelegationTokenExtension(dfs.getKeyProvider()); + Token[] kpTokens = keyProviderDelegationTokenExtension. + addDelegationTokens(renewer, credentials); + if (tokens != null && kpTokens != null) { + Token[] all = new Token[tokens.length + kpTokens.length]; + System.arraycopy(tokens, 0, all, 0, tokens.length); + System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length); + tokens = all; + } else { + tokens = (tokens != null) ? 
tokens : kpTokens; + } + } + return tokens; + } + public DFSInotifyEventInputStream getInotifyEventStream() throws IOException { return dfs.getInotifyEventStream(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 1a13332a14..1cf9263c25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.FSTestWrapper; import org.apache.hadoop.fs.FileContext; @@ -51,12 +52,22 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector; import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension; +import org.apache.hadoop.io.Text; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; +import static org.mockito.Mockito.withSettings; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyString; import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual; import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains; @@ -91,6 +102,7 @@ public class TestEncryptionZones { conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" ); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); // Lower the batch size for testing conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2); @@ -753,4 +765,35 @@ public class TestEncryptionZones { e.getCause()); } } + + /** + * Tests obtaining delegation token from stored key + */ + @Test(timeout = 120000) + public void testDelegationToken() throws Exception { + UserGroupInformation.createRemoteUser("JobTracker"); + DistributedFileSystem dfs = cluster.getFileSystem(); + KeyProviderCryptoExtension keyProvider = Mockito.mock(KeyProviderCryptoExtension.class, + withSettings().extraInterfaces( + DelegationTokenExtension.class, + CryptoExtension.class)); + Mockito.when(keyProvider.getConf()).thenReturn(conf); + byte[] testIdentifier = "Test identifier for delegation token".getBytes(); + + Token testToken = new Token(testIdentifier, new byte[0], + new Text(), new Text()); + Mockito.when(((DelegationTokenExtension)keyProvider). + addDelegationTokens(anyString(), (Credentials)any())). 
+ thenReturn(new Token[] { testToken }); + + dfs.getClient().provider = keyProvider; + + Credentials creds = new Credentials(); + final Token tokens[] = dfs.addDelegationTokens("JobTracker", creds); + DistributedFileSystem.LOG.debug("Delegation tokens: " + + Arrays.asList(tokens)); + Assert.assertEquals(2, tokens.length); + Assert.assertEquals(tokens[1], testToken); + Assert.assertEquals(1, creds.numberOfTokens()); + } } From 88209ce181b5ecc55c0ae2bceff4893ab4817e88 Mon Sep 17 00:00:00 2001 From: Konstantin V Shvachko Date: Sat, 6 Sep 2014 12:07:52 -0700 Subject: [PATCH 12/18] HDFS-6940. Refactoring to allow ConsensusNode implementation. Contributed by Konstantin Shvachko. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../server/blockmanagement/BlockManager.java | 23 ++++++++-- .../blockmanagement/DatanodeManager.java | 6 ++- .../blockmanagement/HostFileManager.java | 4 ++ .../hdfs/server/namenode/FSNamesystem.java | 46 ++++++++++--------- .../hdfs/server/namenode/NameNodeAdapter.java | 2 +- 6 files changed, 57 insertions(+), 26 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 333bdceb3d..4412b30710 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -444,6 +444,8 @@ Release 2.6.0 - UNRELEASED HDFS-6376. Distcp data between two HA clusters requires another configuration. (Dave Marion and Haohui Mai via jing9) + HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv) + OPTIMIZATIONS HDFS-6690. Deduplicate xattr names in memory. (wang) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 8470680a98..6176188353 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -164,7 +164,7 @@ public class BlockManager { final BlocksMap blocksMap; /** Replication thread. 
*/ - final Daemon replicationThread = new Daemon(new ReplicationMonitor()); + Daemon replicationThread; /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */ final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); @@ -263,6 +263,7 @@ public class BlockManager { this.namesystem = namesystem; datanodeManager = new DatanodeManager(this, namesystem, conf); heartbeatManager = datanodeManager.getHeartbeatManager(); + setReplicationMonitor(new ReplicationMonitor()); final long pendingPeriod = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY, @@ -394,7 +395,23 @@ public class BlockManager { lifetimeMin*60*1000L, 0, null, encryptionAlgorithm); } } - + + public long getReplicationRecheckInterval() { + return replicationRecheckInterval; + } + + public AtomicLong excessBlocksCount() { + return excessBlocksCount; + } + + public void clearInvalidateBlocks() { + invalidateBlocks.clear(); + } + + void setReplicationMonitor(Runnable replicationMonitor) { + replicationThread = new Daemon(replicationMonitor); + } + public void setBlockPoolId(String blockPoolId) { if (isBlockTokenEnabled()) { blockTokenSecretManager.setBlockPoolId(blockPoolId); @@ -1616,7 +1633,7 @@ public class BlockManager { * If there were any replication requests that timed out, reap them * and put them back into the neededReplication queue */ - private void processPendingReplications() { + void processPendingReplications() { Block[] timedOutItems = pendingReplications.getTimedOutBlocks(); if (timedOutItems != null) { namesystem.writeLock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 709f060d23..55d616f699 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -1053,7 +1053,7 @@ public class DatanodeManager { * 3. Added to exclude --> start decommission. * 4. Removed from exclude --> stop decommission. */ - private void refreshDatanodes() { + void refreshDatanodes() { for(DatanodeDescriptor node : datanodeMap.values()) { // Check if not include. 
if (!hostFileManager.isIncluded(node)) { @@ -1586,5 +1586,9 @@ public class DatanodeManager { public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) { this.shouldSendCachingCommands = shouldSendCachingCommands; } + + public HostFileManager getHostFileManager() { + return this.hostFileManager; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java index 0b8d6c5bc1..7db23e4150 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java @@ -129,6 +129,10 @@ class HostFileManager { void refresh(String includeFile, String excludeFile) throws IOException { HostSet newIncludes = readFile("included", includeFile); HostSet newExcludes = readFile("excluded", excludeFile); + setHosts(newIncludes, newExcludes); + } + + void setHosts(HostSet newIncludes, HostSet newExcludes) { synchronized (this) { includes = newIncludes; excludes = newExcludes; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index c1744f6421..a6b98a559d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -978,7 +978,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return Collections.unmodifiableList(auditLoggers); } - private void loadFSImage(StartupOption startOpt) throws IOException { + protected void loadFSImage(StartupOption startOpt) throws IOException { final FSImage fsImage = getFSImage(); // format before starting up if requested @@ -1026,7 +1026,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, imageLoadComplete(); } - private void startSecretManager() { + protected void startSecretManager() { if (dtSecretManager != null) { try { dtSecretManager.startThreads(); @@ -1038,7 +1038,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } } - private void startSecretManagerIfNecessary() { + protected void startSecretManagerIfNecessary() { boolean shouldRun = shouldUseDelegationTokens() && !isInSafeMode() && getEditLog().isOpenForWrite(); boolean running = dtSecretManager.isRunning(); @@ -1188,7 +1188,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return haEnabled && inActiveState() && startingActiveService; } - private boolean shouldUseDelegationTokens() { + protected boolean shouldUseDelegationTokens() { return UserGroupInformation.isSecurityEnabled() || alwaysUseDelegationTokensForTests; } @@ -2729,6 +2729,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @throws UnresolvedLinkException * @throws IOException */ + protected LocatedBlock prepareFileForWrite(String src, INodeFile file, String leaseHolder, String clientMachine, boolean writeToEditLog, @@ -3185,6 +3186,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return new FileState(pendingFile, src); } + protected LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs, long offset) throws IOException { 
LocatedBlock lBlk = new LocatedBlock( @@ -3302,8 +3304,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return true; } - private INodeFile checkLease(String src, String holder, INode inode, - long fileId) + protected INodeFile checkLease(String src, String holder, INode inode, + long fileId) throws LeaseExpiredException, FileNotFoundException { assert hasReadLock(); final String ident = src + " (inode " + fileId + ")"; @@ -4420,7 +4422,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return leaseManager.reassignLease(lease, src, newHolder); } - private void commitOrCompleteLastBlock(final INodeFile fileINode, + protected void commitOrCompleteLastBlock(final INodeFile fileINode, final Block commitBlock) throws IOException { assert hasWriteLock(); Preconditions.checkArgument(fileINode.isUnderConstruction()); @@ -4816,6 +4818,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @return an array of datanode commands * @throws IOException */ + protected HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg, StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress, int failedVolumes) @@ -4865,8 +4868,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @param file * @param logRetryCache */ - private void persistBlocks(String path, INodeFile file, - boolean logRetryCache) { + protected void persistBlocks(String path, INodeFile file, + boolean logRetryCache) { assert hasWriteLock(); Preconditions.checkArgument(file.isUnderConstruction()); getEditLog().logUpdateBlocks(path, file, logRetryCache); @@ -5297,7 +5300,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * @param path * @param file */ - private void persistNewBlock(String path, INodeFile file) { + protected void persistNewBlock(String path, INodeFile file) { Preconditions.checkArgument(file.isUnderConstruction()); getEditLog().logAddBlock(path, file); if (NameNode.stateChangeLog.isDebugEnabled()) { @@ -7175,7 +7178,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * * @return true if delegation token operation is allowed */ - private boolean isAllowedDelegationTokenOp() throws IOException { + protected boolean isAllowedDelegationTokenOp() throws IOException { AuthenticationMethod authMethod = getConnectionAuthenticationMethod(); if (UserGroupInformation.isSecurityEnabled() && (authMethod != AuthenticationMethod.KERBEROS) @@ -7342,7 +7345,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats, final List live = new ArrayList(); blockManager.getDatanodeManager().fetchDatanodes(live, null, true); for (DatanodeDescriptor node : live) { - Map innerinfo = ImmutableMap.builder() + info.put(node.getHostName(), getLiveNodeInfo(node)); + } + return JSON.toString(info); + } + + protected Map getLiveNodeInfo(DatanodeDescriptor node) { + return ImmutableMap.builder() .put("infoAddr", node.getInfoAddr()) .put("infoSecureAddr", node.getInfoSecureAddr()) .put("xferaddr", node.getXferAddr()) @@ -7360,10 +7369,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent()) .put("volfails", node.getVolumeFailures()) .build(); - - info.put(node.getHostName(), innerinfo); - } - return JSON.toString(info); } /** @@ -7648,17 +7653,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats, public ReentrantLock getLongReadLockForTests() { return fsLock.longReadLock; } - - @VisibleForTesting - public 
SafeModeInfo getSafeModeInfoForTests() { - return safeMode; - } @VisibleForTesting public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { this.nnResourceChecker = nnResourceChecker; } + public SafeModeInfo getSafeModeInfo() { + return safeMode; + } + @Override public boolean isAvoidingStaleDataNodesForWrite() { return this.blockManager.getDatanodeManager() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java index c32ed67d6e..d65d1ff5be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java @@ -223,7 +223,7 @@ public class NameNodeAdapter { * if safemode is not running. */ public static int getSafeModeSafeBlocks(NameNode nn) { - SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests(); + SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo(); if (smi == null) { return -1; } From cbea1b10efd871d04c648af18449dc724685db74 Mon Sep 17 00:00:00 2001 From: cnauroth Date: Sat, 6 Sep 2014 20:05:07 -0700 Subject: [PATCH 13/18] YARN-2519. Credential Provider related unit tests failed on Windows. Contributed by Xiaoyu Yao. --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 34a206af6e..beafc22518 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -287,6 +287,9 @@ Release 2.6.0 - UNRELEASED YARN-2431. NM restart: cgroup is not removed for reacquired containers (jlowe) + YARN-2519. Credential Provider related unit tests failed on Windows. + (Xiaoyu Yao via cnauroth) + Release 2.5.1 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java index 18600fdea6..2bd91b4ac6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/util/TestWebAppUtils.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertEquals; import java.io.File; import java.io.IOException; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.http.HttpServer2.Builder; import org.apache.hadoop.security.alias.CredentialProvider; @@ -74,8 +75,9 @@ public class TestWebAppUtils { "target/test-dir")); Configuration conf = new Configuration(); + final Path jksPath = new Path(testDir.toString(), "test.jks"); final String ourUrl = - JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks"; + JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri(); File file = new File(testDir, "test.jks"); file.delete(); From d1fa58292e87bc29b4ef1278368c2be938a0afc4 Mon Sep 17 00:00:00 2001 From: arp Date: Sat, 6 Sep 2014 20:02:40 -0700 Subject: [PATCH 14/18] HDFS-6898. DN must reserve space for a full block when an RBW block is created. 
(Contributed by Arpit Agarwal) --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/protocol/HdfsConstants.java | 2 +- .../server/datanode/ReplicaBeingWritten.java | 12 +- .../server/datanode/ReplicaInPipeline.java | 33 +- .../hdfs/server/datanode/ReplicaInfo.java | 7 + .../datanode/fsdataset/FsVolumeSpi.java | 11 + .../fsdataset/impl/BlockPoolSlice.java | 6 +- .../fsdataset/impl/FsDatasetImpl.java | 15 +- .../datanode/fsdataset/impl/FsVolumeImpl.java | 58 +++- .../server/datanode/TestDirectoryScanner.java | 8 + .../impl/TestRbwSpaceReservation.java | 288 ++++++++++++++++++ .../fsdataset/impl/TestWriteToReplica.java | 2 +- 12 files changed, 423 insertions(+), 22 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4412b30710..3d43171fca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -612,6 +612,9 @@ Release 2.6.0 - UNRELEASED HDFS-6862. Add missing timeout annotations to tests. (Xiaoyu Yao via Arpit Agarwal) + HDFS-6898. DN must reserve space for a full block when an RBW block is + created. (Arpit Agarwal) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. HDFS CLI admin tool for creating & deleting an diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 77fe543784..240dcd01ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -48,7 +48,7 @@ public class HdfsConstants { "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"; - public static final int MIN_BLOCKS_FOR_WRITE = 5; + public static final int MIN_BLOCKS_FOR_WRITE = 1; // Long that indicates "leave current quota unchanged" public static final long QUOTA_DONT_SET = Long.MAX_VALUE; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java index 728dd3806f..4a89493f03 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java @@ -34,10 +34,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline { * @param genStamp replica generation stamp * @param vol volume where replica is located * @param dir directory path where block and meta files are located + * @param bytesToReserve disk space to reserve for this replica, based on + * the estimated maximum block length. 
*/ public ReplicaBeingWritten(long blockId, long genStamp, - FsVolumeSpi vol, File dir) { - super( blockId, genStamp, vol, dir); + FsVolumeSpi vol, File dir, long bytesToReserve) { + super(blockId, genStamp, vol, dir, bytesToReserve); } /** @@ -60,10 +62,12 @@ public class ReplicaBeingWritten extends ReplicaInPipeline { * @param vol volume where replica is located * @param dir directory path where block and meta files are located * @param writer a thread that is writing to this replica + * @param bytesToReserve disk space to reserve for this replica, based on + * the estimated maximum block length. */ public ReplicaBeingWritten(long blockId, long len, long genStamp, - FsVolumeSpi vol, File dir, Thread writer ) { - super( blockId, len, genStamp, vol, dir, writer); + FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) { + super(blockId, len, genStamp, vol, dir, writer, bytesToReserve); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java index f808e0107f..08395aa65c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java @@ -44,6 +44,13 @@ public class ReplicaInPipeline extends ReplicaInfo private long bytesOnDisk; private byte[] lastChecksum; private Thread writer; + + /** + * Bytes reserved for this replica on the containing volume. + * Based off difference between the estimated maximum block length and + * the bytes already written to this block. + */ + private long bytesReserved; /** * Constructor for a zero length replica @@ -51,10 +58,12 @@ public class ReplicaInPipeline extends ReplicaInfo * @param genStamp replica generation stamp * @param vol volume where replica is located * @param dir directory path where block and meta files are located + * @param bytesToReserve disk space to reserve for this replica, based on + * the estimated maximum block length. */ public ReplicaInPipeline(long blockId, long genStamp, - FsVolumeSpi vol, File dir) { - this( blockId, 0L, genStamp, vol, dir, Thread.currentThread()); + FsVolumeSpi vol, File dir, long bytesToReserve) { + this(blockId, 0L, genStamp, vol, dir, Thread.currentThread(), bytesToReserve); } /** @@ -67,7 +76,7 @@ public class ReplicaInPipeline extends ReplicaInfo ReplicaInPipeline(Block block, FsVolumeSpi vol, File dir, Thread writer) { this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(), - vol, dir, writer); + vol, dir, writer, 0L); } /** @@ -78,13 +87,16 @@ public class ReplicaInPipeline extends ReplicaInfo * @param vol volume where replica is located * @param dir directory path where block and meta files are located * @param writer a thread that is writing to this replica + * @param bytesToReserve disk space to reserve for this replica, based on + * the estimated maximum block length. 
*/ ReplicaInPipeline(long blockId, long len, long genStamp, - FsVolumeSpi vol, File dir, Thread writer ) { + FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) { super( blockId, len, genStamp, vol, dir); this.bytesAcked = len; this.bytesOnDisk = len; this.writer = writer; + this.bytesReserved = bytesToReserve; } /** @@ -96,6 +108,7 @@ public class ReplicaInPipeline extends ReplicaInfo this.bytesAcked = from.getBytesAcked(); this.bytesOnDisk = from.getBytesOnDisk(); this.writer = from.writer; + this.bytesReserved = from.bytesReserved; } @Override @@ -115,13 +128,25 @@ public class ReplicaInPipeline extends ReplicaInfo @Override // ReplicaInPipelineInterface public void setBytesAcked(long bytesAcked) { + long newBytesAcked = bytesAcked - this.bytesAcked; this.bytesAcked = bytesAcked; + + // Once bytes are ACK'ed we can release equivalent space from the + // volume's reservedForRbw count. We could have released it as soon + // as the write-to-disk completed but that would be inefficient. + getVolume().releaseReservedSpace(newBytesAcked); + bytesReserved -= newBytesAcked; } @Override // ReplicaInPipelineInterface public long getBytesOnDisk() { return bytesOnDisk; } + + @Override + public long getBytesReserved() { + return bytesReserved; + } @Override // ReplicaInPipelineInterface public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java index 0dcdf0573e..49ac605a35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java @@ -222,6 +222,13 @@ abstract public class ReplicaInfo extends Block implements Replica { public void setUnlinked() { // no need to be unlinked } + + /** + * Number of bytes reserved for this replica on disk. + */ + public long getBytesReserved() { + return 0; + } /** * Copy specified file into a temporary file. Then rename the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java index b14ef56254..cba23c3d4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java @@ -45,4 +45,15 @@ public interface FsVolumeSpi { public File getFinalizedDir(String bpid) throws IOException; public StorageType getStorageType(); + + /** + * Reserve disk space for an RBW block so a writer does not run out of + * space before the block is full. + */ + public void reserveSpaceForRbw(long bytesToReserve); + + /** + * Release disk space previously reserved for RBW block. 
+ */ + public void releaseReservedSpace(long bytesToRelease); } \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java index 57744073c2..96e4650c5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java @@ -240,7 +240,7 @@ class BlockPoolSlice { return DatanodeUtil.createTmpFile(b, f); } - File addBlock(Block b, File f) throws IOException { + File addFinalizedBlock(Block b, File f) throws IOException { File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId()); if (!blockDir.exists()) { if (!blockDir.mkdirs()) { @@ -334,9 +334,11 @@ class BlockPoolSlice { // The restart meta file exists if (sc.hasNextLong() && (sc.nextLong() > Time.now())) { // It didn't expire. Load the replica as a RBW. + // We don't know the expected block length, so just use 0 + // and don't reserve any more space for writes. newReplica = new ReplicaBeingWritten(blockId, validateIntegrityAndSetLength(file, genStamp), - genStamp, volume, file.getParentFile(), null); + genStamp, volume, file.getParentFile(), null, 0); loadRwr = false; } sc.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 5306be7714..4511f21c51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -593,7 +593,7 @@ class FsDatasetImpl implements FsDatasetSpi { + " from " + srcfile + " to " + dstfile.getAbsolutePath(), e); } if (LOG.isDebugEnabled()) { - LOG.debug("addBlock: Moved " + srcmeta + " to " + dstmeta + LOG.debug("addFinalizedBlock: Moved " + srcmeta + " to " + dstmeta + " and " + srcfile + " to " + dstfile); } return dstfile; @@ -712,7 +712,7 @@ class FsDatasetImpl implements FsDatasetSpi { File oldmeta = replicaInfo.getMetaFile(); ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten( replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS, - v, newBlkFile.getParentFile(), Thread.currentThread()); + v, newBlkFile.getParentFile(), Thread.currentThread(), estimateBlockLen); File newmeta = newReplicaInfo.getMetaFile(); // rename meta file to rbw directory @@ -748,7 +748,7 @@ class FsDatasetImpl implements FsDatasetSpi { // Replace finalized replica by a RBW replica in replicas map volumeMap.add(bpid, newReplicaInfo); - + v.reserveSpaceForRbw(estimateBlockLen - replicaInfo.getNumBytes()); return newReplicaInfo; } @@ -876,7 +876,7 @@ class FsDatasetImpl implements FsDatasetSpi { // create a rbw file to hold block in the designated volume File f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock()); ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(), - b.getGenerationStamp(), v, f.getParentFile()); + b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes()); volumeMap.add(b.getBlockPoolId(), newReplicaInfo); return newReplicaInfo; } @@ -992,7 +992,7 @@ 
class FsDatasetImpl implements FsDatasetSpi { // create RBW final ReplicaBeingWritten rbw = new ReplicaBeingWritten( blockId, numBytes, expectedGs, - v, dest.getParentFile(), Thread.currentThread()); + v, dest.getParentFile(), Thread.currentThread(), 0); rbw.setBytesAcked(visible); // overwrite the RBW in the volume map volumeMap.add(b.getBlockPoolId(), rbw); @@ -1013,7 +1013,7 @@ class FsDatasetImpl implements FsDatasetSpi { // create a temporary file to hold block in the designated volume File f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock()); ReplicaInPipeline newReplicaInfo = new ReplicaInPipeline(b.getBlockId(), - b.getGenerationStamp(), v, f.getParentFile()); + b.getGenerationStamp(), v, f.getParentFile(), 0); volumeMap.add(b.getBlockPoolId(), newReplicaInfo); return newReplicaInfo; @@ -1079,7 +1079,8 @@ class FsDatasetImpl implements FsDatasetSpi { " for block " + replicaInfo); } - File dest = v.addBlock(bpid, replicaInfo, f); + File dest = v.addFinalizedBlock( + bpid, replicaInfo, f, replicaInfo.getBytesReserved()); newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile()); } volumeMap.add(bpid, newReplicaInfo); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 0b9fda83ae..3952c39159 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -28,6 +28,7 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.classification.InterfaceAudience; @@ -62,6 +63,9 @@ public class FsVolumeImpl implements FsVolumeSpi { private final DF usage; private final long reserved; + // Disk space reserved for open blocks. + private AtomicLong reservedForRbw; + // Capacity configured. This is useful when we want to // limit the visible capacity for tests. If negative, then we just // query from the filesystem. @@ -82,6 +86,7 @@ public class FsVolumeImpl implements FsVolumeSpi { this.reserved = conf.getLong( DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT); + this.reservedForRbw = new AtomicLong(0L); this.currentDir = currentDir; File parent = currentDir.getParentFile(); this.usage = new DF(parent, conf); @@ -166,13 +171,18 @@ public class FsVolumeImpl implements FsVolumeSpi { @Override public long getAvailable() throws IOException { - long remaining = getCapacity()-getDfsUsed(); + long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get(); long available = usage.getAvailable(); if (remaining > available) { remaining = available; } return (remaining > 0) ? 
remaining : 0; } + + @VisibleForTesting + public long getReservedForRbw() { + return reservedForRbw.get(); + } long getReserved(){ return reserved; @@ -217,16 +227,58 @@ public class FsVolumeImpl implements FsVolumeSpi { return getBlockPoolSlice(bpid).createTmpFile(b); } + @Override + public void reserveSpaceForRbw(long bytesToReserve) { + if (bytesToReserve != 0) { + if (FsDatasetImpl.LOG.isDebugEnabled()) { + FsDatasetImpl.LOG.debug("Reserving " + bytesToReserve + " on volume " + getBasePath()); + } + reservedForRbw.addAndGet(bytesToReserve); + } + } + + @Override + public void releaseReservedSpace(long bytesToRelease) { + if (bytesToRelease != 0) { + if (FsDatasetImpl.LOG.isDebugEnabled()) { + FsDatasetImpl.LOG.debug("Releasing " + bytesToRelease + " on volume " + getBasePath()); + } + + long oldReservation, newReservation; + do { + oldReservation = reservedForRbw.get(); + newReservation = oldReservation - bytesToRelease; + if (newReservation < 0) { + // Failsafe, this should never occur in practice, but if it does we don't + // want to start advertising more space than we have available. + newReservation = 0; + } + } while (!reservedForRbw.compareAndSet(oldReservation, newReservation)); + } + } + /** * RBW files. They get moved to the finalized block directory when * the block is finalized. */ File createRbwFile(String bpid, Block b) throws IOException { + reserveSpaceForRbw(b.getNumBytes()); return getBlockPoolSlice(bpid).createRbwFile(b); } - File addBlock(String bpid, Block b, File f) throws IOException { - return getBlockPoolSlice(bpid).addBlock(b, f); + /** + * + * @param bytesReservedForRbw Space that was reserved during + * block creation. Now that the block is being finalized we + * can free up this space. + * @return + * @throws IOException + */ + File addFinalizedBlock(String bpid, Block b, + File f, long bytesReservedForRbw) + throws IOException { + releaseReservedSpace(bytesReservedForRbw); + return getBlockPoolSlice(bpid).addFinalizedBlock(b, f); } Executor getCacheExecutor() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 05924acc5a..bc50eaac3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -424,6 +424,14 @@ public class TestDirectoryScanner { public String getStorageID() { return ""; } + + @Override + public void reserveSpaceForRbw(long bytesToReserve) { + } + + @Override + public void releaseReservedSpace(long bytesToRelease) { + } } private final static TestFsVolumeSpi TEST_VOLUME = new TestFsVolumeSpi(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java new file mode 100644 index 0000000000..74ac16708f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java @@ -0,0 +1,288 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import static org.apache.hadoop.hdfs.DFSConfigKeys.*; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThat; + +import org.apache.hadoop.fs.DU; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.*; +import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.util.Daemon; +import org.apache.log4j.Level; +import org.junit.After; +import org.junit.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; +import java.util.Random; + +/** + * Ensure that the DN reserves disk space equivalent to a full block for + * replica being written (RBW). + */ +public class TestRbwSpaceReservation { + static final Log LOG = LogFactory.getLog(TestRbwSpaceReservation.class); + + private static final short REPL_FACTOR = 1; + private static final int DU_REFRESH_INTERVAL_MSEC = 500; + private static final int STORAGES_PER_DATANODE = 1; + private static final int BLOCK_SIZE = 1024 * 1024; + private static final int SMALL_BLOCK_SIZE = 1024; + + protected MiniDFSCluster cluster; + private Configuration conf; + private DistributedFileSystem fs = null; + private DFSClient client = null; + FsVolumeImpl singletonVolume = null; + + private static Random rand = new Random(); + + private void initConfig(int blockSize) { + conf = new HdfsConfiguration(); + + // Refresh disk usage information frequently. 
+ conf.setInt(FS_DU_INTERVAL_KEY, DU_REFRESH_INTERVAL_MSEC); + conf.setLong(DFS_BLOCK_SIZE_KEY, blockSize); + + // Disable the scanner + conf.setInt(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); + } + + static { + ((Log4JLogger) FsDatasetImpl.LOG).getLogger().setLevel(Level.ALL); + } + + private void startCluster(int blockSize, long perVolumeCapacity) throws IOException { + initConfig(blockSize); + + cluster = new MiniDFSCluster + .Builder(conf) + .storagesPerDatanode(STORAGES_PER_DATANODE) + .numDataNodes(REPL_FACTOR) + .build(); + fs = cluster.getFileSystem(); + client = fs.getClient(); + cluster.waitActive(); + + if (perVolumeCapacity >= 0) { + List volumes = + cluster.getDataNodes().get(0).getFSDataset().getVolumes(); + + assertThat(volumes.size(), is(1)); + singletonVolume = ((FsVolumeImpl) volumes.get(0)); + singletonVolume.setCapacityForTesting(perVolumeCapacity); + } + } + + @After + public void shutdownCluster() throws IOException { + if (client != null) { + client.close(); + client = null; + } + + if (fs != null) { + fs.close(); + fs = null; + } + + if (cluster != null) { + cluster.shutdown(); + cluster = null; + } + } + + private void createFileAndTestSpaceReservation( + final String fileNamePrefix, final int fileBlockSize) + throws IOException, InterruptedException { + // Enough for 1 block + meta files + some delta. + final long configuredCapacity = fileBlockSize * 2 - 1; + startCluster(BLOCK_SIZE, configuredCapacity); + FSDataOutputStream out = null; + Path path = new Path("/" + fileNamePrefix + ".dat"); + + try { + out = fs.create(path, false, 4096, (short) 1, fileBlockSize); + + byte[] buffer = new byte[rand.nextInt(fileBlockSize / 4)]; + out.write(buffer); + out.hsync(); + int bytesWritten = buffer.length; + + // Check that space was reserved for a full block minus the bytesWritten. + assertThat(singletonVolume.getReservedForRbw(), + is((long) fileBlockSize - bytesWritten)); + out.close(); + out = null; + + // Check that the reserved space has been released since we closed the + // file. + assertThat(singletonVolume.getReservedForRbw(), is(0L)); + + // Reopen the file for appends and write 1 more byte. + out = fs.append(path); + out.write(buffer); + out.hsync(); + bytesWritten += buffer.length; + + // Check that space was again reserved for a full block minus the + // bytesWritten so far. + assertThat(singletonVolume.getReservedForRbw(), + is((long) fileBlockSize - bytesWritten)); + + // Write once again and again verify the available space. This ensures + // that the reserved space is progressively adjusted to account for bytes + // written to disk. + out.write(buffer); + out.hsync(); + bytesWritten += buffer.length; + assertThat(singletonVolume.getReservedForRbw(), + is((long) fileBlockSize - bytesWritten)); + } finally { + if (out != null) { + out.close(); + } + } + } + + @Test (timeout=300000) + public void testWithDefaultBlockSize() + throws IOException, InterruptedException { + createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE); + } + + @Test (timeout=300000) + public void testWithNonDefaultBlockSize() + throws IOException, InterruptedException { + // Same test as previous one, but with a non-default block size. + createFileAndTestSpaceReservation(GenericTestUtils.getMethodName(), BLOCK_SIZE * 2); + } + + /** + * Stress test to ensure we are not leaking reserved space. 
+ * @throws IOException + * @throws InterruptedException + */ + @Test (timeout=600000) + public void stressTest() throws IOException, InterruptedException { + final int numWriters = 5; + startCluster(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE * numWriters * 10); + Writer[] writers = new Writer[numWriters]; + + // Start a few writers and let them run for a while. + for (int i = 0; i < numWriters; ++i) { + writers[i] = new Writer(client, SMALL_BLOCK_SIZE); + writers[i].start(); + } + + Thread.sleep(60000); + + // Stop the writers. + for (Writer w : writers) { + w.stopWriter(); + } + int filesCreated = 0; + int numFailures = 0; + for (Writer w : writers) { + w.join(); + filesCreated += w.getFilesCreated(); + numFailures += w.getNumFailures(); + } + + LOG.info("Stress test created " + filesCreated + + " files and hit " + numFailures + " failures"); + + // Check no space was leaked. + assertThat(singletonVolume.getReservedForRbw(), is(0L)); + } + + private static class Writer extends Daemon { + private volatile boolean keepRunning; + private final DFSClient localClient; + private int filesCreated = 0; + private int numFailures = 0; + byte[] data; + + Writer(DFSClient client, int blockSize) throws IOException { + localClient = client; + keepRunning = true; + filesCreated = 0; + numFailures = 0; + + // At least some of the files should span a block boundary. + data = new byte[blockSize * 2]; + } + + @Override + public void run() { + /** + * Create a file, write up to 3 blocks of data and close the file. + * Do this in a loop until we are told to stop. + */ + while (keepRunning) { + OutputStream os = null; + try { + String filename = "/file-" + rand.nextLong(); + os = localClient.create(filename, false); + os.write(data, 0, rand.nextInt(data.length)); + IOUtils.closeQuietly(os); + os = null; + localClient.delete(filename, false); + Thread.sleep(50); // Sleep for a bit to avoid killing the system. + ++filesCreated; + } catch (IOException ioe) { + // Just ignore the exception and keep going. 
+ ++numFailures; + } catch (InterruptedException ie) { + return; + } finally { + if (os != null) { + IOUtils.closeQuietly(os); + } + } + } + } + + public void stopWriter() { + keepRunning = false; + } + + public int getFilesCreated() { + return filesCreated; + } + + public int getNumFailures() { + return numFailures; + } + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java index b8246c3191..e6a03d231e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java @@ -158,7 +158,7 @@ public class TestWriteToReplica { replicasMap.add(bpid, new ReplicaInPipeline( blocks[TEMPORARY].getBlockId(), blocks[TEMPORARY].getGenerationStamp(), vol, - vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile())); + vol.createTmpFile(bpid, blocks[TEMPORARY].getLocalBlock()).getParentFile(), 0)); replicaInfo = new ReplicaBeingWritten(blocks[RBW].getLocalBlock(), vol, vol.createRbwFile(bpid, blocks[RBW].getLocalBlock()).getParentFile(), null); From a23144fd8a1e399e431f2f272388ec109df37ab1 Mon Sep 17 00:00:00 2001 From: cnauroth Date: Sun, 7 Sep 2014 08:39:20 -0700 Subject: [PATCH 15/18] HDFS Credential Provider related Unit Test Failure. Contributed by Xiaoyu Yao. --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/cli/TestCryptoAdminCLI.java | 4 +++- .../java/org/apache/hadoop/hdfs/TestDFSUtil.java | 4 +++- .../org/apache/hadoop/hdfs/TestEncryptionZones.java | 12 +++++++----- .../org/apache/hadoop/hdfs/TestReservedRawPaths.java | 3 ++- 5 files changed, 18 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3d43171fca..5a30d0a70d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -615,6 +615,9 @@ Release 2.6.0 - UNRELEASED HDFS-6898. DN must reserve space for a full block when an RBW block is created. (Arpit Agarwal) + HDFS-7025. HDFS Credential Provider related Unit Test Failure. + (Xiaoyu Yao via cnauroth) + BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS HDFS-6387. 
HDFS CLI admin tool for creating & deleting an diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java index 1c83829102..adeabfe856 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java @@ -37,6 +37,7 @@ import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HDFSPolicyProvider; @@ -64,8 +65,9 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS { tmpDir = new File(System.getProperty("test.build.data", "target"), UUID.randomUUID().toString()).getAbsoluteFile(); + final Path jksPath = new Path(tmpDir.toString(), "test.jks"); conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks"); + JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()); dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); dfsCluster.waitClusterUp(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 5ffd3b5bd8..046265f552 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -59,6 +59,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -809,8 +810,9 @@ public class TestDFSUtil { "target/test-dir")); Configuration conf = new Configuration(); + final Path jksPath = new Path(testDir.toString(), "test.jks"); final String ourUrl = - JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks"; + JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri(); File file = new File(testDir, "test.jks"); file.delete(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 1cf9263c25..0ef538d80b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -99,8 +99,9 @@ public class TestEncryptionZones { // Set up java key store String testRoot = fsHelper.getTestRootDir(); testRootDir = new File(testRoot).getAbsoluteFile(); + final Path jksPath = new Path(testRootDir.toString(), "test.jks"); conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" + JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri() ); 
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); // Lower the batch size for testing @@ -324,7 +325,7 @@ public class TestEncryptionZones { final UserGroupInformation user = UserGroupInformation. createUserForTesting("user", new String[] { "mygroup" }); - final Path testRoot = new Path(fsHelper.getTestRootDir()); + final Path testRoot = new Path("/tmp/TestEncryptionZones"); final Path superPath = new Path(testRoot, "superuseronly"); final Path allPath = new Path(testRoot, "accessall"); @@ -358,7 +359,7 @@ public class TestEncryptionZones { final UserGroupInformation user = UserGroupInformation. createUserForTesting("user", new String[] { "mygroup" }); - final Path testRoot = new Path(fsHelper.getTestRootDir()); + final Path testRoot = new Path("/tmp/TestEncryptionZones"); final Path superPath = new Path(testRoot, "superuseronly"); final Path superPathFile = new Path(superPath, "file1"); final Path allPath = new Path(testRoot, "accessall"); @@ -451,7 +452,7 @@ public class TestEncryptionZones { * Test success of Rename EZ on a directory which is already an EZ. */ private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception { - final Path testRoot = new Path(fsHelper.getTestRootDir()); + final Path testRoot = new Path("/tmp/TestEncryptionZones"); final Path pathFoo = new Path(testRoot, "foo"); final Path pathFooBaz = new Path(pathFoo, "baz"); wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true); @@ -598,8 +599,9 @@ public class TestEncryptionZones { } catch (IOException e) { assertExceptionContains("since no key provider is available", e); } + final Path jksPath = new Path(testRootDir.toString(), "test.jks"); clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" + JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri() ); // Try listing EZs as well assertNumZones(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java index 2a20954a39..20e4f4edf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java @@ -69,8 +69,9 @@ public class TestReservedRawPaths { // Set up java key store String testRoot = fsHelper.getTestRootDir(); File testRootDir = new File(testRoot).getAbsoluteFile(); + final Path jksPath = new Path(testRootDir.toString(), "test.jks"); conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, - JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks" + JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri() ); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); From a092cdf32de4d752456286a9f4dda533d8a62bca Mon Sep 17 00:00:00 2001 From: Zhijie Shen Date: Sun, 7 Sep 2014 17:49:06 -0700 Subject: [PATCH 16/18] YARN-2512. Allowed pattern matching for origins in CrossOriginFilter. Contributed by Jonathan Eagles. 
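The change below rewrites isOriginAllowed so that an allowed-origin entry containing "*" is treated as a pattern: literal dots are escaped, each "*" becomes ".*", and the request origin is matched against the compiled regex. A minimal, self-contained sketch of that matching outside the filter follows; the class and method names here are illustrative only and are not part of the YARN code.

    import java.util.Arrays;
    import java.util.List;
    import java.util.regex.Pattern;

    // Standalone sketch of the wildcard-origin matching added by this patch.
    public class OriginMatchSketch {

      // Escape literal dots, then turn each '*' into a ".*" wildcard,
      // mirroring the replace-then-compile approach in isOriginAllowed.
      static boolean isOriginAllowed(List<String> allowedOrigins, String origin) {
        for (String allowed : allowedOrigins) {
          if (allowed.contains("*")) {
            String regex = allowed.replace(".", "\\.").replace("*", ".*");
            if (Pattern.compile(regex).matcher(origin).matches()) {
              return true;
            }
          } else if (allowed.equals(origin)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        List<String> allowed = Arrays.asList("*.example.com");
        System.out.println(isOriginAllowed(allowed, "foo.example.com"));     // true
        System.out.println(isOriginAllowed(allowed, "foo.bar.example.com")); // true
        System.out.println(isOriginAllowed(allowed, "example.com"));         // false, no sub-domain
      }
    }

With ALLOWED_ORIGINS set to "*.example.com", sub-domains such as foo.example.com and foo.bar.example.com are accepted while the bare example.com is not, which is what the new testPatternMatchingOrigins below asserts.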
--- hadoop-yarn-project/CHANGES.txt | 3 +++ .../timeline/webapp/CrossOriginFilter.java | 20 +++++++++++++++++- .../webapp/TestCrossOriginFilter.java | 21 ++++++++++++++++++- 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index beafc22518..ed31479072 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -184,6 +184,9 @@ Release 2.6.0 - UNRELEASED YARN-2508. Cross Origin configuration parameters prefix are not honored (Mit Desai via jeagles) + YARN-2512. Allowed pattern matching for origins in CrossOriginFilter. + (Jonathan Eagles via zjshen) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java index d71175f96d..5a0703d404 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/CrossOriginFilter.java @@ -24,6 +24,8 @@ import java.net.URLEncoder; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import javax.servlet.Filter; import javax.servlet.FilterChain; @@ -204,7 +206,23 @@ public class CrossOriginFilter implements Filter { @VisibleForTesting boolean isOriginAllowed(String origin) { - return allowAllOrigins || allowedOrigins.contains(origin); + if (allowAllOrigins) { + return true; + } + + for (String allowedOrigin : allowedOrigins) { + if (allowedOrigin.contains("*")) { + String regex = allowedOrigin.replace(".", "\\.").replace("*", ".*"); + Pattern p = Pattern.compile(regex); + Matcher m = p.matcher(origin); + if (m.matches()) { + return true; + } + } else if (allowedOrigin.equals(origin)) { + return true; + } + } + return false; } private boolean areHeadersAllowed(String accessControlRequestHeaders) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilter.java index f666c21b1e..ccc9bbfde5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestCrossOriginFilter.java @@ -77,7 +77,26 @@ public class TestCrossOriginFilter { // Object under test CrossOriginFilter filter = new CrossOriginFilter(); filter.init(filterConfig); - Assert.assertTrue(filter.isOriginAllowed("example.org")); + Assert.assertTrue(filter.isOriginAllowed("example.com")); + } + + @Test + public void testPatternMatchingOrigins() throws ServletException, IOException { + + // 
Setup the configuration settings of the server + Map conf = new HashMap(); + conf.put(CrossOriginFilter.ALLOWED_ORIGINS, "*.example.com"); + FilterConfig filterConfig = new FilterConfigTest(conf); + + // Object under test + CrossOriginFilter filter = new CrossOriginFilter(); + filter.init(filterConfig); + + // match multiple sub-domains + Assert.assertFalse(filter.isOriginAllowed("example.com")); + Assert.assertFalse(filter.isOriginAllowed("foo:example.com")); + Assert.assertTrue(filter.isOriginAllowed("foo.example.com")); + Assert.assertTrue(filter.isOriginAllowed("foo.bar.example.com")); } @Test From 56dc496a1031621d2b701801de4ec29179d75f2e Mon Sep 17 00:00:00 2001 From: Zhijie Shen Date: Sun, 7 Sep 2014 18:22:40 -0700 Subject: [PATCH 17/18] YARN-2507. Documented CrossOriginFilter configurations for the timeline server. Contributed by Jonathan Eagles. --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../src/site/apt/TimelineServer.apt.vm | 37 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ed31479072..ed9de87d3e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -187,6 +187,9 @@ Release 2.6.0 - UNRELEASED YARN-2512. Allowed pattern matching for origins in CrossOriginFilter. (Jonathan Eagles via zjshen) + YARN-2507. Documented CrossOriginFilter configurations for the timeline + server. (Jonathan Eagles via zjshen) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/TimelineServer.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/TimelineServer.apt.vm index c704d3797f..92c7377099 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/TimelineServer.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/TimelineServer.apt.vm @@ -102,6 +102,43 @@ YARN Timeline Server yarn.timeline-service.handler-thread-count 10 + + + Enables cross-origin support (CORS) for web services where + cross-origin web response headers are needed. For example, javascript making + a web services request to the timeline server. + yarn.timeline-service.http-cross-origin.enabled + false + + + + Comma separated list of origins that are allowed for web + services needing cross-origin (CORS) support. Wildcards (*) and patterns + allowed + yarn.timeline-service.http-cross-origin.allowed-origins + * + + + + Comma separated list of methods that are allowed for web + services needing cross-origin (CORS) support. + yarn.timeline-service.http-cross-origin.allowed-methods + GET,POST,HEAD + + + + Comma separated list of headers that are allowed for web + services needing cross-origin (CORS) support. + yarn.timeline-service.http-cross-origin.allowed-headers + X-Requested-With,Content-Type,Accept,Origin + + + + The number of seconds a pre-flighted request can be cached + for web services needing cross-origin (CORS) support. + yarn.timeline-service.http-cross-origin.max-age + 1800 + +---+ * Generic-data related Configuration From 0974f434c47ffbf4b77a8478937fd99106c8ddbd Mon Sep 17 00:00:00 2001 From: Jian He Date: Sun, 7 Sep 2014 18:25:44 -0700 Subject: [PATCH 18/18] YARN-2515. Updated ConverterUtils#toContainerId to parse epoch. 
From 0974f434c47ffbf4b77a8478937fd99106c8ddbd Mon Sep 17 00:00:00 2001
From: Jian He
Date: Sun, 7 Sep 2014 18:25:44 -0700
Subject: [PATCH 18/18] YARN-2515. Updated ConverterUtils#toContainerId to parse epoch.
 Contributed by Tsuyoshi OZAWA

---
 hadoop-yarn-project/CHANGES.txt               |  3 ++
 .../hadoop/yarn/api/records/ContainerId.java  | 39 ++++++++++++++++++++-
 .../hadoop/yarn/util/ConverterUtils.java      | 15 +------
 .../hadoop/yarn/api/TestContainerId.java      |  4 ++
 .../hadoop/yarn/util/TestConverterUtils.java  |  9 +++++
 5 files changed, 55 insertions(+), 15 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ed9de87d3e..d54fcd6fef 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -190,6 +190,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2507. Documented CrossOriginFilter configurations for the timeline
     server. (Jonathan Eagles via zjshen)
 
+    YARN-2515. Updated ConverterUtils#toContainerId to parse epoch.
+    (Tsuyoshi OZAWA via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
index fc7f40488e..321052b062 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.yarn.api.records;
 
-import java.text.NumberFormat;
+import com.google.common.base.Splitter;
 
+import java.text.NumberFormat;
+import java.util.Iterator;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
@@ -33,6 +35,8 @@ import org.apache.hadoop.yarn.util.Records;
 @Public
 @Stable
 public abstract class ContainerId implements Comparable<ContainerId>{
+  private static final Splitter _SPLITTER = Splitter.on('_').trimResults();
+  private static final String CONTAINER_PREFIX = "container";
 
   @Private
   @Unstable
@@ -163,5 +167,38 @@ public abstract class ContainerId implements Comparable<ContainerId>{
     return sb.toString();
   }
 
+  @Public
+  @Unstable
+  public static ContainerId fromString(String containerIdStr) {
+    Iterator<String> it = _SPLITTER.split(containerIdStr).iterator();
+    if (!it.next().equals(CONTAINER_PREFIX)) {
+      throw new IllegalArgumentException("Invalid ContainerId prefix: "
+          + containerIdStr);
+    }
+    try {
+      ApplicationAttemptId appAttemptID = toApplicationAttemptId(it);
+      int id = Integer.parseInt(it.next());
+      int epoch = 0;
+      if (it.hasNext()) {
+        epoch = Integer.parseInt(it.next());
+      }
+      int cid = (epoch << 22) | id;
+      ContainerId containerId = ContainerId.newInstance(appAttemptID, cid);
+      return containerId;
+    } catch (NumberFormatException n) {
+      throw new IllegalArgumentException("Invalid ContainerId: "
+          + containerIdStr, n);
+    }
+  }
+
+  private static ApplicationAttemptId toApplicationAttemptId(
+      Iterator<String> it) throws NumberFormatException {
+    ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
+        Integer.parseInt(it.next()));
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
+    return appAttemptId;
+  }
+
   protected abstract void build();
 }
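
Note: in ContainerId.fromString above, the optional epoch and the sequential
container number are packed into a single int: the low 22 bits hold the id and
the bits above hold the epoch. A small standalone sketch of that arithmetic,
using the same values as the tests further below; the class and method names
are illustrative only.

    public class ContainerIdBits {

      private static final int ID_BITS = 22;
      private static final int ID_MASK = (1 << ID_BITS) - 1; // 0x3fffff

      // Same packing as fromString: cid = (epoch << 22) | id.
      static int pack(int epoch, int id) {
        return (epoch << ID_BITS) | id;
      }

      public static void main(String[] args) {
        int packed = pack(6, 479987);
        System.out.println(packed);            // 25645811, as in TestContainerId
        System.out.println(packed & ID_MASK);  // 479987 (the plain container id)
        System.out.println(packed >> ID_BITS); // 6 (the epoch)
      }
    }
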
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index f731af95fc..27f7bc107e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -168,20 +168,7 @@ public class ConverterUtils {
   }
 
   public static ContainerId toContainerId(String containerIdStr) {
-    Iterator<String> it = _split(containerIdStr).iterator();
-    if (!it.next().equals(CONTAINER_PREFIX)) {
-      throw new IllegalArgumentException("Invalid ContainerId prefix: "
-          + containerIdStr);
-    }
-    try {
-      ApplicationAttemptId appAttemptID = toApplicationAttemptId(it);
-      ContainerId containerId =
-          ContainerId.newInstance(appAttemptID, Integer.parseInt(it.next()));
-      return containerId;
-    } catch (NumberFormatException n) {
-      throw new IllegalArgumentException("Invalid ContainerId: "
-          + containerIdStr, n);
-    }
+    return ContainerId.fromString(containerIdStr);
   }
 
   public static ApplicationAttemptId toApplicationAttemptId(
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
index b23d0ed3fb..8baf244dda 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
@@ -54,10 +54,14 @@ public class TestContainerId {
     long ts = System.currentTimeMillis();
     ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
     Assert.assertEquals("container_10_0001_01_000001", c1.toString());
+    Assert.assertEquals(c1,
+        ContainerId.fromString("container_10_0001_01_000001"));
     Assert.assertEquals(479987, 0x003fffff & c6.getId());
     Assert.assertEquals(6, c6.getId() >> 22);
     Assert.assertEquals("container_" + ts + "_36473_4365472_479987_06",
         c6.toString());
+    Assert.assertEquals(c6,
+        ContainerId.fromString("container_" + ts + "_36473_4365472_479987_06"));
   }
 
   public static ContainerId newContainerId(int appId, int appAttemptId,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java
index 21af4555e3..3f4147c418 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java
@@ -55,6 +55,15 @@ public class TestConverterUtils {
     assertEquals(gen, id);
   }
 
+  @Test
+  public void testContainerIdWithEpoch() throws URISyntaxException {
+    ContainerId id = TestContainerId.newContainerId(0, 0, 0, 25645811);
+    String cid = ConverterUtils.toString(id);
+    assertEquals("container_0_0000_00_479987_06", cid);
+    ContainerId gen = ConverterUtils.toContainerId(cid);
+    assertEquals(gen.toString(), id.toString());
+  }
+
   @Test
   public void testContainerIdNull() throws URISyntaxException {
     assertNull(ConverterUtils.toString((ContainerId)null));