From b9370fdcf601e6f01bcee171ac7e23cb936bd0ac Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Wed, 23 Jul 2014 18:53:38 +0000 Subject: [PATCH 01/15] HADOOP-10890. TestDFVariations.testMount fails intermittently. (Contributed by Yongjun Zhang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612916 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../apache/hadoop/fs/TestDFVariations.java | 21 ++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 49134e12ef..2463fb065d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -811,6 +811,9 @@ Release 2.5.0 - UNRELEASED HADOOP-10872. TestPathData fails intermittently with "Mkdirs failed to create d1". (Yongjun Zhang via Arpit Agarwal) + HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun + Zhang via Arpit Agarwal) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java index d457c0e8a4..97dbe5e606 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java @@ -29,14 +29,33 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Shell; +import org.junit.After; +import org.junit.Before; import org.junit.Test; + import static org.junit.Assert.*; public class TestDFVariations { + private static final String TEST_ROOT_DIR = + System.getProperty("test.build.data","build/test/data") + "/TestDFVariations"; + private static File test_root = null; + @Before + public void setup() throws IOException { + test_root = new File(TEST_ROOT_DIR); + test_root.mkdirs(); + } + + @After + public void after() throws IOException { + FileUtil.setWritable(test_root, true); + FileUtil.fullyDelete(test_root); + assertTrue(!test_root.exists()); + } + public static class XXDF extends DF { public XXDF() throws IOException { - super(new File(System.getProperty("test.build.data","/tmp")), 0L); + super(test_root, 0L); } @Override From 5343b43fd989ec596afed807ddce29ad96c23e2d Mon Sep 17 00:00:00 2001 From: Uma Maheswara Rao G Date: Wed, 23 Jul 2014 19:05:11 +0000 Subject: [PATCH 02/15] HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code when xattr doesn't exist. 
(Charles Lamb via umamahesh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612922 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/protocol/ClientProtocol.java | 2 +- .../hdfs/server/namenode/FSEditLog.java | 3 +- .../hdfs/server/namenode/FSEditLogLoader.java | 4 + .../hdfs/server/namenode/FSEditLogOp.java | 5 + .../hdfs/server/namenode/FSNamesystem.java | 62 +++- .../namenode/XAttrPermissionFilter.java | 17 +- .../hdfs/web/resources/XAttrNameParam.java | 4 +- .../org/apache/hadoop/hdfs/TestDFSShell.java | 69 ++++ .../hdfs/server/namenode/FSXAttrBaseTest.java | 351 ++++++++++++++++-- .../namenode/TestNamenodeRetryCache.java | 4 +- .../namenode/ha/TestRetryCacheWithHA.java | 54 ++- .../hadoop/hdfs/web/resources/TestParam.java | 6 - .../src/test/resources/editsStored | Bin 4970 -> 4992 bytes .../src/test/resources/editsStored.xml | 2 + 15 files changed, 534 insertions(+), 52 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8c172bf03c..4fbe4cfc28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -892,6 +892,9 @@ Release 2.5.0 - UNRELEASED HDFS-6703. NFS: Files can be deleted from a read-only mount (Srikanth Upputuri via brandonli) + HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code + when xattr doesn't exist. (Charles Lamb via umamahesh) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index ad331d1e75..9398c721a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1337,6 +1337,6 @@ public List listXAttrs(String src) * @param xAttr XAttr to remove * @throws IOException */ - @Idempotent + @AtMostOnce public void removeXAttr(String src, XAttr xAttr) throws IOException; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index 85cfc1c774..b2adcd455f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -1074,10 +1074,11 @@ void logSetXAttrs(String src, List xAttrs, boolean toLogRpcIds) { logEdit(op); } - void logRemoveXAttrs(String src, List xAttrs) { + void logRemoveXAttrs(String src, List xAttrs, boolean toLogRpcIds) { final RemoveXAttrOp op = RemoveXAttrOp.getInstance(); op.src = src; op.xAttrs = xAttrs; + logRpcIds(op, toLogRpcIds); logEdit(op); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 858cd57b23..a721491948 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -821,6 +821,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op; fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src, removeXAttrOp.xAttrs); + if (toAddRetryCache) { + fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId, + removeXAttrOp.rpcCallId); + } break; } default: diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index e972799b33..5543e0cb86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -3551,6 +3551,7 @@ void readFields(DataInputStream in, int logVersion) throws IOException { XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in); src = p.getSrc(); xAttrs = PBHelper.convertXAttrs(p.getXAttrsList()); + readRpcIds(in, logVersion); } @Override @@ -3561,18 +3562,22 @@ public void writeFields(DataOutputStream out) throws IOException { } b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs)); b.build().writeDelimitedTo(out); + // clientId and callId + writeRpcIds(rpcClientId, rpcCallId, out); } @Override protected void toXml(ContentHandler contentHandler) throws SAXException { XMLUtils.addSaxString(contentHandler, "SRC", src); appendXAttrsToXml(contentHandler, xAttrs); + appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId); } @Override void fromXml(Stanza st) throws InvalidXmlException { src = st.getValue("SRC"); xAttrs = readXAttrsFromXml(st); + readRpcIdsFromXml(st); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 3e922ba023..6ce6a70ce2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -8279,11 +8279,12 @@ List getXAttrs(String src, List xAttrs) throws IOException { nnConf.checkXAttrsConfigFlag(); FSPermissionChecker pc = getPermissionChecker(); boolean getAll = xAttrs == null || xAttrs.isEmpty(); - List filteredXAttrs = null; if (!getAll) { - filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs); - if (filteredXAttrs.isEmpty()) { - return filteredXAttrs; + try { + XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs); + } catch (AccessControlException e) { + logAuditEvent(false, "getXAttrs", src); + throw e; } } checkOperation(OperationCategory.READ); @@ -8302,15 +8303,21 @@ List getXAttrs(String src, List xAttrs) throws IOException { if (filteredAll == null || filteredAll.isEmpty()) { return null; } - List toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size()); - for (XAttr xAttr : filteredXAttrs) { + List toGet = Lists.newArrayListWithCapacity(xAttrs.size()); + for (XAttr xAttr : xAttrs) { + boolean foundIt = false; for (XAttr a : filteredAll) { if (xAttr.getNameSpace() == a.getNameSpace() && xAttr.getName().equals(a.getName())) { toGet.add(a); + foundIt = true; break; } } + if (!foundIt) { + throw new IOException( + "At least one of the attributes provided was not found."); + } } 
return toGet; } @@ -8344,17 +8351,42 @@ List listXAttrs(String src) throws IOException { readUnlock(); } } - + + /** + * Remove an xattr for a file or directory. + * + * @param src + * - path to remove the xattr from + * @param xAttr + * - xAttr to remove + * @throws AccessControlException + * @throws SafeModeException + * @throws UnresolvedLinkException + * @throws IOException + */ void removeXAttr(String src, XAttr xAttr) throws IOException { - nnConf.checkXAttrsConfigFlag(); - HdfsFileStatus resultingStat = null; - FSPermissionChecker pc = getPermissionChecker(); + CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); + if (cacheEntry != null && cacheEntry.isSuccess()) { + return; // Return previous response + } + boolean success = false; try { - XAttrPermissionFilter.checkPermissionForApi(pc, xAttr); + removeXAttrInt(src, xAttr, cacheEntry != null); + success = true; } catch (AccessControlException e) { logAuditEvent(false, "removeXAttr", src); throw e; + } finally { + RetryCache.setState(cacheEntry, success); } + } + + void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache) + throws IOException { + nnConf.checkXAttrsConfigFlag(); + HdfsFileStatus resultingStat = null; + FSPermissionChecker pc = getPermissionChecker(); + XAttrPermissionFilter.checkPermissionForApi(pc, xAttr); checkOperation(OperationCategory.WRITE); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); writeLock(); @@ -8368,12 +8400,12 @@ void removeXAttr(String src, XAttr xAttr) throws IOException { xAttrs.add(xAttr); List removedXAttrs = dir.removeXAttrs(src, xAttrs); if (removedXAttrs != null && !removedXAttrs.isEmpty()) { - getEditLog().logRemoveXAttrs(src, removedXAttrs); + getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache); + } else { + throw new IOException( + "No matching attributes found for remove operation"); } resultingStat = getAuditFileInfo(src, false); - } catch (AccessControlException e) { - logAuditEvent(false, "removeXAttr", src); - throw e; } finally { writeUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 47f29399e5..98730142fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -26,6 +26,7 @@ import org.apache.hadoop.security.AccessControlException; import com.google.common.collect.Lists; +import com.google.common.base.Preconditions; /** * There are four types of extended attributes defined by the @@ -60,8 +61,20 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) throw new AccessControlException("User doesn't have permission for xattr: " + XAttrHelper.getPrefixName(xAttr)); } - - static List filterXAttrsForApi(FSPermissionChecker pc, + + static void checkPermissionForApi(FSPermissionChecker pc, + List xAttrs) throws AccessControlException { + Preconditions.checkArgument(xAttrs != null); + if (xAttrs.isEmpty()) { + return; + } + + for (XAttr xAttr : xAttrs) { + checkPermissionForApi(pc, xAttr); + } + } + + static List filterXAttrsForApi(FSPermissionChecker pc, List xAttrs) { assert xAttrs != null : "xAttrs can not be null"; if (xAttrs == null || xAttrs.isEmpty()) { diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java index 3860f916e4..8137b4494f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java @@ -25,8 +25,8 @@ public class XAttrNameParam extends StringParam { /** Default parameter value. **/ public static final String DEFAULT = ""; - private static Domain DOMAIN = new Domain(NAME, - Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+")); + private static Domain DOMAIN = new Domain(NAME, + Pattern.compile(".*")); public XAttrNameParam(final String str) { super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index 8eb1c41e05..aac16f4e5e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -2653,6 +2653,75 @@ public Object run() throws Exception { } } + /* + * 1. Test that CLI throws an exception and returns non-0 when user does + * not have permission to read an xattr. + * 2. Test that CLI throws an exception and returns non-0 when a non-existent + * xattr is requested. + */ + @Test (timeout = 120000) + public void testGetFAttrErrors() throws Exception { + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + MiniDFSCluster cluster = null; + PrintStream bakErr = null; + try { + final Configuration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + + final FileSystem fs = cluster.getFileSystem(); + final Path p = new Path("/foo"); + fs.mkdirs(p); + bakErr = System.err; + + final FsShell fshell = new FsShell(conf); + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + System.setErr(new PrintStream(out)); + + // No permission for "other". + fs.setPermission(p, new FsPermission((short) 0700)); + + { + final int ret = ToolRunner.run(fshell, new String[] { + "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"}); + assertEquals("Returned should be 0", 0, ret); + out.reset(); + } + + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + int ret = ToolRunner.run(fshell, new String[] { + "-getfattr", "-n", "user.a1", "/foo"}); + String str = out.toString(); + assertTrue("xattr value was incorrectly returned", + str.indexOf("1234") == -1); + out.reset(); + return null; + } + }); + + { + final int ret = ToolRunner.run(fshell, new String[]{ + "-getfattr", "-n", "user.nonexistent", "/foo"}); + String str = out.toString(); + assertTrue("xattr value was incorrectly returned", + str.indexOf( + "getfattr: At least one of the attributes provided was not found") + >= 0); + out.reset(); + } + } finally { + if (bakErr != null) { + System.setErr(bakErr); + } + if (cluster != null) { + cluster.shutdown(); + } + } + } + /** * Test that the server trash configuration is respected when * the client configuration is not set. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java index 86f1ec90b8..636ecc2417 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java @@ -19,7 +19,6 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.io.FileNotFoundException; import java.security.PrivilegedExceptionAction; import java.util.EnumSet; import java.util.List; @@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permission.FsAction.ALL; import static org.apache.hadoop.fs.permission.FsAction.READ; import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.After; @@ -261,11 +261,12 @@ public void testSetXAttr() throws Exception { fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with empty name should fail."); + } catch (RemoteException e) { + assertEquals("Unexpected RemoteException: " + e, e.getClassName(), + HadoopIllegalArgumentException.class.getCanonicalName()); + GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e); } catch (HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e); - } catch (IllegalArgumentException e) { - GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " + - "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e); } // Set xattr with invalid name: "a1" @@ -274,11 +275,12 @@ public void testSetXAttr() throws Exception { XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with invalid name prefix or without " + "name prefix should fail."); + } catch (RemoteException e) { + assertEquals("Unexpected RemoteException: " + e, e.getClassName(), + HadoopIllegalArgumentException.class.getCanonicalName()); + GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e); } catch (HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e); - } catch (IllegalArgumentException e) { - GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " + - "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e); } // Set xattr without XAttrSetFlag @@ -341,9 +343,18 @@ public void testSetXAttr() throws Exception { } /** - * Tests for getting xattr - * 1. To get xattr which does not exist. - * 2. To get multiple xattrs. + * getxattr tests. 
Test that getxattr throws an exception if any of + * the following are true: + * an xattr that was requested doesn't exist + * the caller specifies an unknown namespace + * the caller doesn't have access to the namespace + * the caller doesn't have permission to get the value of the xattr + * the caller does not have search access to the parent directory + * the caller has only read access to the owning directory + * the caller has only search access to the owning directory and + * execute/search access to the actual entity + * the caller does not have search access to the owning directory and read + * access to the actual entity */ @Test(timeout = 120000) public void testGetXAttrs() throws Exception { @@ -351,21 +362,159 @@ public void testGetXAttrs() throws Exception { fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); - // XAttr does not exist. - byte[] value = fs.getXAttr(path, name3); - Assert.assertEquals(value, null); + /* An XAttr that was requested does not exist. */ + try { + final byte[] value = fs.getXAttr(path, name3); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "At least one of the attributes provided was not found.", e); + } - List names = Lists.newArrayList(); - names.add(name1); - names.add(name2); - names.add(name3); - Map xattrs = fs.getXAttrs(path, names); - Assert.assertEquals(xattrs.size(), 2); - Assert.assertArrayEquals(value1, xattrs.get(name1)); - Assert.assertArrayEquals(value2, xattrs.get(name2)); + /* Throw an exception if an xattr that was requested does not exist. */ + { + final List names = Lists.newArrayList(); + names.add(name1); + names.add(name2); + names.add(name3); + try { + final Map xattrs = fs.getXAttrs(path, names); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains( + "At least one of the attributes provided was not found.", e); + } + } fs.removeXAttr(path, name1); fs.removeXAttr(path, name2); + + /* Unknown namespace should throw an exception. */ + try { + final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo"); + Assert.fail("expected IOException"); + } catch (Exception e) { + GenericTestUtils.assertExceptionContains + ("An XAttr name must be prefixed with user/trusted/security/system, " + + "followed by a '.'", + e); + } + + /* + * The 'trusted' namespace should not be accessible and should throw an + * exception. + */ + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + fs.setXAttr(path, "trusted.foo", "1234".getBytes()); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + final byte[] xattr = userFs.getXAttr(path, "trusted.foo"); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("User doesn't have permission", e); + } + + fs.setXAttr(path, name1, "1234".getBytes()); + + /* + * Test that an exception is thrown if the caller doesn't have permission to + * get the value of the xattr. + */ + + /* Set access so that only the owner has access. 
*/ + fs.setPermission(path, new FsPermission((short) 0700)); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + final byte[] xattr = userFs.getXAttr(path, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* + * The caller must have search access to the parent directory. + */ + final Path childDir = new Path(path, "child" + pathCount); + /* Set access to parent so that only the owner has access. */ + FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700)); + fs.setXAttr(childDir, name1, "1234".getBytes()); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + final byte[] xattr = userFs.getXAttr(childDir, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* Check that read access to the owning directory is not good enough. */ + fs.setPermission(path, new FsPermission((short) 0704)); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + final byte[] xattr = userFs.getXAttr(childDir, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* + * Check that search access to the owning directory and search/execute + * access to the actual entity with extended attributes is not good enough. + */ + fs.setPermission(path, new FsPermission((short) 0701)); + fs.setPermission(childDir, new FsPermission((short) 0701)); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + final byte[] xattr = userFs.getXAttr(childDir, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* + * Check that search access to the owning directory and read access to + * the actual entity with the extended attribute is good enough. + */ + fs.setPermission(path, new FsPermission((short) 0701)); + fs.setPermission(childDir, new FsPermission((short) 0704)); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + final byte[] xattr = userFs.getXAttr(childDir, name1); + return null; + } + }); } /** @@ -402,6 +551,166 @@ public void testRemoveXAttr() throws Exception { fs.removeXAttr(path, name3); } + /** + * removexattr tests. 
Test that removexattr throws an exception if any of + * the following are true: + * an xattr that was requested doesn't exist + * the caller specifies an unknown namespace + * the caller doesn't have access to the namespace + * the caller doesn't have permission to get the value of the xattr + * the caller does not have "execute" (scan) access to the parent directory + * the caller has only read access to the owning directory + * the caller has only execute access to the owning directory and execute + * access to the actual entity + * the caller does not have execute access to the owning directory and write + * access to the actual entity + */ + @Test(timeout = 120000) + public void testRemoveXAttrPermissions() throws Exception { + FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); + fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE)); + fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE)); + + try { + fs.removeXAttr(path, name2); + fs.removeXAttr(path, name2); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("No matching attributes found", e); + } + + /* Unknown namespace should throw an exception. */ + final String expectedExceptionString = "An XAttr name must be prefixed " + + "with user/trusted/security/system, followed by a '.'"; + try { + fs.removeXAttr(path, "wackynamespace.foo"); + Assert.fail("expected IOException"); + } catch (RemoteException e) { + assertEquals("Unexpected RemoteException: " + e, e.getClassName(), + HadoopIllegalArgumentException.class.getCanonicalName()); + GenericTestUtils.assertExceptionContains(expectedExceptionString, e); + } catch (HadoopIllegalArgumentException e) { + GenericTestUtils.assertExceptionContains(expectedExceptionString, e); + } + + /* + * The 'trusted' namespace should not be accessible and should throw an + * exception. + */ + final UserGroupInformation user = UserGroupInformation. + createUserForTesting("user", new String[] {"mygroup"}); + fs.setXAttr(path, "trusted.foo", "1234".getBytes()); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + userFs.removeXAttr(path, "trusted.foo"); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("User doesn't have permission", e); + } finally { + fs.removeXAttr(path, "trusted.foo"); + } + + /* + * Test that an exception is thrown if the caller doesn't have permission to + * get the value of the xattr. + */ + + /* Set access so that only the owner has access. */ + fs.setPermission(path, new FsPermission((short) 0700)); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + userFs.removeXAttr(path, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* + * The caller must have "execute" (scan) access to the parent directory. + */ + final Path childDir = new Path(path, "child" + pathCount); + /* Set access to parent so that only the owner has access. 
*/ + FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700)); + fs.setXAttr(childDir, name1, "1234".getBytes()); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + userFs.removeXAttr(childDir, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* Check that read access to the owning directory is not good enough. */ + fs.setPermission(path, new FsPermission((short) 0704)); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + userFs.removeXAttr(childDir, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* + * Check that execute access to the owning directory and scan access to + * the actual entity with extended attributes is not good enough. + */ + fs.setPermission(path, new FsPermission((short) 0701)); + fs.setPermission(childDir, new FsPermission((short) 0701)); + try { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + userFs.removeXAttr(childDir, name1); + return null; + } + }); + Assert.fail("expected IOException"); + } catch (IOException e) { + GenericTestUtils.assertExceptionContains("Permission denied", e); + } + + /* + * Check that execute access to the owning directory and write access to + * the actual entity with extended attributes is good enough. 
+ */ + fs.setPermission(path, new FsPermission((short) 0701)); + fs.setPermission(childDir, new FsPermission((short) 0706)); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Object run() throws Exception { + final FileSystem userFs = dfsCluster.getFileSystem(); + userFs.removeXAttr(childDir, name1); + return null; + } + }); + } + @Test(timeout = 120000) public void testRenameFileWithXAttr() throws Exception { FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index d6f3885347..a0ae43b57f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -415,7 +415,7 @@ public void testRetryCacheRebuild() throws Exception { LightWeightCache cacheSet = (LightWeightCache) namesystem.getRetryCache().getCacheSet(); - assertEquals(22, cacheSet.size()); + assertEquals(23, cacheSet.size()); Map oldEntries = new HashMap(); @@ -434,7 +434,7 @@ public void testRetryCacheRebuild() throws Exception { assertTrue(namesystem.hasRetryCache()); cacheSet = (LightWeightCache) namesystem .getRetryCache().getCacheSet(); - assertEquals(22, cacheSet.size()); + assertEquals(23, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index a34a0365f9..77f7090e8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -160,7 +160,7 @@ public void testRetryCacheOnStandbyNN() throws Exception { FSNamesystem fsn0 = cluster.getNamesystem(0); LightWeightCache cacheSet = (LightWeightCache) fsn0.getRetryCache().getCacheSet(); - assertEquals(22, cacheSet.size()); + assertEquals(23, cacheSet.size()); Map oldEntries = new HashMap(); @@ -181,7 +181,7 @@ public void testRetryCacheOnStandbyNN() throws Exception { FSNamesystem fsn1 = cluster.getNamesystem(1); cacheSet = (LightWeightCache) fsn1 .getRetryCache().getCacheSet(); - assertEquals(22, cacheSet.size()); + assertEquals(23, cacheSet.size()); iter = cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry = iter.next(); @@ -1047,6 +1047,49 @@ Object getResult() { } } + /** removeXAttr */ + class RemoveXAttrOp extends AtMostOnceOp { + private final String src; + + RemoveXAttrOp(DFSClient client, String src) { + super("removeXAttr", client); + this.src = src; + } + + @Override + void prepare() throws Exception { + Path p = new Path(src); + if (!dfs.exists(p)) { + DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0); + client.setXAttr(src, "user.key", "value".getBytes(), + EnumSet.of(XAttrSetFlag.CREATE)); + } + } + + @Override + void invoke() throws Exception { + client.removeXAttr(src, "user.key"); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + for (int i = 0; i < CHECKTIMES; i++) { + Map iter = 
dfs.getXAttrs(new Path(src)); + Set keySet = iter.keySet(); + if (!keySet.contains("user.key")) { + return true; + } + Thread.sleep(1000); + } + return false; + } + + @Override + Object getResult() { + return null; + } + } + @Test (timeout=60000) public void testCreateSnapshot() throws Exception { final DFSClient client = genClientWithDummyHandler(); @@ -1183,6 +1226,13 @@ public void testSetXAttr() throws Exception { testClientRetryWithFailover(op); } + @Test (timeout=60000) + public void testRemoveXAttr() throws Exception { + DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new RemoveXAttrOp(client, "/removexattr"); + testClientRetryWithFailover(op); + } + /** * When NN failover happens, if the client did not receive the response and * send a retry request to the other NN, the same response should be recieved diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java index 1a20739a71..a84243e7d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java @@ -355,12 +355,6 @@ public void testAclPermissionParam() { public void testXAttrNameParam() { final XAttrNameParam p = new XAttrNameParam("user.a1"); Assert.assertEquals(p.getXAttrName(), "user.a1"); - try { - new XAttrNameParam("a1"); - Assert.fail(); - } catch (IllegalArgumentException e) { - LOG.info("EXPECTED: " + e); - } } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored index a3561cda528a9bae7b6fb6e16180ca9a9d344d7a..a134969ca79136819a5f1e2d5e16771d54aed04a 100644 GIT binary patch delta 27 jcmaE*)}X%Oj}W87USER a2 + e03f4a52-3d85-4e05-8942-286185e639bd + 82 From a6b1d2a85cad227182c28633886670a077aadb4c Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Wed, 23 Jul 2014 20:59:39 +0000 Subject: [PATCH 03/15] HDFS-6114. Block Scan log rolling will never happen if blocks written continuously leading to huge size of dncp_block_verification.log.curr (vinayakumarb via cmccabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612943 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++ .../datanode/BlockPoolSliceScanner.java | 54 ++++++++++++++----- 2 files changed, 46 insertions(+), 12 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4fbe4cfc28..63168e0ca2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -357,6 +357,10 @@ Release 2.6.0 - UNRELEASED HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause a null pointer exception. (Masatake Iwasaki via brandonli) + HDFS-6114. 
Block Scan log rolling will never happen if blocks written + continuously leading to huge size of dncp_block_verification.log.curr + (vinayakumarb via cmccabe) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java index 1039b4fe92..bbb67fc473 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java @@ -84,6 +84,10 @@ class BlockPoolSliceScanner { private final SortedSet blockInfoSet = new TreeSet(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR); + + private final SortedSet newBlockInfoSet = + new TreeSet(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR); + private final GSet blockMap = new LightWeightGSet( LightWeightGSet.computeCapacity(0.5, "BlockMap")); @@ -195,7 +199,7 @@ public LinkedElement getNext() { BlockScanInfo info = new BlockScanInfo( block ); info.lastScanTime = scanTime--; //still keep 'info.lastScanType' to NONE. - addBlockInfo(info); + addBlockInfo(info, false); } RollingLogs rollingLogs = null; @@ -221,25 +225,42 @@ private void updateBytesToScan(long len, long lastScanTime) { // Should we change throttler bandwidth every time bytesLeft changes? // not really required. } - - private synchronized void addBlockInfo(BlockScanInfo info) { - boolean added = blockInfoSet.add(info); + + /** + * Add the BlockScanInfo to sorted set of blockScanInfo + * @param info BlockScanInfo to be added + * @param isNewBlock true if the block is the new Block, false if + * BlockScanInfo is being updated with new scanTime + */ + private synchronized void addBlockInfo(BlockScanInfo info, + boolean isNewBlock) { + boolean added = false; + if (isNewBlock) { + // check whether the block already present + boolean exists = blockInfoSet.contains(info); + added = !exists && newBlockInfoSet.add(info); + } else { + added = blockInfoSet.add(info); + } blockMap.put(info); if (added) { updateBytesToScan(info.getNumBytes(), info.lastScanTime); } } - + private synchronized void delBlockInfo(BlockScanInfo info) { boolean exists = blockInfoSet.remove(info); + if (!exists){ + exists = newBlockInfoSet.remove(info); + } blockMap.remove(info); if (exists) { updateBytesToScan(-info.getNumBytes(), info.lastScanTime); } } - + /** Update blockMap by the given LogEntry */ private synchronized void updateBlockInfo(LogEntry e) { BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp)); @@ -249,7 +270,7 @@ private synchronized void updateBlockInfo(LogEntry e) { delBlockInfo(info); info.lastScanTime = e.verificationTime; info.lastScanType = ScanType.VERIFICATION_SCAN; - addBlockInfo(info); + addBlockInfo(info, false); } } @@ -275,14 +296,14 @@ synchronized void addBlock(ExtendedBlock block) { info = new BlockScanInfo(block.getLocalBlock()); info.lastScanTime = getNewBlockScanTime(); - addBlockInfo(info); + addBlockInfo(info, true); adjustThrottler(); } /** Deletes the block from internal structures */ synchronized void deleteBlock(Block block) { BlockScanInfo info = blockMap.get(block); - if ( info != null ) { + if (info != null) { delBlockInfo(info); } } @@ -319,7 +340,7 @@ private synchronized void updateScanStatus(BlockScanInfo info, info.lastScanType = type; info.lastScanTime = now; info.lastScanOk = scanOk; - 
addBlockInfo(info); + addBlockInfo(info, false); // Don't update meta data if the verification failed. if (!scanOk) { @@ -578,7 +599,7 @@ private boolean assignInitialVerificationTimes() { delBlockInfo(info); info.lastScanTime = lastScanTime; lastScanTime += verifyInterval; - addBlockInfo(info); + addBlockInfo(info, false); } } } @@ -674,12 +695,21 @@ private void scan() { throw e; } finally { rollVerificationLogs(); + rollNewBlocksInfo(); if (LOG.isDebugEnabled()) { LOG.debug("Done scanning block pool: " + blockPoolId); } } } - + + // add new blocks to scan in next iteration + private synchronized void rollNewBlocksInfo() { + for (BlockScanInfo newBlock : newBlockInfoSet) { + blockInfoSet.add(newBlock); + } + newBlockInfoSet.clear(); + } + private synchronized void rollVerificationLogs() { if (verificationLog != null) { try { From 2a5f1029a5221c42ab61b22f99d79251ed069ca4 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Wed, 23 Jul 2014 21:22:50 +0000 Subject: [PATCH 04/15] HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in nfs.exports.allowed.hosts. Contributed by Abhiraj Butala git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612947 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/nfs/NfsExports.java | 7 ++++++- .../hdfs/nfs/mount/RpcProgramMountd.java | 19 +++++++++++++++---- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 5 ++++- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 4 files changed, 28 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java index 9628686538..b617ae5088 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java @@ -53,7 +53,12 @@ public static synchronized NfsExports getInstance(Configuration conf) { long expirationPeriodNano = conf.getLong( Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000; - exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts); + try { + exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts); + } catch (IllegalArgumentException e) { + LOG.error("Invalid NFS Exports provided: ", e); + return exports; + } } return exports; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index 9fbab240f6..2814cb007e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -104,6 +104,10 @@ public XDR nullOp(XDR out, int xid, InetAddress client) { @Override public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) { + if (hostsMatcher == null) { + return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, + null); + } AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client); if (accessPrivilege == AccessPrivilege.NONE) { return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, @@ -208,16 +212,23 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) { } else if (mntproc == MNTPROC.UMNTALL) { 
umntall(out, xid, client); } else if (mntproc == MNTPROC.EXPORT) { - // Currently only support one NFS export + // Currently only support one NFS export List hostsMatchers = new ArrayList(); - hostsMatchers.add(hostsMatcher); - out = MountResponse.writeExportList(out, xid, exports, hostsMatchers); + if (hostsMatcher != null) { + hostsMatchers.add(hostsMatcher); + out = MountResponse.writeExportList(out, xid, exports, hostsMatchers); + } else { + // This means there are no valid exports provided. + RpcAcceptedReply.getInstance(xid, + RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( + out); + } } else { // Invalid procedure RpcAcceptedReply.getInstance(xid, RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write( out); - } + } ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer()); RpcResponse rsp = new RpcResponse(buf, info.remoteAddress()); RpcUtil.sendRpcResponse(ctx, rsp); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index f254f50709..1650b14724 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -2123,8 +2123,11 @@ private boolean checkAccessPrivilege(SocketAddress remoteAddress, if (!doPortMonitoring(remoteAddress)) { return false; } - + // Check export table + if (exports == null) { + return false; + } InetAddress client = ((InetSocketAddress) remoteAddress).getAddress(); AccessPrivilege access = exports.getAccessPrivilege(client); if (access == AccessPrivilege.NONE) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 63168e0ca2..bbeebd1984 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -361,6 +361,9 @@ Release 2.6.0 - UNRELEASED continuously leading to huge size of dncp_block_verification.log.curr (vinayakumarb via cmccabe) + HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in + nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES From 28fca92521b04e9d9b6f4d095c593282a06e0a36 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Wed, 23 Jul 2014 21:40:57 +0000 Subject: [PATCH 05/15] YARN-2147. client lacks delegation token exception details when application submit fails. Contributed by Chen He git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612950 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../security/DelegationTokenRenewer.java | 6 +++- .../security/TestDelegationTokenRenewer.java | 36 ++++++++++++++++++- 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 4c42fd7cdc..6fbfbab542 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -94,6 +94,9 @@ Release 2.6.0 - UNRELEASED YARN-2313. Livelock can occur in FairScheduler when there are lots of running apps (Tsuyoshi Ozawa via Sandy Ryza) + YARN-2147. 
client lacks delegation token exception details when + application submit fails (Chen He via jlowe) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index 38e908926d..bdcfd0460e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -388,7 +388,11 @@ private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt) // If user provides incorrect token then it should not be added for // renewal. for (DelegationTokenToRenew dtr : tokenList) { - renewToken(dtr); + try { + renewToken(dtr); + } catch (IOException ioe) { + throw new IOException("Failed to renew token: " + dtr.token, ioe); + } } for (DelegationTokenToRenew dtr : tokenList) { addTokenToList(dtr); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 0c1ded3a14..f65fcdcb3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -24,6 +24,7 @@ import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -673,7 +674,40 @@ private void waitForEventsToGetProcessed(DelegationTokenRenewer dtr) Thread.sleep(200); } } - + + @Test(timeout=20000) + public void testDTRonAppSubmission() + throws IOException, InterruptedException, BrokenBarrierException { + final Credentials credsx = new Credentials(); + final Token tokenx = mock(Token.class); + credsx.addToken(new Text("token"), tokenx); + doReturn(true).when(tokenx).isManaged(); + doThrow(new IOException("boom")) + .when(tokenx).renew(any(Configuration.class)); + // fire up the renewer + final DelegationTokenRenewer dtr = + createNewDelegationTokenRenewer(conf, counter); + RMContext mockContext = mock(RMContext.class); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + dtr.setRMContext(mockContext); + when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr); + dtr.init(conf); 
+ dtr.start(); + + try { + dtr.addApplicationSync(mock(ApplicationId.class), credsx, false); + fail("Catch IOException on app submission"); + } catch (IOException e){ + Assert.assertTrue(e.getMessage().contains(tokenx.toString())); + Assert.assertTrue(e.getCause().toString().contains("boom")); + } + + } + @Test(timeout=20000) public void testConcurrentAddApplication() throws IOException, InterruptedException, BrokenBarrierException { From 9bfae42538048f25596d688d27be9f21956e0870 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 23 Jul 2014 21:42:01 +0000 Subject: [PATCH 06/15] HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem internal dir behavior consistent. Contributed by Stephen Chu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612951 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../org/apache/hadoop/fs/FileContext.java | 29 +++++++ .../java/org/apache/hadoop/fs/FileSystem.java | 2 +- .../apache/hadoop/fs/viewfs/ChRootedFs.java | 34 ++++++++ .../hadoop/fs/viewfs/ViewFileSystem.java | 34 ++++++++ .../org/apache/hadoop/fs/viewfs/ViewFs.java | 80 +++++++++++++++++++ .../fs/viewfs/ViewFileSystemBaseTest.java | 30 +++++++ .../hadoop/fs/viewfs/ViewFsBaseTest.java | 30 +++++++ 8 files changed, 241 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2463fb065d..08bc9012e9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -452,6 +452,9 @@ Release 2.6.0 - UNRELEASED HADOOP-10855. Allow Text to be read with a known Length. (todd) + HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem + internal dir behavior consistent. (Stephen Chu via wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index 8bb797eb93..2bfcbdcceb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -2484,4 +2484,33 @@ public Void next(final AbstractFileSystem fs, final Path p) } }.resolve(this, absF); } + + /** + * Get all of the xattr names for a file or directory. + * Only those xattr names which the logged-in user has permissions to view + * are returned. + *
+ * A regular user can only get xattr names for the "user" namespace. + * The super user can only get xattr names for "user" and "trusted" + * namespaces. + * The xattrs of the "security" and "system" namespaces are only + * used/exposed internally by/to the FS impl. + *
+ * @see + * http://en.wikipedia.org/wiki/Extended_file_attributes + * + * @param path Path to get extended attributes + * @return List of the XAttr names of the file or directory + * @throws IOException + */ + public List listXAttrs(Path path) throws IOException { + final Path absF = fixRelativePart(path); + return new FSLinkResolver>() { + @Override + public List next(final AbstractFileSystem fs, final Path p) + throws IOException { + return fs.listXAttrs(p); + } + }.resolve(this, absF); + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index deaceb3342..cb921c8842 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -2509,7 +2509,7 @@ public Map getXAttrs(Path path, List names) * http://en.wikipedia.org/wiki/Extended_file_attributes * * @param path Path to get extended attributes - * @return Map describing the XAttrs of the file or directory + * @return List of the XAttr names of the file or directory * @throws IOException */ public List listXAttrs(Path path) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java index f1975eae1b..5d53eb79d0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java @@ -22,6 +22,7 @@ import java.net.URISyntaxException; import java.util.EnumSet; import java.util.List; +import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -37,6 +38,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsPermission; @@ -313,6 +315,38 @@ public AclStatus getAclStatus(Path path) throws IOException { return myFs.getAclStatus(fullPath(path)); } + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + myFs.setXAttr(fullPath(path), name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + return myFs.getXAttr(fullPath(path), name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + return myFs.getXAttrs(fullPath(path)); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + return myFs.getXAttrs(fullPath(path), names); + } + + @Override + public List listXAttrs(Path path) throws IOException { + return myFs.listXAttrs(fullPath(path)); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + myFs.removeXAttr(fullPath(path), name); + } + @Override public void setVerifyChecksum(final boolean verifyChecksum) throws IOException, UnresolvedLinkException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 34a9afc549..b4ac18eb1a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -913,5 +913,39 @@ public AclStatus getAclStatus(Path path) throws IOException { .addEntries(AclUtil.getMinimalAcl(PERMISSION_555)) .stickyBit(false).build(); } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + checkPathIsSlash(path); + throw readOnlyMountTable("setXAttr", path); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + throw new NotInMountpointException(path, "getXAttr"); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + throw new NotInMountpointException(path, "getXAttrs"); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + throw new NotInMountpointException(path, "getXAttrs"); + } + + @Override + public List listXAttrs(Path path) throws IOException { + throw new NotInMountpointException(path, "listXAttrs"); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + checkPathIsSlash(path); + throw readOnlyMountTable("removeXAttr", path); + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index 232fcbbb40..5cdccd2997 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -26,6 +26,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.List; +import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; @@ -48,6 +49,7 @@ import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnsupportedFileSystemException; +import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.local.LocalConfigKeys; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclUtil; @@ -651,6 +653,50 @@ public AclStatus getAclStatus(Path path) throws IOException { fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getAclStatus(res.remainingPath); } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getXAttr(res.remainingPath, name); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getXAttrs(res.remainingPath); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.getXAttrs(res.remainingPath, names); + } + + @Override + public List 
listXAttrs(Path path) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + return res.targetFileSystem.listXAttrs(res.remainingPath); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + InodeTree.ResolveResult res = + fsState.resolve(getUriPath(path), true); + res.targetFileSystem.removeXAttr(res.remainingPath, name); + } /* @@ -921,5 +967,39 @@ public AclStatus getAclStatus(Path path) throws IOException { .addEntries(AclUtil.getMinimalAcl(PERMISSION_555)) .stickyBit(false).build(); } + + @Override + public void setXAttr(Path path, String name, byte[] value, + EnumSet flag) throws IOException { + checkPathIsSlash(path); + throw readOnlyMountTable("setXAttr", path); + } + + @Override + public byte[] getXAttr(Path path, String name) throws IOException { + throw new NotInMountpointException(path, "getXAttr"); + } + + @Override + public Map getXAttrs(Path path) throws IOException { + throw new NotInMountpointException(path, "getXAttrs"); + } + + @Override + public Map getXAttrs(Path path, List names) + throws IOException { + throw new NotInMountpointException(path, "getXAttrs"); + } + + @Override + public List listXAttrs(Path path) throws IOException { + throw new NotInMountpointException(path, "listXAttrs"); + } + + @Override + public void removeXAttr(Path path, String name) throws IOException { + checkPathIsSlash(path); + throw readOnlyMountTable("removeXAttr", path); + } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java index e1a440d061..a32455604c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java @@ -773,4 +773,34 @@ public void testInternalGetAclStatus() throws IOException { assertFalse(aclStatus.isStickyBit()); } + @Test(expected=AccessControlException.class) + public void testInternalSetXAttr() throws IOException { + fsView.setXAttr(new Path("/internalDir"), "xattrName", null); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalGetXAttr() throws IOException { + fsView.getXAttr(new Path("/internalDir"), "xattrName"); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalGetXAttrs() throws IOException { + fsView.getXAttrs(new Path("/internalDir")); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalGetXAttrsWithNames() throws IOException { + fsView.getXAttrs(new Path("/internalDir"), new ArrayList()); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalListXAttr() throws IOException { + fsView.listXAttrs(new Path("/internalDir")); + } + + @Test(expected=AccessControlException.class) + public void testInternalRemoveXAttr() throws IOException { + fsView.removeXAttr(new Path("/internalDir"), "xattrName"); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java index 2813c34bef..035b280249 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java +++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java @@ -747,4 +747,34 @@ public void testInternalGetAclStatus() throws IOException { AclUtil.getMinimalAcl(PERMISSION_555)); assertFalse(aclStatus.isStickyBit()); } + + @Test(expected=AccessControlException.class) + public void testInternalSetXAttr() throws IOException { + fcView.setXAttr(new Path("/internalDir"), "xattrName", null); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalGetXAttr() throws IOException { + fcView.getXAttr(new Path("/internalDir"), "xattrName"); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalGetXAttrs() throws IOException { + fcView.getXAttrs(new Path("/internalDir")); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalGetXAttrsWithNames() throws IOException { + fcView.getXAttrs(new Path("/internalDir"), new ArrayList()); + } + + @Test(expected=NotInMountpointException.class) + public void testInternalListXAttr() throws IOException { + fcView.listXAttrs(new Path("/internalDir")); + } + + @Test(expected=AccessControlException.class) + public void testInternalRemoveXAttr() throws IOException { + fcView.removeXAttr(new Path("/internalDir"), "xattrName"); + } } From 06e5c5cb2de0148014a7c0433c885b099757b775 Mon Sep 17 00:00:00 2001 From: Zhijie Shen Date: Thu, 24 Jul 2014 03:12:45 +0000 Subject: [PATCH 07/15] YARN-2300. Improved the documentation of the sample requests for RM REST API - submitting an app. Contributed by Varun Vasudev. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612981 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../src/site/apt/ResourceManagerRest.apt.vm | 109 ++++++++---------- 2 files changed, 50 insertions(+), 62 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6fbfbab542..6857d6c374 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -303,6 +303,9 @@ Release 2.5.0 - UNRELEASED YARN-1408 Preemption caused Invalid State Event: ACQUIRED at KILLED and caused a task timeout for 30mins. (Sunil G via mayank) + YARN-2300. Improved the documentation of the sample requests for RM REST API - + submitting an app. 
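To make the HADOOP-10887 xattr plumbing shown above concrete, here is a minimal client-side sketch. It relies only on the public FileSystem xattr methods this series touches (setXAttr, getXAttr, listXAttrs, removeXAttr); the viewfs authority, mount point, file path and attribute name are illustrative assumptions, not values taken from the patches.

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrOverViewFsSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical mount table: /data -> hdfs://nn1:8020/data
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("viewfs://cluster/"), conf);
        Path file = new Path("/data/part-00000");

        // These calls resolve through the mount table and are delegated to the target FS.
        fs.setXAttr(file, "user.checksum", "0xdeadbeef".getBytes(StandardCharsets.UTF_8));
        for (String name : fs.listXAttrs(file)) {
          byte[] value = fs.getXAttr(file, name);
          System.out.println(name + " = " + new String(value, StandardCharsets.UTF_8));
        }
        fs.removeXAttr(file, "user.checksum");

        // Internal (non-mounted) viewfs directories reject these calls, as the new
        // tests assert: get/list throw NotInMountpointException, set/remove hit the
        // read-only mount table check.
      }
    }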
(Varun Vasudev via zjshen) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm index 6359e2b7f9..1952e11176 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm @@ -2228,68 +2228,48 @@ _01_000001 { "application-id":"application_1404203615263_0001", "application-name":"test", - "queue":"testqueue", - "priority":"3", "am-container-spec": { "local-resources": { "entry": - { - "key":"example", - "value": + [ { - "resource":"http://www.test.com/file.txt", - "type":"FILE", - "visibility":"APPLICATION", - "size":"100", - "timestamp":"1404203616003" + "key":"AppMaster.jar", + "value": + { + "resource":"hdfs://hdfs-namenode:9000/user/testuser/DistributedShell/demo-app/AppMaster.jar", + "type":"FILE", + "visibility":"APPLICATION", + "size": "43004", + "timestamp": "1405452071209" + } } - } + ] + }, + "commands": + { + "command":"{{JAVA_HOME}}/bin/java -Xmx10m org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster --container_memory 10 --container_vcores 1 --num_containers 1 --priority 0 1>/AppMaster.stdout 2>/AppMaster.stderr" }, "environment": - { - "entry": - { - "key":"APP_VAR", - "value":"ENV_SETTING" - } - }, - "commands": - { - "command":"/bin/sleep 5" - }, - "service-data": - { - "entry": - { - "key":"test", - "value":"dmFsdWUxMg" - } - }, - "credentials": - { - "tokens":null, - "secrets": - { - "entry": - { - "key":"secret1", - "value":"c2VjcmV0MQ" - } - } - }, - "application-acls": { "entry": [ { - "key":"VIEW_APP", - "value":"testuser3, testuser4" + "key": "DISTRIBUTEDSHELLSCRIPTTIMESTAMP", + "value": "1405459400754" }, { - "key":"MODIFY_APP", - "value":"testuser1, testuser2" + "key": "CLASSPATH", + "value": "{{CLASSPATH}}./*{{HADOOP_CONF_DIR}}{{HADOOP_COMMON_HOME}}/share/hadoop/common/*{{HADOOP_COMMON_HOME}}/share/hadoop/common/lib/*{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/*{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/lib/*{{HADOOP_YARN_HOME}}/share/hadoop/yarn/*{{HADOOP_YARN_HOME}}/share/hadoop/yarn/lib/*./log4j.properties" + }, + { + "key": "DISTRIBUTEDSHELLSCRIPTLEN", + "value": "6" + }, + { + "key": "DISTRIBUTEDSHELLSCRIPTLOCATION", + "value": "hdfs://hdfs-namenode:9000/user/testuser/demo-app/shellCommands" } ] } @@ -2302,16 +2282,9 @@ _01_000001 "vCores":"1" }, "application-type":"YARN", - "keep-containers-across-application-attempts":"false", - "application-tags": - { - "tag": - [ - "tag 2", - "tag1" - ] - } + "keep-containers-across-application-attempts":"false" } + +---+ Response Header: @@ -2349,22 +2322,34 @@ _01_000001 example - http://www.test.com/file.txt + hdfs://hdfs-namenode:9000/user/testuser/DistributedShell/demo-app/AppMaster.jar FILE APPLICATION - 100 - 1404204892877 + 43004 + 1405452071209 - APP_VAR - ENV_SETTING + DISTRIBUTEDSHELLSCRIPTTIMESTAMP + 1405459400754 + + + CLASSPATH + {{CLASSPATH}}<CPS>./*<CPS>{{HADOOP_CONF_DIR}}<CPS>{{HADOOP_COMMON_HOME}}/share/hadoop/common/*<CPS>{{HADOOP_COMMON_HOME}}/share/hadoop/common/lib/*<CPS>{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/*<CPS>{{HADOOP_HDFS_HOME}}/share/hadoop/hdfs/lib/*<CPS>{{HADOOP_YARN_HOME}}/share/hadoop/yarn/*<CPS>{{HADOOP_YARN_HOME}}/share/hadoop/yarn/lib/*<CPS>./log4j.properties + + + DISTRIBUTEDSHELLSCRIPTLEN + 6 + + + DISTRIBUTEDSHELLSCRIPTLOCATION + 
hdfs://hdfs-namenode:9000/user/testuser/demo-app/shellCommands - /bin/sleep 5 + {{JAVA_HOME}}/bin/java -Xmx10m org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster --container_memory 10 --container_vcores 1 --num_containers 1 --priority 0 1><LOG_DIR>/AppMaster.stdout 2><LOG_DIR>/AppMaster.stderr From 2050e0dad661bade3e140d7a5692cfe1999badc3 Mon Sep 17 00:00:00 2001 From: Devarajulu K Date: Thu, 24 Jul 2014 05:02:00 +0000 Subject: [PATCH 08/15] YARN-1342. Recover container tokens upon nodemanager restart. Contributed by Jason Lowe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1612995 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../BaseContainerTokenSecretManager.java | 2 +- .../yarn/server/nodemanager/NodeManager.java | 6 +- .../recovery/NMLeveldbStateStoreService.java | 117 ++++++++++++-- .../recovery/NMNullStateStoreService.java | 30 +++- .../recovery/NMStateStoreService.java | 40 ++++- .../NMContainerTokenSecretManager.java | 105 ++++++++++--- .../security/NMTokenSecretManagerInNM.java | 5 +- .../recovery/NMMemoryStateStoreService.java | 55 ++++++- .../TestNMLeveldbStateStoreService.java | 96 +++++++++++- .../TestNMContainerTokenSecretManager.java | 144 ++++++++++++++++++ .../TestNMTokenSecretManagerInNM.java | 8 +- 12 files changed, 557 insertions(+), 54 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6857d6c374..1712abeb9e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -62,6 +62,9 @@ Release 2.6.0 - UNRELEASED YARN-2295. Refactored DistributedShell to use public APIs of protocol records. (Li Lu via jianhe) + YARN-1342. Recover container tokens upon nodemanager restart. 
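In outline, YARN-1342 persists the container-token secret manager's master keys and the expiration times of active container tokens under "ContainerTokens/..." keys in the NM leveldb state store, and reloads them when the NodeManager restarts. A hedged sketch of the recovery wiring, mirroring the NodeManager.recoverTokens change in this patch (instantiating NMLeveldbStateStoreService directly and the configuration shown are illustrative; the NodeManager selects and configures the store itself):

    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.nodemanager.recovery.NMLeveldbStateStoreService;
    import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
    import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
    import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;

    public class NMTokenRecoverySketch {
      public static void main(String[] args) throws Exception {
        YarnConfiguration conf = new YarnConfiguration();
        conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
        // The leveldb store also needs the NM recovery directory configured.

        NMStateStoreService stateStore = new NMLeveldbStateStoreService();
        stateStore.init(conf);
        stateStore.start();

        // The secret managers are now constructed with the state store, so master-key
        // rolls and successful container starts are persisted as they happen.
        NMContainerTokenSecretManager containerTokenSecretManager =
            new NMContainerTokenSecretManager(conf, stateStore);
        NMTokenSecretManagerInNM nmTokenSecretManager =
            new NMTokenSecretManagerInNM(stateStore);

        if (stateStore.canRecover()) {
          // Reloads current/previous master keys and, for container tokens, the map
          // of active container ids -> expiration times used to validate
          // start-container requests after the restart.
          nmTokenSecretManager.recover();
          containerTokenSecretManager.recover();
        }

        stateStore.stop();
      }
    }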
(Jason Lowe via + devaraj) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java index ccfe8f59fd..e73d07c26c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java @@ -43,7 +43,7 @@ public class BaseContainerTokenSecretManager extends private static Log LOG = LogFactory .getLog(BaseContainerTokenSecretManager.class); - private int serialNo = new SecureRandom().nextInt(); + protected int serialNo = new SecureRandom().nextInt(); protected final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); protected final Lock readLock = readWriteLock.readLock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 65988a211d..a479be29f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -173,8 +173,8 @@ private void recoverTokens(NMTokenSecretManagerInNM nmTokenSecretManager, NMContainerTokenSecretManager containerTokenSecretManager) throws IOException { if (nmStore.canRecover()) { - nmTokenSecretManager.recover(nmStore.loadNMTokenState()); - // TODO: recover containerTokenSecretManager + nmTokenSecretManager.recover(); + containerTokenSecretManager.recover(); } } @@ -190,7 +190,7 @@ protected void serviceInit(Configuration conf) throws Exception { initAndStartRecoveryStore(conf); NMContainerTokenSecretManager containerTokenSecretManager = - new NMContainerTokenSecretManager(conf); + new NMContainerTokenSecretManager(conf, nmStore); NMTokenSecretManagerInNM nmTokenSecretManager = new NMTokenSecretManagerInNM(nmStore); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java index b905c1e5ad..008da7a2b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import 
org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto; @@ -90,6 +91,12 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { NM_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX; private static final String NM_TOKENS_PREV_MASTER_KEY = NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX; + private static final String CONTAINER_TOKENS_KEY_PREFIX = + "ContainerTokens/"; + private static final String CONTAINER_TOKENS_CURRENT_MASTER_KEY = + CONTAINER_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX; + private static final String CONTAINER_TOKENS_PREV_MASTER_KEY = + CONTAINER_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX; private DB db; @@ -141,7 +148,7 @@ public RecoveredLocalizationState loadLocalizationState() key.substring(0, userEndPos+1))); } } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } finally { if (iter != null) { iter.close(); @@ -260,7 +267,7 @@ public void startResourceLocalization(String user, ApplicationId appId, try { db.put(bytes(key), proto.toByteArray()); } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } @@ -283,7 +290,7 @@ public void finishResourceLocalization(String user, ApplicationId appId, batch.close(); } } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } @@ -306,7 +313,7 @@ public void removeLocalizedResource(String user, ApplicationId appId, batch.close(); } } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } @@ -355,7 +362,7 @@ public RecoveredDeletionServiceState loadDeletionServiceState() DeletionServiceDeleteTaskProto.parseFrom(entry.getValue())); } } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } finally { if (iter != null) { iter.close(); @@ -371,7 +378,7 @@ public void storeDeletionTask(int taskId, try { db.put(bytes(key), taskProto.toByteArray()); } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } @@ -381,14 +388,14 @@ public void removeDeletionTask(int taskId) throws IOException { try { db.delete(bytes(key)); } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } @Override - public RecoveredNMTokenState loadNMTokenState() throws IOException { - RecoveredNMTokenState state = new RecoveredNMTokenState(); + public RecoveredNMTokensState loadNMTokensState() throws IOException { + RecoveredNMTokensState state = new RecoveredNMTokensState(); state.applicationMasterKeys = new HashMap(); LeveldbIterator iter = null; @@ -420,7 +427,7 @@ public RecoveredNMTokenState loadNMTokenState() throws IOException { } } } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } finally { if (iter != null) { iter.close(); @@ -454,7 +461,7 @@ public void removeNMTokenApplicationMasterKey( try { db.delete(bytes(key)); } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } @@ -468,7 +475,91 @@ private void storeMasterKey(String dbKey, MasterKey key) try { db.put(bytes(dbKey), pb.getProto().toByteArray()); } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); + 
} + } + + + @Override + public RecoveredContainerTokensState loadContainerTokensState() + throws IOException { + RecoveredContainerTokensState state = new RecoveredContainerTokensState(); + state.activeTokens = new HashMap(); + LeveldbIterator iter = null; + try { + iter = new LeveldbIterator(db); + iter.seek(bytes(CONTAINER_TOKENS_KEY_PREFIX)); + final int containerTokensKeyPrefixLength = + CONTAINER_TOKENS_KEY_PREFIX.length(); + while (iter.hasNext()) { + Entry entry = iter.next(); + String fullKey = asString(entry.getKey()); + if (!fullKey.startsWith(CONTAINER_TOKENS_KEY_PREFIX)) { + break; + } + String key = fullKey.substring(containerTokensKeyPrefixLength); + if (key.equals(CURRENT_MASTER_KEY_SUFFIX)) { + state.currentMasterKey = parseMasterKey(entry.getValue()); + } else if (key.equals(PREV_MASTER_KEY_SUFFIX)) { + state.previousMasterKey = parseMasterKey(entry.getValue()); + } else if (key.startsWith(ConverterUtils.CONTAINER_PREFIX)) { + loadContainerToken(state, fullKey, key, entry.getValue()); + } + } + } catch (DBException e) { + throw new IOException(e); + } finally { + if (iter != null) { + iter.close(); + } + } + return state; + } + + private static void loadContainerToken(RecoveredContainerTokensState state, + String key, String containerIdStr, byte[] value) throws IOException { + ContainerId containerId; + Long expTime; + try { + containerId = ConverterUtils.toContainerId(containerIdStr); + expTime = Long.parseLong(asString(value)); + } catch (IllegalArgumentException e) { + throw new IOException("Bad container token state for " + key, e); + } + state.activeTokens.put(containerId, expTime); + } + + @Override + public void storeContainerTokenCurrentMasterKey(MasterKey key) + throws IOException { + storeMasterKey(CONTAINER_TOKENS_CURRENT_MASTER_KEY, key); + } + + @Override + public void storeContainerTokenPreviousMasterKey(MasterKey key) + throws IOException { + storeMasterKey(CONTAINER_TOKENS_PREV_MASTER_KEY, key); + } + + @Override + public void storeContainerToken(ContainerId containerId, Long expTime) + throws IOException { + String key = CONTAINER_TOKENS_KEY_PREFIX + containerId; + try { + db.put(bytes(key), bytes(expTime.toString())); + } catch (DBException e) { + throw new IOException(e); + } + } + + @Override + public void removeContainerToken(ContainerId containerId) + throws IOException { + String key = CONTAINER_TOKENS_KEY_PREFIX + containerId; + try { + db.delete(bytes(key)); + } catch (DBException e) { + throw new IOException(e); } } @@ -554,7 +645,7 @@ private void dbStoreVersion(NMDBSchemaVersion state) throws IOException { try { db.put(bytes(key), data); } catch (DBException e) { - throw new IOException(e.getMessage(), e); + throw new IOException(e); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java index 5d9e0ea15a..89205b1f63 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMNullStateStoreService.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.Path; 
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; @@ -80,7 +81,7 @@ public void removeDeletionTask(int taskId) throws IOException { } @Override - public RecoveredNMTokenState loadNMTokenState() throws IOException { + public RecoveredNMTokensState loadNMTokensState() throws IOException { throw new UnsupportedOperationException( "Recovery not supported by this state store"); } @@ -105,6 +106,33 @@ public void removeNMTokenApplicationMasterKey(ApplicationAttemptId attempt) throws IOException { } + @Override + public RecoveredContainerTokensState loadContainerTokensState() + throws IOException { + throw new UnsupportedOperationException( + "Recovery not supported by this state store"); + } + + @Override + public void storeContainerTokenCurrentMasterKey(MasterKey key) + throws IOException { + } + + @Override + public void storeContainerTokenPreviousMasterKey(MasterKey key) + throws IOException { + } + + @Override + public void storeContainerToken(ContainerId containerId, + Long expirationTime) throws IOException { + } + + @Override + public void removeContainerToken(ContainerId containerId) + throws IOException { + } + @Override protected void initStorage(Configuration conf) throws IOException { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java index 8a5944dbd1..87c438b59b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMStateStoreService.java @@ -31,6 +31,7 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; @@ -102,7 +103,7 @@ public List getTasks() { } } - public static class RecoveredNMTokenState { + public static class RecoveredNMTokensState { MasterKey currentMasterKey; MasterKey previousMasterKey; Map applicationMasterKeys; @@ -120,6 +121,24 @@ public Map getApplicationMasterKeys() { } } + public static class RecoveredContainerTokensState { + MasterKey currentMasterKey; + MasterKey previousMasterKey; + Map activeTokens; + + public MasterKey getCurrentMasterKey() { + return currentMasterKey; + } + + public MasterKey getPreviousMasterKey() { + return previousMasterKey; + } + + public Map getActiveTokens() { + return activeTokens; + } + } + /** Initialize the state storage */ 
@Override public void serviceInit(Configuration conf) throws IOException { @@ -193,7 +212,8 @@ public abstract void storeDeletionTask(int taskId, public abstract void removeDeletionTask(int taskId) throws IOException; - public abstract RecoveredNMTokenState loadNMTokenState() throws IOException; + public abstract RecoveredNMTokensState loadNMTokensState() + throws IOException; public abstract void storeNMTokenCurrentMasterKey(MasterKey key) throws IOException; @@ -208,6 +228,22 @@ public abstract void removeNMTokenApplicationMasterKey( ApplicationAttemptId attempt) throws IOException; + public abstract RecoveredContainerTokensState loadContainerTokensState() + throws IOException; + + public abstract void storeContainerTokenCurrentMasterKey(MasterKey key) + throws IOException; + + public abstract void storeContainerTokenPreviousMasterKey(MasterKey key) + throws IOException; + + public abstract void storeContainerToken(ContainerId containerId, + Long expirationTime) throws IOException; + + public abstract void removeContainerToken(ContainerId containerId) + throws IOException; + + protected abstract void initStorage(Configuration conf) throws IOException; protected abstract void startStorage() throws IOException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java index 8860a95252..2a92d40ad5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMContainerTokenSecretManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.security; +import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -33,6 +34,9 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState; import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.MasterKeyData; @@ -49,14 +53,74 @@ public class NMContainerTokenSecretManager extends private MasterKeyData previousMasterKey; private final TreeMap> recentlyStartedContainerTracker; - + private final NMStateStoreService stateStore; private String nodeHostAddr; public NMContainerTokenSecretManager(Configuration conf) { + this(conf, new NMNullStateStoreService()); + } + + public NMContainerTokenSecretManager(Configuration conf, + NMStateStoreService stateStore) { super(conf); recentlyStartedContainerTracker = new TreeMap>(); + this.stateStore = stateStore; + } + + public synchronized void recover() + throws IOException { + RecoveredContainerTokensState state = + stateStore.loadContainerTokensState(); + MasterKey key = state.getCurrentMasterKey(); 
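    // Reader note (annotation, not part of the patch): the block below restores the
    // recovered state -- the current and previous master keys are rebuilt from their
    // serialized bytes, serialNo is re-seeded from the recovered current key id
    // (key id + 1), and the expiry-time -> container-id tracker is repopulated from
    // the persisted active tokens.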
+ if (key != null) { + super.currentMasterKey = + new MasterKeyData(key, createSecretKey(key.getBytes().array())); + } + + key = state.getPreviousMasterKey(); + if (key != null) { + previousMasterKey = + new MasterKeyData(key, createSecretKey(key.getBytes().array())); + } + + // restore the serial number from the current master key + if (super.currentMasterKey != null) { + super.serialNo = super.currentMasterKey.getMasterKey().getKeyId() + 1; + } + + for (Entry entry : state.getActiveTokens().entrySet()) { + ContainerId containerId = entry.getKey(); + Long expTime = entry.getValue(); + List containerList = + recentlyStartedContainerTracker.get(expTime); + if (containerList == null) { + containerList = new ArrayList(); + recentlyStartedContainerTracker.put(expTime, containerList); + } + if (!containerList.contains(containerId)) { + containerList.add(containerId); + } + } + } + + private void updateCurrentMasterKey(MasterKeyData key) { + super.currentMasterKey = key; + try { + stateStore.storeContainerTokenCurrentMasterKey(key.getMasterKey()); + } catch (IOException e) { + LOG.error("Unable to update current master key in state store", e); + } + } + + private void updatePreviousMasterKey(MasterKeyData key) { + previousMasterKey = key; + try { + stateStore.storeContainerTokenPreviousMasterKey(key.getMasterKey()); + } catch (IOException e) { + LOG.error("Unable to update previous master key in state store", e); + } } /** @@ -68,21 +132,16 @@ public NMContainerTokenSecretManager(Configuration conf) { */ @Private public synchronized void setMasterKey(MasterKey masterKeyRecord) { - LOG.info("Rolling master-key for container-tokens, got key with id " - + masterKeyRecord.getKeyId()); - if (super.currentMasterKey == null) { - super.currentMasterKey = - new MasterKeyData(masterKeyRecord, createSecretKey(masterKeyRecord - .getBytes().array())); - } else { - if (super.currentMasterKey.getMasterKey().getKeyId() != masterKeyRecord - .getKeyId()) { - // Update keys only if the key has changed. - this.previousMasterKey = super.currentMasterKey; - super.currentMasterKey = - new MasterKeyData(masterKeyRecord, createSecretKey(masterKeyRecord - .getBytes().array())); + // Update keys only if the key has changed. + if (super.currentMasterKey == null || super.currentMasterKey.getMasterKey() + .getKeyId() != masterKeyRecord.getKeyId()) { + LOG.info("Rolling master-key for container-tokens, got key with id " + + masterKeyRecord.getKeyId()); + if (super.currentMasterKey != null) { + updatePreviousMasterKey(super.currentMasterKey); } + updateCurrentMasterKey(new MasterKeyData(masterKeyRecord, + createSecretKey(masterKeyRecord.getBytes().array()))); } } @@ -137,14 +196,19 @@ public synchronized void startContainerSuccessful( removeAnyContainerTokenIfExpired(); + ContainerId containerId = tokenId.getContainerID(); Long expTime = tokenId.getExpiryTimeStamp(); // We might have multiple containers with same expiration time. 
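    // Reader note (annotation, not part of the patch): besides tracking a newly
    // started container in memory, startContainerSuccessful now also persists its
    // (containerId, expiry time) through the state store, and
    // removeAnyContainerTokenIfExpired deletes the persisted entries as they expire,
    // so the duplicate-start check (isValidStartContainerRequest) survives an NM restart.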
if (!recentlyStartedContainerTracker.containsKey(expTime)) { recentlyStartedContainerTracker .put(expTime, new ArrayList()); } - recentlyStartedContainerTracker.get(expTime).add(tokenId.getContainerID()); - + recentlyStartedContainerTracker.get(expTime).add(containerId); + try { + stateStore.storeContainerToken(containerId, expTime); + } catch (IOException e) { + LOG.error("Unable to store token for container " + containerId, e); + } } protected synchronized void removeAnyContainerTokenIfExpired() { @@ -155,6 +219,13 @@ protected synchronized void removeAnyContainerTokenIfExpired() { while (containersI.hasNext()) { Entry> containerEntry = containersI.next(); if (containerEntry.getKey() < currTime) { + for (ContainerId container : containerEntry.getValue()) { + try { + stateStore.removeContainerToken(container); + } catch (IOException e) { + LOG.error("Unable to remove token for container " + container, e); + } + } containersI.remove(); } else { break; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java index a9b9b994ad..f6169e7026 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/NMTokenSecretManagerInNM.java @@ -34,7 +34,7 @@ import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService; -import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokenState; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState; import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; import org.apache.hadoop.yarn.server.security.MasterKeyData; @@ -64,8 +64,9 @@ public NMTokenSecretManagerInNM(NMStateStoreService stateStore) { this.stateStore = stateStore; } - public synchronized void recover(RecoveredNMTokenState state) + public synchronized void recover() throws IOException { + RecoveredNMTokensState state = stateStore.loadNMTokensState(); MasterKey key = state.getCurrentMasterKey(); if (key != null) { super.currentMasterKey = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java index 9909d9db9e..fef2b12221 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMMemoryStateStoreService.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto; import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; @@ -36,7 +37,8 @@ public class NMMemoryStateStoreService extends NMStateStoreService { private Map trackerStates; private Map deleteTasks; - private RecoveredNMTokenState nmTokenState; + private RecoveredNMTokensState nmTokenState; + private RecoveredContainerTokensState containerTokenState; public NMMemoryStateStoreService() { super(NMMemoryStateStoreService.class.getName()); @@ -117,12 +119,13 @@ public synchronized void removeLocalizedResource(String user, @Override protected void initStorage(Configuration conf) { - nmTokenState = new RecoveredNMTokenState(); + nmTokenState = new RecoveredNMTokensState(); nmTokenState.applicationMasterKeys = new HashMap(); + containerTokenState = new RecoveredContainerTokensState(); + containerTokenState.activeTokens = new HashMap(); trackerStates = new HashMap(); deleteTasks = new HashMap(); - } @Override @@ -157,9 +160,9 @@ public synchronized void removeDeletionTask(int taskId) throws IOException { @Override - public RecoveredNMTokenState loadNMTokenState() throws IOException { + public RecoveredNMTokensState loadNMTokensState() throws IOException { // return a copy so caller can't modify our state - RecoveredNMTokenState result = new RecoveredNMTokenState(); + RecoveredNMTokensState result = new RecoveredNMTokensState(); result.currentMasterKey = nmTokenState.currentMasterKey; result.previousMasterKey = nmTokenState.previousMasterKey; result.applicationMasterKeys = @@ -197,6 +200,48 @@ public void removeNMTokenApplicationMasterKey(ApplicationAttemptId attempt) } + @Override + public RecoveredContainerTokensState loadContainerTokensState() + throws IOException { + // return a copy so caller can't modify our state + RecoveredContainerTokensState result = + new RecoveredContainerTokensState(); + result.currentMasterKey = containerTokenState.currentMasterKey; + result.previousMasterKey = containerTokenState.previousMasterKey; + result.activeTokens = + new HashMap(containerTokenState.activeTokens); + return result; + } + + @Override + public void storeContainerTokenCurrentMasterKey(MasterKey key) + throws IOException { + MasterKeyPBImpl keypb = (MasterKeyPBImpl) key; + containerTokenState.currentMasterKey = + new MasterKeyPBImpl(keypb.getProto()); + } + + @Override + public void storeContainerTokenPreviousMasterKey(MasterKey key) + throws IOException { + MasterKeyPBImpl keypb = (MasterKeyPBImpl) key; + containerTokenState.previousMasterKey = + new MasterKeyPBImpl(keypb.getProto()); + } + + @Override + public void storeContainerToken(ContainerId containerId, + Long expirationTime) throws IOException { + containerTokenState.activeTokens.put(containerId, expirationTime); + } + + @Override + public void removeContainerToken(ContainerId containerId) + throws IOException { + containerTokenState.activeTokens.remove(containerId); + } + + private static class TrackerState { Map inProgressMap = new HashMap(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java index ca17a4e6e8..833a062d3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java @@ -27,11 +27,13 @@ import java.io.IOException; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.service.ServiceStateException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; @@ -42,12 +44,15 @@ import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto; import org.apache.hadoop.yarn.server.api.records.MasterKey; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.LocalResourceTrackerState; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredContainerTokensState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredLocalizationState; -import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokenState; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredNMTokensState; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredUserResources; import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion; +import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; import org.apache.hadoop.yarn.server.security.BaseNMTokenSecretManager; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.junit.After; import org.junit.Assert; @@ -502,7 +507,7 @@ public void testDeletionTaskStorage() throws IOException { @Test public void testNMTokenStorage() throws IOException { // test empty when no state - RecoveredNMTokenState state = stateStore.loadNMTokenState(); + RecoveredNMTokensState state = stateStore.loadNMTokensState(); assertNull(state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); @@ -512,7 +517,7 @@ public void testNMTokenStorage() throws IOException { MasterKey currentKey = secretMgr.generateKey(); stateStore.storeNMTokenCurrentMasterKey(currentKey); restartStateStore(); - state = stateStore.loadNMTokenState(); + state = stateStore.loadNMTokensState(); assertEquals(currentKey, state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); @@ -521,7 +526,7 @@ public void testNMTokenStorage() throws IOException { MasterKey prevKey = secretMgr.generateKey(); stateStore.storeNMTokenPreviousMasterKey(prevKey); 
restartStateStore(); - state = stateStore.loadNMTokenState(); + state = stateStore.loadNMTokensState(); assertEquals(currentKey, state.getCurrentMasterKey()); assertEquals(prevKey, state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); @@ -536,7 +541,7 @@ public void testNMTokenStorage() throws IOException { MasterKey attemptKey2 = secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt2, attemptKey2); restartStateStore(); - state = stateStore.loadNMTokenState(); + state = stateStore.loadNMTokensState(); assertEquals(currentKey, state.getCurrentMasterKey()); assertEquals(prevKey, state.getPreviousMasterKey()); Map loadedAppKeys = @@ -558,7 +563,7 @@ public void testNMTokenStorage() throws IOException { currentKey = secretMgr.generateKey(); stateStore.storeNMTokenCurrentMasterKey(currentKey); restartStateStore(); - state = stateStore.loadNMTokenState(); + state = stateStore.loadNMTokensState(); assertEquals(currentKey, state.getCurrentMasterKey()); assertEquals(prevKey, state.getPreviousMasterKey()); loadedAppKeys = state.getApplicationMasterKeys(); @@ -568,10 +573,89 @@ public void testNMTokenStorage() throws IOException { assertEquals(attemptKey3, loadedAppKeys.get(attempt3)); } + @Test + public void testContainerTokenStorage() throws IOException { + // test empty when no state + RecoveredContainerTokensState state = + stateStore.loadContainerTokensState(); + assertNull(state.getCurrentMasterKey()); + assertNull(state.getPreviousMasterKey()); + assertTrue(state.getActiveTokens().isEmpty()); + + // store a master key and verify recovered + ContainerTokenKeyGeneratorForTest keygen = + new ContainerTokenKeyGeneratorForTest(new YarnConfiguration()); + MasterKey currentKey = keygen.generateKey(); + stateStore.storeContainerTokenCurrentMasterKey(currentKey); + restartStateStore(); + state = stateStore.loadContainerTokensState(); + assertEquals(currentKey, state.getCurrentMasterKey()); + assertNull(state.getPreviousMasterKey()); + assertTrue(state.getActiveTokens().isEmpty()); + + // store a previous key and verify recovered + MasterKey prevKey = keygen.generateKey(); + stateStore.storeContainerTokenPreviousMasterKey(prevKey); + restartStateStore(); + state = stateStore.loadContainerTokensState(); + assertEquals(currentKey, state.getCurrentMasterKey()); + assertEquals(prevKey, state.getPreviousMasterKey()); + assertTrue(state.getActiveTokens().isEmpty()); + + // store a few container tokens and verify recovered + ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1); + Long expTime1 = 1234567890L; + ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2); + Long expTime2 = 9876543210L; + stateStore.storeContainerToken(cid1, expTime1); + stateStore.storeContainerToken(cid2, expTime2); + restartStateStore(); + state = stateStore.loadContainerTokensState(); + assertEquals(currentKey, state.getCurrentMasterKey()); + assertEquals(prevKey, state.getPreviousMasterKey()); + Map loadedActiveTokens = + state.getActiveTokens(); + assertEquals(2, loadedActiveTokens.size()); + assertEquals(expTime1, loadedActiveTokens.get(cid1)); + assertEquals(expTime2, loadedActiveTokens.get(cid2)); + + // add/update/remove tokens and verify recovered + ContainerId cid3 = BuilderUtils.newContainerId(3, 3, 3, 3); + Long expTime3 = 135798642L; + stateStore.storeContainerToken(cid3, expTime3); + stateStore.removeContainerToken(cid1); + expTime2 += 246897531L; + stateStore.storeContainerToken(cid2, expTime2); + prevKey = currentKey; + 
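    // Reader note (annotation, not part of the patch): the remaining steps roll and
    // persist the master keys, restart the store, and assert that recovery reflects
    // only the latest writes -- cid1 stays removed, cid2 carries its updated expiry,
    // cid3 is present, and the new current/previous key pair is returned.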
stateStore.storeContainerTokenPreviousMasterKey(prevKey); + currentKey = keygen.generateKey(); + stateStore.storeContainerTokenCurrentMasterKey(currentKey); + restartStateStore(); + state = stateStore.loadContainerTokensState(); + assertEquals(currentKey, state.getCurrentMasterKey()); + assertEquals(prevKey, state.getPreviousMasterKey()); + loadedActiveTokens = state.getActiveTokens(); + assertEquals(2, loadedActiveTokens.size()); + assertNull(loadedActiveTokens.get(cid1)); + assertEquals(expTime2, loadedActiveTokens.get(cid2)); + assertEquals(expTime3, loadedActiveTokens.get(cid3)); + } + private static class NMTokenSecretManagerForTest extends BaseNMTokenSecretManager { public MasterKey generateKey() { return createNewMasterKey().getMasterKey(); } } + + private static class ContainerTokenKeyGeneratorForTest extends + BaseContainerTokenSecretManager { + public ContainerTokenKeyGeneratorForTest(Configuration conf) { + super(conf); + } + + public MasterKey generateKey() { + return createNewMasterKey().getMasterKey(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java new file mode 100644 index 0000000000..f2a46adaf8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMContainerTokenSecretManager.java @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.nodemanager.security; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.api.records.MasterKey; +import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService; +import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.junit.Test; + +public class TestNMContainerTokenSecretManager { + + @Test + public void testRecovery() throws IOException { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); + final NodeId nodeId = NodeId.newInstance("somehost", 1234); + final ContainerId cid1 = BuilderUtils.newContainerId(1, 1, 1, 1); + final ContainerId cid2 = BuilderUtils.newContainerId(2, 2, 2, 2); + ContainerTokenKeyGeneratorForTest keygen = + new ContainerTokenKeyGeneratorForTest(conf); + NMMemoryStateStoreService stateStore = new NMMemoryStateStoreService(); + stateStore.init(conf); + stateStore.start(); + NMContainerTokenSecretManager secretMgr = + new NMContainerTokenSecretManager(conf, stateStore); + secretMgr.setNodeId(nodeId); + MasterKey currentKey = keygen.generateKey(); + secretMgr.setMasterKey(currentKey); + ContainerTokenIdentifier tokenId1 = + createContainerTokenId(cid1, nodeId, "user1", secretMgr); + ContainerTokenIdentifier tokenId2 = + createContainerTokenId(cid2, nodeId, "user2", secretMgr); + assertNotNull(secretMgr.retrievePassword(tokenId1)); + assertNotNull(secretMgr.retrievePassword(tokenId2)); + + // restart and verify tokens still valid + secretMgr = new NMContainerTokenSecretManager(conf, stateStore); + secretMgr.setNodeId(nodeId); + secretMgr.recover(); + assertEquals(currentKey, secretMgr.getCurrentKey()); + assertTrue(secretMgr.isValidStartContainerRequest(tokenId1)); + assertTrue(secretMgr.isValidStartContainerRequest(tokenId2)); + assertNotNull(secretMgr.retrievePassword(tokenId1)); + assertNotNull(secretMgr.retrievePassword(tokenId2)); + + // roll master key and start a container + secretMgr.startContainerSuccessful(tokenId2); + currentKey = keygen.generateKey(); + secretMgr.setMasterKey(currentKey); + + // restart and verify tokens still valid due to prev key persist + secretMgr = new NMContainerTokenSecretManager(conf, stateStore); + secretMgr.setNodeId(nodeId); + secretMgr.recover(); + assertEquals(currentKey, secretMgr.getCurrentKey()); + assertTrue(secretMgr.isValidStartContainerRequest(tokenId1)); + assertFalse(secretMgr.isValidStartContainerRequest(tokenId2)); + assertNotNull(secretMgr.retrievePassword(tokenId1)); + assertNotNull(secretMgr.retrievePassword(tokenId2)); + + // roll master key again, restart, and verify keys no longer valid + currentKey = keygen.generateKey(); + secretMgr.setMasterKey(currentKey); + secretMgr = new NMContainerTokenSecretManager(conf, 
stateStore); + secretMgr.setNodeId(nodeId); + secretMgr.recover(); + assertEquals(currentKey, secretMgr.getCurrentKey()); + assertTrue(secretMgr.isValidStartContainerRequest(tokenId1)); + assertFalse(secretMgr.isValidStartContainerRequest(tokenId2)); + try { + secretMgr.retrievePassword(tokenId1); + fail("token should not be valid"); + } catch (InvalidToken e) { + // expected + } + try { + secretMgr.retrievePassword(tokenId2); + fail("token should not be valid"); + } catch (InvalidToken e) { + // expected + } + + stateStore.close(); + } + + private static ContainerTokenIdentifier createContainerTokenId( + ContainerId cid, NodeId nodeId, String user, + NMContainerTokenSecretManager secretMgr) throws IOException { + long rmid = cid.getApplicationAttemptId().getApplicationId() + .getClusterTimestamp(); + ContainerTokenIdentifier ctid = new ContainerTokenIdentifier(cid, + nodeId.toString(), user, BuilderUtils.newResource(1024, 1), + System.currentTimeMillis() + 100000L, + secretMgr.getCurrentKey().getKeyId(), rmid, + Priority.newInstance(0), 0); + Token token = BuilderUtils.newContainerToken(nodeId, + secretMgr.createPassword(ctid), ctid); + return BuilderUtils.newContainerTokenIdentifier(token); + } + + private static class ContainerTokenKeyGeneratorForTest extends + BaseContainerTokenSecretManager { + public ContainerTokenKeyGeneratorForTest(Configuration conf) { + super(conf); + } + + public MasterKey generateKey() { + return createNewMasterKey().getMasterKey(); + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java index 1f1fc51e56..5c1f3a1c31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/security/TestNMTokenSecretManagerInNM.java @@ -73,7 +73,7 @@ public void testRecovery() throws IOException { // restart and verify key is still there and token still valid secretMgr = new NMTokenSecretManagerInNM(stateStore); - secretMgr.recover(stateStore.loadNMTokenState()); + secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey, secretMgr.getCurrentKey()); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); @@ -88,7 +88,7 @@ public void testRecovery() throws IOException { // restart and verify attempt1 key is still valid due to prev key persist secretMgr = new NMTokenSecretManagerInNM(stateStore); - secretMgr.recover(stateStore.loadNMTokenState()); + secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey, secretMgr.getCurrentKey()); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); @@ -101,7 +101,7 @@ public void testRecovery() throws IOException { currentKey = keygen.generateKey(); secretMgr.setMasterKey(currentKey); secretMgr = new NMTokenSecretManagerInNM(stateStore); - secretMgr.recover(stateStore.loadNMTokenState()); + secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey, secretMgr.getCurrentKey()); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); @@ -117,7 +117,7 @@ public void 
testRecovery() throws IOException { // remove last attempt, restart, verify both tokens are now bad secretMgr.appFinished(attempt2.getApplicationId()); secretMgr = new NMTokenSecretManagerInNM(stateStore); - secretMgr.recover(stateStore.loadNMTokenState()); + secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey, secretMgr.getCurrentKey()); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); From 2054453a39efeca86361e26033a65f2715f4785c Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 24 Jul 2014 06:22:02 +0000 Subject: [PATCH 09/15] HADOOP-10882. Move DirectBufferPool into common util. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613006 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../main/java/org/apache/hadoop}/util/DirectBufferPool.java | 6 ++++-- .../java/org/apache/hadoop}/util/TestDirectBufferPool.java | 4 ++-- .../main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java | 2 +- .../java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java | 2 +- .../hadoop/hdfs/protocol/datatransfer/PacketReceiver.java | 2 +- 6 files changed, 11 insertions(+), 7 deletions(-) rename {hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop}/util/DirectBufferPool.java (95%) rename {hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop}/util/TestDirectBufferPool.java (95%) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 08bc9012e9..491daf04e9 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -455,6 +455,8 @@ Release 2.6.0 - UNRELEASED HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem internal dir behavior consistent. (Stephen Chu via wang) + HADOOP-10882. Move DirectBufferPool into common util. (todd) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java index 7332d34594..510938b7ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.util; import java.lang.ref.WeakReference; import java.nio.ByteBuffer; @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.classification.InterfaceStability; /** * A simple class for pooling direct ByteBuffers. This is necessary @@ -40,7 +41,8 @@ * allocated at the same size. There is no attempt to reuse larger * buffers to satisfy smaller allocations. 
*/ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) +@InterfaceStability.Evolving public class DirectBufferPool { // Essentially implement a multimap with weak values. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java index 31a18fb881..c8fd754666 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.util; +package org.apache.hadoop.util; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotSame; @@ -29,7 +29,7 @@ import com.google.common.collect.Lists; public class TestDirectBufferPool { - final DirectBufferPool pool = new DirectBufferPool(); + final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool(); @Test public void testBasics() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index bb9612a995..cd75e53b27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ClientMmap; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica; -import org.apache.hadoop.hdfs.util.DirectBufferPool; +import org.apache.hadoop.util.DirectBufferPool; import org.apache.hadoop.util.DataChecksum; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java index c68e548099..47455754d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.shortcircuit.ClientMmap; -import org.apache.hadoop.hdfs.util.DirectBufferPool; +import org.apache.hadoop.util.DirectBufferPool; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java index 3503554636..0de445c222 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdfs.util.DirectBufferPool; +import org.apache.hadoop.util.DirectBufferPool; import org.apache.hadoop.io.IOUtils; import com.google.common.base.Preconditions; From ef9b6a45c437a56f9ebf198cba902e06e875f27c Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Thu, 24 Jul 2014 17:28:31 +0000 Subject: [PATCH 10/15] HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI. Contributed by Vinayakumar B. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613195 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/main/webapps/hdfs/dfshealth.html | 1 - .../hadoop-hdfs/src/main/webapps/hdfs/index.html | 13 +------------ .../src/main/webapps/secondary/index.html | 11 ----------- 4 files changed, 4 insertions(+), 24 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index bbeebd1984..217bf4df09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -252,6 +252,9 @@ Trunk (Unreleased) HDFS-5794. Fix the inconsistency of layout version number of ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9) + HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI. + (Vinayakumar B via wheat9) + Release 2.6.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index fadba07072..8fdf73ba19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -66,7 +66,6 @@


Hadoop, 2014.

-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html index 99bb13b326..aa62a37239 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html @@ -18,18 +18,7 @@ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> - + Hadoop Administration - - -

Hadoop Administration

- - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html index 97e0207e06..f7ef858b9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html @@ -21,15 +21,4 @@ Hadoop Administration - - -

Hadoop Administration

- - \ No newline at end of file From f2137d7c0e19176d5ad7e28c6abcfc03eac49ec3 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Thu, 24 Jul 2014 17:47:08 +0000 Subject: [PATCH 11/15] HADOOP-10894. Fix dead link in ToolRunner documentation. (Contributed by Akira Ajisaka) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613200 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/util/ToolRunner.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 491daf04e9..5ea931c649 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -819,6 +819,9 @@ Release 2.5.0 - UNRELEASED HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun Zhang via Arpit Agarwal) + HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka + via Arpit Agarwal) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java index 49581000ca..16872d0891 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java @@ -30,7 +30,7 @@ *

ToolRunner can be used to run classes implementing * Tool interface. It works in conjunction with * {@link GenericOptionsParser} to parse the - * + * * generic hadoop command line arguments and modifies the * Configuration of the Tool. The * application-specific options are passed along without being modified. From a7855e1c3376fee23eb2ed61f9ae4ad3c9754722 Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Thu, 24 Jul 2014 17:59:45 +0000 Subject: [PATCH 12/15] HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. Contributed by Ming Ma. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613220 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 217bf4df09..4510989639 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -983,6 +983,9 @@ Release 2.5.0 - UNRELEASED HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal via cmccabe) + HDFS-6723. New NN webUI no longer displays decommissioned state for dead node. + (Ming Ma via wheat9) + Release 2.4.1 - 2014-06-23 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html index 8fdf73ba19..2589526198 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html @@ -282,7 +282,7 @@ {name} ({xferaddr}) {lastContact} - Dead{?decomissioned}, Decomissioned{/decomissioned} + Dead{?decommissioned}, Decommissioned{/decommissioned} - - - From 8c6e172a0ad8f06a4f9b70d61d9f3f7789405815 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 24 Jul 2014 18:28:00 +0000 Subject: [PATCH 13/15] HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode is in startup mode. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613237 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../web/resources/NamenodeWebHdfsMethods.java | 3 +- .../apache/hadoop/hdfs/web/TestWebHDFS.java | 39 +++++++++- .../hadoop/hdfs/web/TestWebHDFSForHA.java | 76 +++++++++++++++++-- 4 files changed, 112 insertions(+), 9 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4510989639..376c272a57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -367,6 +367,9 @@ Release 2.6.0 - UNRELEASED HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli) + HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode + is in startup mode. 
(jing9) + Release 2.5.0 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 92a58f9822..d7235b3872 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -113,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam; import org.apache.hadoop.hdfs.web.resources.XAttrValueParam; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.net.Node; @@ -190,7 +191,7 @@ private static NamenodeProtocols getRPCServer(NameNode namenode) throws IOException { final NamenodeProtocols np = namenode.getRpcServer(); if (np == null) { - throw new IOException("Namenode is in startup mode"); + throw new RetriableException("Namenode is in startup mode"); } return np; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index e9c74c6de3..14312110aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -39,14 +39,18 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.TestDFSClientRetries; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; -import org.apache.hadoop.hdfs.TestDFSClientRetries; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; +import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; import org.junit.Assert; import org.junit.Test; +import org.mockito.internal.util.reflection.Whitebox; /** Test WebHDFS */ public class TestWebHDFS { @@ -445,4 +449,37 @@ public void testWebHdfsRenameSnapshot() throws Exception { } } } + + /** + * Make sure a RetriableException is thrown when rpcServer is null in + * NamenodeWebHdfsMethods. 
+ */ + @Test + public void testRaceWhileNNStartup() throws Exception { + MiniDFSCluster cluster = null; + final Configuration conf = WebHdfsTestUtil.createConf(); + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); + cluster.waitActive(); + final NameNode namenode = cluster.getNameNode(); + final NamenodeProtocols rpcServer = namenode.getRpcServer(); + Whitebox.setInternalState(namenode, "rpcServer", null); + + final Path foo = new Path("/foo"); + final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, + WebHdfsFileSystem.SCHEME); + try { + webHdfs.mkdirs(foo); + fail("Expected RetriableException"); + } catch (RetriableException e) { + GenericTestUtils.assertExceptionContains("Namenode is in startup mode", + e); + } + Whitebox.setInternalState(namenode, "rpcServer", rpcServer); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index 772e367f93..0340b95225 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -18,6 +18,15 @@ package org.apache.hadoop.hdfs.web; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +import java.io.IOException; +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -29,18 +38,14 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; +import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.Token; import org.junit.Assert; import org.junit.Test; - -import java.io.IOException; -import java.net.URI; - -import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; +import org.mockito.internal.util.reflection.Whitebox; public class TestWebHDFSForHA { private static final String LOGICAL_NAME = "minidfs"; @@ -182,4 +187,61 @@ public void testMultipleNamespacesConfigured() throws Exception { } } } + + /** + * Make sure the WebHdfsFileSystem will retry based on RetriableException when + * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up. 
+ */ + @Test (timeout=120000) + public void testRetryWhileNNStartup() throws Exception { + final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME); + MiniDFSCluster cluster = null; + final Map resultMap = new HashMap(); + + try { + cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo) + .numDataNodes(0).build(); + HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME); + cluster.waitActive(); + cluster.transitionToActive(0); + + final NameNode namenode = cluster.getNameNode(0); + final NamenodeProtocols rpcServer = namenode.getRpcServer(); + Whitebox.setInternalState(namenode, "rpcServer", null); + + new Thread() { + @Override + public void run() { + boolean result = false; + FileSystem fs = null; + try { + fs = FileSystem.get(WEBHDFS_URI, conf); + final Path dir = new Path("/test"); + result = fs.mkdirs(dir); + } catch (IOException e) { + result = false; + } finally { + IOUtils.cleanup(null, fs); + } + synchronized (TestWebHDFSForHA.this) { + resultMap.put("mkdirs", result); + TestWebHDFSForHA.this.notifyAll(); + } + } + }.start(); + + Thread.sleep(1000); + Whitebox.setInternalState(namenode, "rpcServer", rpcServer); + synchronized (this) { + while (!resultMap.containsKey("mkdirs")) { + this.wait(); + } + Assert.assertTrue(resultMap.get("mkdirs")); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } } From e171254d56bfff467a67a6cf9160595c941f50c0 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 24 Jul 2014 23:42:06 +0000 Subject: [PATCH 14/15] Name node cannot start if the path of a file under construction contains .snapshot. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613329 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/namenode/FSImageFormat.java | 11 ++ .../hadoop/hdfs/TestDFSUpgradeFromImage.java | 137 ++++++++++++++++++ .../test/resources/hadoop-0.23-reserved.tgz | Bin 0 -> 4558 bytes .../src/test/resources/hadoop-1-reserved.tgz | Bin 0 -> 2572 bytes 5 files changed, 151 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 376c272a57..498454916d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -908,6 +908,9 @@ Release 2.5.0 - UNRELEASED HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code when xattr doesn't exist. (Charles Lamb via umamahesh) + HDFS-6696. Name node cannot start if the path of a file under + construction contains ".snapshot". (wang) + BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS HDFS-6299. Protobuf for XAttr and client-side implementation. 
(Yi Liu via umamahesh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 49a035cfff..5b6d269546 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -614,6 +614,16 @@ private void loadFullNameINodes(long numFiles, DataInput in, Counter counter) INodeDirectory parentINode = fsDir.rootDir; for (long i = 0; i < numFiles; i++) { pathComponents = FSImageSerialization.readPathComponents(in); + for (int j=0; j < pathComponents.length; j++) { + byte[] newComponent = renameReservedComponentOnUpgrade + (pathComponents[j], getLayoutVersion()); + if (!Arrays.equals(newComponent, pathComponents[j])) { + String oldPath = DFSUtil.byteArray2PathString(pathComponents); + pathComponents[j] = newComponent; + String newPath = DFSUtil.byteArray2PathString(pathComponents); + LOG.info("Renaming reserved path " + oldPath + " to " + newPath); + } + } final INode newNode = loadINode( pathComponents[pathComponents.length-1], false, in, counter); @@ -926,6 +936,7 @@ LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) { oldnode = namesystem.dir.getInode(cons.getId()).asFile(); inSnapshot = true; } else { + path = renameReservedPathsOnUpgrade(path, getLayoutVersion()); final INodesInPath iip = fsDir.getLastINodeInPath(path); oldnode = INodeFile.valueOf(iip.getINode(0), path); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java index 1e1f668f21..f5dbdceaa1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java @@ -70,6 +70,9 @@ public class TestDFSUpgradeFromImage { private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt"; private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz"; private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz"; + private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz"; + private static final String HADOOP023_RESERVED_IMAGE = + "hadoop-0.23-reserved.tgz"; private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz"; private static class ReferenceFileInfo { @@ -325,6 +328,140 @@ public void testUpgradeFromCorruptRel22Image() throws IOException { } } + /** + * Test upgrade from a branch-1.2 image with reserved paths + */ + @Test + public void testUpgradeFromRel1ReservedImage() throws Exception { + unpackStorage(HADOOP1_RESERVED_IMAGE); + MiniDFSCluster cluster = null; + // Try it once without setting the upgrade flag to ensure it fails + final Configuration conf = new Configuration(); + // Try it again with a custom rename string + try { + FSImageFormat.setRenameReservedPairs( + ".snapshot=.user-snapshot," + + ".reserved=.my-reserved"); + cluster = + new MiniDFSCluster.Builder(conf) + .format(false) + .startupOption(StartupOption.UPGRADE) + .numDataNodes(0).build(); + DistributedFileSystem dfs = cluster.getFileSystem(); + // Make sure the paths were renamed as expected + // Also check that paths are present after a restart, checks that the + // upgraded 
fsimage has the same state. + final String[] expected = new String[] { + "/.my-reserved", + "/.user-snapshot", + "/.user-snapshot/.user-snapshot", + "/.user-snapshot/open", + "/dir1", + "/dir1/.user-snapshot", + "/dir2", + "/dir2/.user-snapshot", + "/user", + "/user/andrew", + "/user/andrew/.user-snapshot", + }; + for (int i=0; i<2; i++) { + // Restart the second time through this loop + if (i==1) { + cluster.finalizeCluster(conf); + cluster.restartNameNode(true); + } + ArrayList toList = new ArrayList(); + toList.add(new Path("/")); + ArrayList found = new ArrayList(); + while (!toList.isEmpty()) { + Path p = toList.remove(0); + FileStatus[] statuses = dfs.listStatus(p); + for (FileStatus status: statuses) { + final String path = status.getPath().toUri().getPath(); + System.out.println("Found path " + path); + found.add(path); + if (status.isDirectory()) { + toList.add(status.getPath()); + } + } + } + for (String s: expected) { + assertTrue("Did not find expected path " + s, found.contains(s)); + } + assertEquals("Found an unexpected path while listing filesystem", + found.size(), expected.length); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + + /** + * Test upgrade from a 0.23.11 image with reserved paths + */ + @Test + public void testUpgradeFromRel023ReservedImage() throws Exception { + unpackStorage(HADOOP023_RESERVED_IMAGE); + MiniDFSCluster cluster = null; + // Try it once without setting the upgrade flag to ensure it fails + final Configuration conf = new Configuration(); + // Try it again with a custom rename string + try { + FSImageFormat.setRenameReservedPairs( + ".snapshot=.user-snapshot," + + ".reserved=.my-reserved"); + cluster = + new MiniDFSCluster.Builder(conf) + .format(false) + .startupOption(StartupOption.UPGRADE) + .numDataNodes(0).build(); + DistributedFileSystem dfs = cluster.getFileSystem(); + // Make sure the paths were renamed as expected + // Also check that paths are present after a restart, checks that the + // upgraded fsimage has the same state. 
+ final String[] expected = new String[] { + "/.user-snapshot", + "/dir1", + "/dir1/.user-snapshot", + "/dir2", + "/dir2/.user-snapshot" + }; + for (int i=0; i<2; i++) { + // Restart the second time through this loop + if (i==1) { + cluster.finalizeCluster(conf); + cluster.restartNameNode(true); + } + ArrayList toList = new ArrayList(); + toList.add(new Path("/")); + ArrayList found = new ArrayList(); + while (!toList.isEmpty()) { + Path p = toList.remove(0); + FileStatus[] statuses = dfs.listStatus(p); + for (FileStatus status: statuses) { + final String path = status.getPath().toUri().getPath(); + System.out.println("Found path " + path); + found.add(path); + if (status.isDirectory()) { + toList.add(status.getPath()); + } + } + } + for (String s: expected) { + assertTrue("Did not find expected path " + s, found.contains(s)); + } + assertEquals("Found an unexpected path while listing filesystem", + found.size(), expected.length); + } + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + /** * Test upgrade from 2.0 image with a variety of .snapshot and .reserved * paths to test renaming on upgrade diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz new file mode 100644 index 0000000000000000000000000000000000000000..0f53f2adb27e1a8379e4bc0d74a9c98700228559 GIT binary patch literal 4558 zcmeIzjZ=~b90zc^)~u~`xy*u;?bI}N;Zm9<$Zbsxbv2fyXv%4pQuBq2R1|%h;P<~ zeptpvWNe1L8OaWZ@A>5u*ZIeWy?kc0q`jGDp7_P~#A;tABY)SB3$pWoDe^z~n z@UbVq?de_xxdKiu$QgD2)*Q;zc(1z{x|-t`jrbZ}Ji4-`dV^-LC2`$uhT0F%^$dID z*<)_RPXUidWHW4v+ZG&T_G7+;yk~W|#m1N7@P|P40EE9&0~yjoq)r-1oAbGPC^Y69 z8*UBLHtxT8w^o18V|(T;gT>RwS5d_GrsdiZBgM9ybh2w)ue`X!X`xyt-rQ$n?F*Pj zOV^?=_{DSz@iURwnUG0LbjJ-8CLJ4BH~-3ZbK?xeVx1T!iP~+uER+OLLZFgBUsli| z$oIv4?Tdy*&cHr_=RuB&Nmxu9ZAnQ6S`$(H@2}jdNaHDFq-60XB?4c4RrVKg+Hh$+ zG29<_xad@|PgWUuL;pZDXLxP=g%B=}DkQfLiO%=YzRG?;<|j>etHx!hpD(9jwp_kD zaKdQNx_^aAR%hm<5XW0a|J|H7{tV%qFWpKK3SE6DoVv2sADd>W=)F~X!D#QcSngA(hYnuF&RDD?uM=ITa*`plqUukOJuK}kg|b^0H}}d(-S~nC(zD~ zB4Fd*=CbIYch=R4dGsZQaXhgON%sm(o-s$6KT9ao%7A;PPU7mW6tBOW)?UT#i0U0s z5BXN{>Wax?n0VL64BigXXDW8i_ap%KB|%3S?<|{en!a6?l^L(sQ>%;_m zLCNwwX1mBVRc@3xjna$Y07YK9J9w$lzb_qy%wqq1QPi}*v?-CMJkZmnOnW>MknJq# zG2N7P%GE5$hz)IESxXOy?0(g3Ek`FuhEy(jgjOm9v^_CQO_#zjWhA7PpqQ;9+R-^v6L)y0ibQ&z8N~|R#}8p9>HR|8l?ZOCO&F|uJD~&=HUH92 z;nvdzI5FVIt*zd~Sr(6}nT6OwB(fda-?>x<;=Y@GJKNnLK;BphoqAfs?ttbZ1PKt0 zhuiMlVpmC5)O%cKmn(rCa^z z!TydX=5Slq?Y>Kko7or1p$FZ{as$T(^VH7ntp>}`#KN`CWB45`g4a4DE0`5{_BCar z1P@nf8Zcp^d+_jYNRpk~`fCXSIQ2sj!9K{~h0QKk#Z^8?ZFcLc&Hndwge=7vnaJ*F zY*XmbMMY9*hIDb zTGLCRZ3uk#3v%j32*$+I<)L6c>gErz7Sj_;X6Q&#A?i+>-DDQjmUKOORujJyMG_Wf z9RbY(R-UaKwH+`WrEon~=^6` z*{t(|7K}iJSb$>(_t?T%U&Kzm{UGD4^4O^^r`T)f1^15)MPJx2)4Iq7a?CVt$f**Rs>nq|P3ENNKBsw56s7fr+-Q%qJLTq+)*8nTJK# z%EyJu5}OhlW+5gbxkV%j&QLTJuoxLWp`bWY`D}OlX?MH*1MB@KynlJY>5g_fg*@8M zerm9b_z8Bi-6+bVw?-thjbEaA;P}gh4_x4|3 z5-@|ZOsf(*j6Kl$`x~uDr$ws^RJ-@Pb^Y-pF%n386994p%`AQe zFRR5%y*m1Rg7$+@d_Xa%-wc~ThV)~$=-he$7t71_DUqebHsn5jHKbryxA9w>n_4Lo zig88{5}-583K2DaBrXQvS{;XN1?fUZggJBQ|GLXKt&jLI=Xy*9U{B9ex3AG5Y z$mN*?qfS#wos|%?UKi02A+hDAZte-|JolOlMOr%or`PsU%=W1L< zeNg~)zPBS2{qrveZ_%xnNBSyyrQ9yKR{TW0v$+$^y@VlYTd1V7Ryx!B=7W6H_L>r( zlo)vaOWIb>u4j^g%+|{Cs?X-*0v@zyS1|T>qslSrSx%08{L9-LV<{DTccQtv2FlzL z)R=C}ot=iRU+iuL`I9e}d7Glf0Inrns4NU|BcekKzSL#oh3<*HYCYd;M^mJ`{6?2yq0OYXuy<;W2c{YI5wiVx^ zZD7I%?ME$u)=1ec^; zTx^l{^ezi?E*;RoRPz+VVLB?>^_2J9aD2cLxoVZ_rAMFAzYkJAO)5KX_n{95J{q~NDI-Wd8mh9#HC>elkZavDdlYeMnI 
zmG!01lXf`wM;J!Rjuv&x|S(ec%FR_UvwgfT#vXJbTQ#6BE0p0;qA4p zz6{|bf1Kw9%Q!o4w(zU`tj}TQ#THHg`zE#w*$!P0mIxefVvO~gcJhKoW`Z5Sy2m*& z!j5|5RCOV1?ztR}iY=h?W;25J1pAE((@$TX_jgSzXY6ih)kRzH^yhncH&6N}#S~(s zu~WaBL5EDVD44ES;G{RqNN1dW#>%SUn1RpfC+Z|L%UQtoIC7U9+(3v(`P-tGQU75P zJR@5&@%o<@379>Bzjoxc4UjGl{RL8Fh%r399%lr3pDWF(Jt@mDbl2;77DfbEx8A5n z^ Date: Thu, 24 Jul 2014 23:49:22 +0000 Subject: [PATCH 15/15] HADOOP-10891. Add EncryptedKeyVersion factory method to KeyProviderCryptoExtension. (wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1613332 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../key/KeyProviderCryptoExtension.java | 24 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5ea931c649..55914f8319 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -189,6 +189,9 @@ Trunk (Unreleased) HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey in the REST API. (asuresh via tucu) + HADOOP-10891. Add EncryptedKeyVersion factory method to + KeyProviderCryptoExtension. (wang) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index 0ba73f1519..227e19b484 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -79,6 +79,30 @@ protected EncryptedKeyVersion(String keyName, this.encryptedKeyVersion = encryptedKeyVersion; } + /** + * Factory method to create a new EncryptedKeyVersion that can then be + * passed into {@link #decryptEncryptedKey}. Note that the fields of the + * returned EncryptedKeyVersion will only partially be populated; it is not + * necessarily suitable for operations besides decryption. + * + * @param encryptionKeyVersionName Version name of the encryption key used + * to encrypt the encrypted key. + * @param encryptedKeyIv Initialization vector of the encrypted + * key. The IV of the encryption key used to + * encrypt the encrypted key is derived from + * this IV. + * @param encryptedKeyMaterial Key material of the encrypted key. + * @return EncryptedKeyVersion suitable for decryption. + */ + public static EncryptedKeyVersion createForDecryption(String + encryptionKeyVersionName, byte[] encryptedKeyIv, + byte[] encryptedKeyMaterial) { + KeyVersion encryptedKeyVersion = new KeyVersion(null, null, + encryptedKeyMaterial); + return new EncryptedKeyVersion(null, encryptionKeyVersionName, + encryptedKeyIv, encryptedKeyVersion); + } + /** * @return Name of the encryption key used to encrypt the encrypted key. */