From b0a41de68c5b08f534ca231293de053c0b0cbd5d Mon Sep 17 00:00:00 2001
From: cnauroth <cnauroth@apache.org>
Date: Thu, 13 Nov 2014 12:31:44 -0800
Subject: [PATCH] HDFS-7385. ThreadLocal used in FSEditLog class causes
 FSImage permission mess up. Contributed by jiangyu.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSEditLog.java     |  2 +
 .../hdfs/server/namenode/FSEditLogOp.java   | 12 ++++
 .../hdfs/server/namenode/TestEditLog.java   | 67 +++++++++++++++++++
 4 files changed, 84 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bd3dcd569d..998a395c4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1446,6 +1446,9 @@ Release 2.6.0 - 2014-11-15
 
     HDFS-7391. Renable SSLv2Hello in HttpFS. (rkanter via acmurthy)
 
+    HDFS-7385. ThreadLocal used in FSEditLog class causes FSImage permission mess
+    up. (jiangyu via cnauroth)
+
 Release 2.5.2 - 2014-11-10
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index e952e7cb9e..305fcdc744 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -708,6 +708,7 @@ public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
     Preconditions.checkArgument(newNode.isUnderConstruction());
     PermissionStatus permissions = newNode.getPermissionStatus();
     AddOp op = AddOp.getInstance(cache.get())
+      .reset()
       .setInodeId(newNode.getId())
       .setPath(path)
       .setReplication(newNode.getFileReplication())
@@ -778,6 +779,7 @@ public void logUpdateBlocks(String path, INodeFile file, boolean toLogRpcIds) {
   public void logMkDir(String path, INode newNode) {
     PermissionStatus permissions = newNode.getPermissionStatus();
     MkdirOp op = MkdirOp.getInstance(cache.get())
+      .reset()
       .setInodeId(newNode.getId())
       .setPath(path)
       .setTimestamp(newNode.getModificationTime())
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index c9eb9fe1a9..06b44192af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -419,6 +419,12 @@ private AddCloseOp(FSEditLogOpCodes opCode) {
       assert(opCode == OP_ADD || opCode == OP_CLOSE);
     }
 
+    <T extends AddCloseOp> T reset() {
+      this.aclEntries = null;
+      this.xAttrs = null;
+      return (T)this;
+    }
+
     <T extends AddCloseOp> T setInodeId(long inodeId) {
       this.inodeId = inodeId;
       return (T)this;
@@ -1410,6 +1416,12 @@ static MkdirOp getInstance(OpInstanceCache cache) {
       return (MkdirOp)cache.get(OP_MKDIR);
     }
 
+    MkdirOp reset() {
+      this.aclEntries = null;
+      this.xAttrs = null;
+      return this;
+    }
+
     MkdirOp setInodeId(long inodeId) {
       this.inodeId = inodeId;
       return this;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 7b622426aa..2df55aa7fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -57,6 +62,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -1501,4 +1507,65 @@ public void testManyEditLogSegments() throws IOException {
     LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
         NUM_EDIT_LOG_ROLLS, delta));
   }
+
+  /**
+   * Edit log op instances are cached internally using thread-local storage.
+   * This test checks that the cached instances are reset in between different
+   * transactions processed on the same thread, so that we don't accidentally
+   * apply incorrect attributes to an inode.
+   *
+   * @throws IOException if there is an I/O error
+   */
+  @Test
+  public void testResetThreadLocalCachedOps() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    // Set single handler thread, so all transactions hit same thread-local ops.
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+
+      // Create /dir1 with a default ACL.
+      Path dir1 = new Path("/dir1");
+      fileSys.mkdirs(dir1);
+      List<AclEntry> aclSpec = Lists.newArrayList(
+          aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
+      fileSys.modifyAclEntries(dir1, aclSpec);
+
+      // /dir1/dir2 is expected to clone the default ACL.
+      Path dir2 = new Path("/dir1/dir2");
+      fileSys.mkdirs(dir2);
+
+      // /dir1/file1 is expected to clone the default ACL.
+      Path file1 = new Path("/dir1/file1");
+      fileSys.create(file1).close();
+
+      // /dir3 is not a child of /dir1, so must not clone the default ACL.
+      Path dir3 = new Path("/dir3");
+      fileSys.mkdirs(dir3);
+
+      // /file2 is not a child of /dir1, so must not clone the default ACL.
+      Path file2 = new Path("/file2");
+      fileSys.create(file2).close();
+
+      // Restart and assert the above stated expectations.
+      IOUtils.cleanup(LOG, fileSys);
+      cluster.restartNameNode();
+      fileSys = cluster.getFileSystem();
+      assertFalse(fileSys.getAclStatus(dir1).getEntries().isEmpty());
+      assertFalse(fileSys.getAclStatus(dir2).getEntries().isEmpty());
+      assertFalse(fileSys.getAclStatus(file1).getEntries().isEmpty());
+      assertTrue(fileSys.getAclStatus(dir3).getEntries().isEmpty());
+      assertTrue(fileSys.getAclStatus(file2).getEntries().isEmpty());
+    } finally {
+      IOUtils.cleanup(LOG, fileSys);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
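
For reviewers unfamiliar with the op cache, the sketch below distills the
failure mode this change fixes: FSEditLog reuses one op instance per handler
thread through a ThreadLocal-backed OpInstanceCache, and optional fields such
as aclEntries and xAttrs were only assigned when present, so a value written by
an earlier transaction on the same thread leaked into later transactions and
was then persisted on edit log replay. This is a minimal, self-contained
stand-in, not HDFS code; CachedOp, logMkdirBuggy, and logMkdirFixed are
hypothetical names chosen for the demo.

import java.util.List;

public class ThreadLocalReuseDemo {

  /** Stands in for an FSEditLogOp subclass that has an optional field. */
  static class CachedOp {
    String path;
    List<String> aclEntries; // optional: only assigned when ACLs exist

    /** The fix: clear optional state before the cached instance is reused. */
    CachedOp reset() {
      this.aclEntries = null;
      return this;
    }
  }

  /** One op instance per thread, reused across transactions, like OpInstanceCache. */
  private static final ThreadLocal<CachedOp> CACHE =
      ThreadLocal.withInitial(CachedOp::new);

  /** Pre-patch shape: optional fields keep whatever the thread set last time. */
  static CachedOp logMkdirBuggy(String path, List<String> acls) {
    CachedOp op = CACHE.get();
    op.path = path;
    if (acls != null) {
      op.aclEntries = acls; // skipped when null, so a stale value survives
    }
    return op;
  }

  /** Post-patch shape, mirroring MkdirOp.getInstance(cache.get()).reset(). */
  static CachedOp logMkdirFixed(String path, List<String> acls) {
    CachedOp op = CACHE.get().reset();
    op.path = path;
    if (acls != null) {
      op.aclEntries = acls;
    }
    return op;
  }

  public static void main(String[] args) {
    logMkdirBuggy("/dir1", List.of("default:user:foo:r-x"));
    // /dir3 has no ACLs, yet the reused op still carries /dir1's entries.
    System.out.println(logMkdirBuggy("/dir3", null).aclEntries);

    logMkdirFixed("/dir1", List.of("default:user:foo:r-x"));
    // With reset(), /dir3 correctly shows no ACL entries (prints null).
    System.out.println(logMkdirFixed("/dir3", null).aclEntries);
  }
}

This also explains why the test pins DFS_NAMENODE_HANDLER_COUNT_KEY to 1: with
a single handler thread, every transaction is guaranteed to hit the same
thread-local op instance, making the stale-state leak deterministic.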