From a1f12bb543778ddc243205eaa962e99da4d8f135 Mon Sep 17 00:00:00 2001
From: Tsz-Wo Nicholas Sze
Date: Fri, 14 Jul 2017 14:34:01 -0700
Subject: [PATCH] Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix commit message.

This reverts commit a29fe100b3c671954b759add5923a2b44af9e6a4.
---
 .../blockmanagement/BlockCollection.java      |   4 +-
 .../ContentSummaryComputationContext.java     |  20 --
 .../namenode/DirectoryWithQuotaFeature.java   |   4 +-
 .../namenode/FSDirStatAndListingOp.java       |   9 +-
 .../server/namenode/FSPermissionChecker.java  |  32 ---
 .../hadoop/hdfs/server/namenode/INode.java    |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java  |   9 +-
 .../hdfs/server/namenode/INodeReference.java  |   3 +-
 .../DirectorySnapshottableFeature.java        |   3 +-
 .../DirectoryWithSnapshotFeature.java         |   3 +-
 .../server/namenode/snapshot/Snapshot.java    |   4 +-
 .../TestGetContentSummaryWithPermission.java  | 201 ------------------
 12 files changed, 16 insertions(+), 285 deletions(-)
 delete mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index b880590795..2f214be03c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,7 +21,6 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * This interface is used by the block manager to expose a
@@ -37,8 +36,7 @@ public interface BlockCollection {
   /**
    * Get content summary.
   */
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
-      throws AccessControlException;
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
    * @return the number of blocks or block groups
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d56b..8d5aa0dfed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,14 +20,11 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.AccessControlException;
-
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -49,8 +46,6 @@ public class ContentSummaryComputationContext {
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
-
-  private FSPermissionChecker pc;
 
   /**
    * Constructor
    *
@@ -62,12 +57,6 @@ public class ContentSummaryComputationContext {
    */
   public ContentSummaryComputationContext(FSDirectory dir,
       FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
-    this(dir, fsn, limitPerRun, sleepMicroSec, null);
-  }
-
-  public ContentSummaryComputationContext(FSDirectory dir,
-      FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
-      FSPermissionChecker pc) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
@@ -76,7 +65,6 @@ public ContentSummaryComputationContext(FSDirectory dir,
     this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
-    this.pc = pc;
   }
 
   /** Constructor for blocking computation. */
@@ -198,12 +186,4 @@ public String getErasureCodingPolicyName(INode inode) {
     }
     return "";
   }
-
-  void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
-      throws AccessControlException {
-    if (dir != null && dir.isPermissionEnabled()
-        && pc != null && !pc.isSuperUser()) {
-      pc.checkPermission(inode, snapshotId, access);
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 0968c65cd0..31b45ad758 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.EnumCounters;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Quota feature for {@link INodeDirectory}.
@@ -126,8 +125,7 @@ QuotaCounts AddCurrentSpaceUsage(QuotaCounts counts) {
   }
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
-      final ContentSummaryComputationContext summary)
-      throws AccessControlException {
+      final ContentSummaryComputationContext summary) {
     final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 4c9224908b..04efa652d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -127,8 +127,10 @@ static ContentSummary getContentSummary(
       FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
-    // getContentSummaryInt() call will check access (if enabled) when
-    // traversing all sub directories.
+    if (fsd.isPermissionEnabled()) {
+      fsd.checkPermission(pc, iip, false, null, null, null,
+          FsAction.READ_EXECUTE);
+    }
     return getContentSummaryInt(fsd, iip);
   }
 
@@ -511,8 +513,7 @@ private static ContentSummary getContentSummaryInt(FSDirectory fsd,
       // processed. 0 means disabled. I.e. blocking for the entire duration.
       ContentSummaryComputationContext cscc =
           new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-              fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
-              fsd.getPermissionChecker());
+              fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
       ContentSummary cs = targetNode.computeAndConvertContentSummary(
           iip.getPathSnapshotId(), cscc);
       fsd.addYieldCount(cscc.getYieldCount());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index f745a6ceb0..f1250dd277 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -195,38 +195,6 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
-  /**
-   * Check permission only for the given inode (not checking the children's
-   * access).
-   *
-   * @param inode the inode to check.
-   * @param snapshotId the snapshot id.
-   * @param access the target access.
-   * @throws AccessControlException
-   */
-  void checkPermission(INode inode, int snapshotId, FsAction access)
-      throws AccessControlException {
-    try {
-      byte[][] localComponents = {inode.getLocalNameBytes()};
-      INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
-      AccessControlEnforcer enforcer = getAccessControlEnforcer();
-      enforcer.checkPermission(
-          fsOwner, supergroup, callerUgi,
-          iNodeAttr, // single inode attr in the array
-          new INode[]{inode}, // single inode in the array
-          localComponents, snapshotId,
-          null, -1, // this will skip checkTraverse() because
-                    // not checking ancestor here
-          false, null, null,
-          access, // the target access to be checked against the inode
-          null, // passing null sub access avoids checking children
-          false);
-    } catch (AccessControlException ace) {
-      throw new AccessControlException(
-          toAccessControlString(inode, inode.getFullPathName(), access));
-    }
-  }
-
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index d768e08558..1f982ca8d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
 
@@ -419,8 +418,7 @@ public abstract void cleanSubtree(ReclaimContext reclaimContext,
   public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary(
-      BlockStoragePolicySuite bsps) throws AccessControlException {
+  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
     return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
@@ -429,7 +427,7 @@ public final ContentSummary computeContentSummary(
    * Compute {@link ContentSummary}.
    */
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     computeContentSummary(snapshotId, summary);
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -463,8 +461,7 @@ public final ContentSummary computeAndConvertContentSummary(int snapshotId,
    * @return The same objects as summary.
    */
   public abstract ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary)
-      throws AccessControlException;
+      int snapshotId, ContentSummaryComputationContext summary);
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 3b7fa4e3fa..4012783108 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -26,7 +26,6 @@
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -44,7 +43,6 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -634,7 +632,7 @@ public QuotaCounts computeQuotaUsage4CurrentDirectory(
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       final ContentCounts counts = new ContentCounts.Builder().build();
@@ -656,10 +654,7 @@ public ContentSummaryComputationContext computeContentSummary(int snapshotId,
   }
 
   protected ContentSummaryComputationContext computeDirectoryContentSummary(
-      ContentSummaryComputationContext summary, int snapshotId)
-      throws AccessControlException{
-    // throws exception if failing the permission check
-    summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
+      ContentSummaryComputationContext summary, int snapshotId) {
     ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index db2026da41..1b852374c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An anonymous reference to an inode.
@@ -315,7 +314,7 @@ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     return referred.computeContentSummary(snapshotId, summary);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 0ab928d04d..fbfc278703 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -44,7 +44,6 @@
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -235,7 +234,7 @@ public Snapshot removeSnapshot(
 
   @Override
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) throws AccessControlException {
+      final ContentCounts counts) {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     super.computeContentSummary4Snapshot(bsps, counts);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 7535879d09..0111b3bfb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -47,7 +47,6 @@
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -631,7 +630,7 @@ public QuotaCounts computeQuotaUsage4CurrentDirectory(
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) throws AccessControlException {
+      final ContentCounts counts) {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary =
         new ContentSummaryComputationContext(bsps);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
index 515f164bd8..e98e766d34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java
@@ -41,7 +41,6 @@
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.security.AccessControlException;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
@@ -177,8 +176,7 @@ public INode getChild(byte[] name, int snapshotId) {
 
     @Override
     public ContentSummaryComputationContext computeContentSummary(
-        int snapshotId, ContentSummaryComputationContext summary)
-        throws AccessControlException {
+        int snapshotId, ContentSummaryComputationContext summary) {
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
deleted file mode 100644
index 03aa440a53..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetContentSummaryWithPermission.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.security.PrivilegedExceptionAction;
-
-import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests get content summary with permission settings.
- */
-public class TestGetContentSummaryWithPermission {
-  protected static final short REPLICATION = 3;
-  protected static final long BLOCKSIZE = 1024;
-
-  private Configuration conf;
-  private MiniDFSCluster cluster;
-  private DistributedFileSystem dfs;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
-    cluster.waitActive();
-
-    dfs = cluster.getFileSystem();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  /**
-   * Test getContentSummary for super user. For super user, whatever
-   * permission the directories are with, always allowed to access
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetContentSummarySuperUser() throws Exception {
-    final Path foo = new Path("/fooSuper");
-    final Path bar = new Path(foo, "barSuper");
-    final Path baz = new Path(bar, "bazSuper");
-    dfs.mkdirs(bar);
-    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
-    ContentSummary summary;
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(foo, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(bar, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(baz, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-  }
-
-  /**
-   * Test getContentSummary for non-super, non-owner. Such users are restricted
-   * by permission of subdirectories. Namely if there is any subdirectory that
-   * does not have READ_EXECUTE access, AccessControlException will be thrown.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetContentSummaryNonSuperUser() throws Exception {
-    final Path foo = new Path("/fooNoneSuper");
-    final Path bar = new Path(foo, "barNoneSuper");
-    final Path baz = new Path(bar, "bazNoneSuper");
-    // run as some random non-superuser, non-owner user.
-    final UserGroupInformation userUgi =
-        UserGroupInformation.createUserForTesting(
-            "randomUser", new String[]{"randomGroup"});
-    dfs.mkdirs(bar);
-    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
-    // by default, permission is rwxr-xr-x, as long as READ and EXECUTE are set,
-    // content summary should accessible
-    FileStatus fileStatus;
-    fileStatus = dfs.getFileStatus(foo);
-    assertEquals((short)755, fileStatus.getPermission().toOctal());
-    fileStatus = dfs.getFileStatus(bar);
-    assertEquals((short)755, fileStatus.getPermission().toOctal());
-    // file has no EXECUTE, it is rw-r--r-- default
-    fileStatus = dfs.getFileStatus(baz);
-    assertEquals((short)644, fileStatus.getPermission().toOctal());
-
-    // by default, can get content summary
-    ContentSummary summary =
-        userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-            () -> cluster.getNameNodeRpc().getContentSummary(
-                foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-
-    // set empty access on root dir, should disallow content summary
-    dfs.setPermission(foo, new FsPermission((short)0));
-    try {
-      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-          () -> cluster.getNameNodeRpc().getContentSummary(
-              foo.toString()));
-      fail("Should've fail due to access control exception.");
-    } catch (AccessControlException e) {
-      assertTrue(e.getMessage().contains("Permission denied"));
-    }
-
-    // restore foo's permission to allow READ_EXECUTE
-    dfs.setPermission(foo,
-        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
-    // set empty access on subdir, should disallow content summary from root dir
-    dfs.setPermission(bar, new FsPermission((short)0));
-
-    try {
-      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-          () -> cluster.getNameNodeRpc().getContentSummary(
-              foo.toString()));
-      fail("Should've fail due to access control exception.");
-    } catch (AccessControlException e) {
-      assertTrue(e.getMessage().contains("Permission denied"));
-    }
-
-    // restore the permission of subdir to READ_EXECUTE. enable
-    // getContentSummary again for root
-    dfs.setPermission(bar,
-        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
-    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-        () -> cluster.getNameNodeRpc().getContentSummary(
-            foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-
-    // permission of files under the directory does not affect
-    // getContentSummary
-    dfs.setPermission(baz, new FsPermission((short)0));
-    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-        () -> cluster.getNameNodeRpc().getContentSummary(
-            foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-  }
-
-  private void verifySummary(ContentSummary summary, int dirCount,
-      int fileCount, int length) {
-    assertEquals(dirCount, summary.getDirectoryCount());
-    assertEquals(fileCount, summary.getFileCount());
-    assertEquals(length, summary.getLength());
-  }
-
-}