Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix commit message.
This reverts commit a29fe100b3.
parent 9e0cde1469
commit a1f12bb543
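
This revert restores the pre-HDFS-12130 behavior: getContentSummary() checks READ_EXECUTE on the whole subtree up front (the fsd.checkPermission(pc, iip, false, null, null, null, FsAction.READ_EXECUTE) call restored in FSDirStatAndListingOp below) rather than checking each directory inside the summary traversal itself (the summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE) call removed from INodeDirectory). Below is a rough, standalone sketch of the two strategies; the Dir type and every method name in it are invented for illustration and are not Hadoop APIs.

// Illustration only -- not Hadoop code. All names here are invented.
import java.util.ArrayList;
import java.util.List;

public class PermissionCheckSketch {

  /** Toy directory node: a name, an r-x bit for the caller, and children. */
  static class Dir {
    final String name;
    final boolean readExecute;
    final List<Dir> children = new ArrayList<>();

    Dir(String name, boolean readExecute) {
      this.name = name;
      this.readExecute = readExecute;
    }
  }

  /** Restored behavior: validate the whole subtree first, then count it. */
  static int summaryWithUpfrontCheck(Dir root) {
    checkSubtree(root);      // walks the tree once just for permissions
    return countDirs(root);  // walks it again to build the summary
  }

  static void checkSubtree(Dir d) {
    if (!d.readExecute) {
      throw new SecurityException("Permission denied: " + d.name);
    }
    for (Dir c : d.children) {
      checkSubtree(c);
    }
  }

  static int countDirs(Dir d) {
    int n = 1;
    for (Dir c : d.children) {
      n += countDirs(c);
    }
    return n;
  }

  /** Reverted optimization: check each directory as the count reaches it. */
  static int summaryWithInlineCheck(Dir d) {
    if (!d.readExecute) {
      throw new SecurityException("Permission denied: " + d.name);
    }
    int n = 1;
    for (Dir c : d.children) {
      n += summaryWithInlineCheck(c);
    }
    return n;
  }

  public static void main(String[] args) {
    Dir foo = new Dir("/foo", true);
    foo.children.add(new Dir("/foo/bar", true));
    System.out.println(summaryWithUpfrontCheck(foo)); // 2
    System.out.println(summaryWithInlineCheck(foo));  // 2
  }
}

Either way a caller lacking r-x on some subdirectory is rejected; the reverted optimization only avoided walking the tree a second time.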
BlockCollection.java
@@ -21,7 +21,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * This interface is used by the block manager to expose a
@@ -37,8 +36,7 @@ public interface BlockCollection {
   /**
    * Get content summary.
    */
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
-      throws AccessControlException;
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
    * @return the number of blocks or block groups
ContentSummaryComputationContext.java
@@ -20,14 +20,11 @@
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.AccessControlException;
-
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -49,8 +46,6 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
-
-  private FSPermissionChecker pc;
   /**
    * Constructor
    *
@@ -62,12 +57,6 @@ public class ContentSummaryComputationContext {
    */
   public ContentSummaryComputationContext(FSDirectory dir,
       FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
-    this(dir, fsn, limitPerRun, sleepMicroSec, null);
-  }
-
-  public ContentSummaryComputationContext(FSDirectory dir,
-      FSNamesystem fsn, long limitPerRun, long sleepMicroSec,
-      FSPermissionChecker pc) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
@@ -76,7 +65,6 @@ public ContentSummaryComputationContext(FSDirectory dir,
     this.snapshotCounts = new ContentCounts.Builder().build();
     this.sleepMilliSec = sleepMicroSec/1000;
     this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
-    this.pc = pc;
   }
 
   /** Constructor for blocking computation. */
@@ -198,12 +186,4 @@ public String getErasureCodingPolicyName(INode inode) {
     }
     return "";
   }
-
-  void checkPermission(INodeDirectory inode, int snapshotId, FsAction access)
-      throws AccessControlException {
-    if (dir != null && dir.isPermissionEnabled()
-        && pc != null && !pc.isSuperUser()) {
-      pc.checkPermission(inode, snapshotId, access);
-    }
-  }
 }
DirectoryWithQuotaFeature.java
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.EnumCounters;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Quota feature for {@link INodeDirectory}.
@@ -126,8 +125,7 @@ QuotaCounts AddCurrentSpaceUsage(QuotaCounts counts) {
   }
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
-      final ContentSummaryComputationContext summary)
-      throws AccessControlException {
+      final ContentSummaryComputationContext summary) {
     final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
FSDirStatAndListingOp.java
@@ -127,8 +127,10 @@ static ContentSummary getContentSummary(
       FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
-    // getContentSummaryInt() call will check access (if enabled) when
-    // traversing all sub directories.
+    if (fsd.isPermissionEnabled()) {
+      fsd.checkPermission(pc, iip, false, null, null, null,
+          FsAction.READ_EXECUTE);
+    }
     return getContentSummaryInt(fsd, iip);
   }
 
@@ -511,8 +513,7 @@ private static ContentSummary getContentSummaryInt(FSDirectory fsd,
     // processed. 0 means disabled. I.e. blocking for the entire duration.
     ContentSummaryComputationContext cscc =
         new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
-            fsd.getPermissionChecker());
+            fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
     ContentSummary cs = targetNode.computeAndConvertContentSummary(
         iip.getPathSnapshotId(), cscc);
     fsd.addYieldCount(cscc.getYieldCount());
FSPermissionChecker.java
@@ -195,38 +195,6 @@ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
-  /**
-   * Check permission only for the given inode (not checking the children's
-   * access).
-   *
-   * @param inode the inode to check.
-   * @param snapshotId the snapshot id.
-   * @param access the target access.
-   * @throws AccessControlException
-   */
-  void checkPermission(INode inode, int snapshotId, FsAction access)
-      throws AccessControlException {
-    try {
-      byte[][] localComponents = {inode.getLocalNameBytes()};
-      INodeAttributes[] iNodeAttr = {inode.getSnapshotINode(snapshotId)};
-      AccessControlEnforcer enforcer = getAccessControlEnforcer();
-      enforcer.checkPermission(
-          fsOwner, supergroup, callerUgi,
-          iNodeAttr, // single inode attr in the array
-          new INode[]{inode}, // single inode in the array
-          localComponents, snapshotId,
-          null, -1, // this will skip checkTraverse() because
-                    // not checking ancestor here
-          false, null, null,
-          access, // the target access to be checked against the inode
-          null, // passing null sub access avoids checking children
-          false);
-    } catch (AccessControlException ace) {
-      throw new AccessControlException(
-          toAccessControlString(inode, inode.getFullPathName(), access));
-    }
-  }
-
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
INode.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.Diff;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.StringUtils;
 
@@ -419,8 +418,7 @@ public abstract void cleanSubtree(ReclaimContext reclaimContext,
   public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary(
-      BlockStoragePolicySuite bsps) throws AccessControlException {
+  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
     return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
@@ -429,7 +427,7 @@ public final ContentSummary computeContentSummary(
    * Compute {@link ContentSummary}.
    */
   public final ContentSummary computeAndConvertContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     computeContentSummary(snapshotId, summary);
     final ContentCounts counts = summary.getCounts();
     final ContentCounts snapshotCounts = summary.getSnapshotCounts();
@@ -463,8 +461,7 @@ public final ContentSummary computeAndConvertContentSummary(int snapshotId,
    * @return The same objects as summary.
    */
   public abstract ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary)
-      throws AccessControlException;
+      int snapshotId, ContentSummaryComputationContext summary);
 
 
   /**
INodeDirectory.java
@@ -26,7 +26,6 @@
 import java.util.Map;
 
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttr;
@@ -44,7 +43,6 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -634,7 +632,7 @@ public QuotaCounts computeQuotaUsage4CurrentDirectory(
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       final ContentCounts counts = new ContentCounts.Builder().build();
@@ -656,10 +654,7 @@ public ContentSummaryComputationContext computeContentSummary(int snapshotId,
   }
 
   protected ContentSummaryComputationContext computeDirectoryContentSummary(
-      ContentSummaryComputationContext summary, int snapshotId)
-      throws AccessControlException{
-    // throws exception if failing the permission check
-    summary.checkPermission(this, snapshotId, FsAction.READ_EXECUTE);
+      ContentSummaryComputationContext summary, int snapshotId) {
     ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
     // Explicit traversing is done to enable repositioning after relinquishing
     // and reacquiring locks.
INodeReference.java
@@ -30,7 +30,6 @@
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An anonymous reference to an inode.
@@ -315,7 +314,7 @@ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(int snapshotId,
-      ContentSummaryComputationContext summary) throws AccessControlException {
+      ContentSummaryComputationContext summary) {
     return referred.computeContentSummary(snapshotId, summary);
   }
 
DirectorySnapshottableFeature.java
@@ -44,7 +44,6 @@
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -235,7 +234,7 @@ public Snapshot removeSnapshot(
 
   @Override
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) throws AccessControlException {
+      final ContentCounts counts) {
     counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
     counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     super.computeContentSummary4Snapshot(bsps, counts);
DirectoryWithSnapshotFeature.java
@@ -47,7 +47,6 @@
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
 
@@ -631,7 +630,7 @@ public QuotaCounts computeQuotaUsage4CurrentDirectory(
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final ContentCounts counts) throws AccessControlException {
+      final ContentCounts counts) {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary =
         new ContentSummaryComputationContext(bsps);
Snapshot.java
@@ -41,7 +41,6 @@
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
-import org.apache.hadoop.security.AccessControlException;
 
 /** Snapshot of a sub-tree in the namesystem. */
 @InterfaceAudience.Private
@@ -177,8 +176,7 @@ public INode getChild(byte[] name, int snapshotId) {
 
   @Override
   public ContentSummaryComputationContext computeContentSummary(
-      int snapshotId, ContentSummaryComputationContext summary)
-      throws AccessControlException {
+      int snapshotId, ContentSummaryComputationContext summary) {
     return computeDirectoryContentSummary(summary, snapshotId);
   }
 
TestGetContentSummaryWithPermission.java (deleted)
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.security.PrivilegedExceptionAction;
-
-import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * This class tests get content summary with permission settings.
- */
-public class TestGetContentSummaryWithPermission {
-  protected static final short REPLICATION = 3;
-  protected static final long BLOCKSIZE = 1024;
-
-  private Configuration conf;
-  private MiniDFSCluster cluster;
-  private DistributedFileSystem dfs;
-
-  @Before
-  public void setUp() throws Exception {
-    conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
-    cluster.waitActive();
-
-    dfs = cluster.getFileSystem();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-      cluster = null;
-    }
-  }
-
-  /**
-   * Test getContentSummary for super user. For super user, whatever
-   * permission the directories are with, always allowed to access
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetContentSummarySuperUser() throws Exception {
-    final Path foo = new Path("/fooSuper");
-    final Path bar = new Path(foo, "barSuper");
-    final Path baz = new Path(bar, "bazSuper");
-    dfs.mkdirs(bar);
-    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
-    ContentSummary summary;
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(foo, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(bar, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-
-    dfs.setPermission(baz, new FsPermission((short)0));
-
-    summary = cluster.getNameNodeRpc().getContentSummary(
-        foo.toString());
-    verifySummary(summary, 2, 1, 10);
-  }
-
-  /**
-   * Test getContentSummary for non-super, non-owner. Such users are restricted
-   * by permission of subdirectories. Namely if there is any subdirectory that
-   * does not have READ_EXECUTE access, AccessControlException will be thrown.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testGetContentSummaryNonSuperUser() throws Exception {
-    final Path foo = new Path("/fooNoneSuper");
-    final Path bar = new Path(foo, "barNoneSuper");
-    final Path baz = new Path(bar, "bazNoneSuper");
-    // run as some random non-superuser, non-owner user.
-    final UserGroupInformation userUgi =
-        UserGroupInformation.createUserForTesting(
-            "randomUser", new String[]{"randomGroup"});
-    dfs.mkdirs(bar);
-    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
-
-    // by default, permission is rwxr-xr-x, as long as READ and EXECUTE are set,
-    // content summary should accessible
-    FileStatus fileStatus;
-    fileStatus = dfs.getFileStatus(foo);
-    assertEquals((short)755, fileStatus.getPermission().toOctal());
-    fileStatus = dfs.getFileStatus(bar);
-    assertEquals((short)755, fileStatus.getPermission().toOctal());
-    // file has no EXECUTE, it is rw-r--r-- default
-    fileStatus = dfs.getFileStatus(baz);
-    assertEquals((short)644, fileStatus.getPermission().toOctal());
-
-    // by default, can get content summary
-    ContentSummary summary =
-        userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-            () -> cluster.getNameNodeRpc().getContentSummary(
-                foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-
-    // set empty access on root dir, should disallow content summary
-    dfs.setPermission(foo, new FsPermission((short)0));
-    try {
-      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-          () -> cluster.getNameNodeRpc().getContentSummary(
-              foo.toString()));
-      fail("Should've fail due to access control exception.");
-    } catch (AccessControlException e) {
-      assertTrue(e.getMessage().contains("Permission denied"));
-    }
-
-    // restore foo's permission to allow READ_EXECUTE
-    dfs.setPermission(foo,
-        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
-    // set empty access on subdir, should disallow content summary from root dir
-    dfs.setPermission(bar, new FsPermission((short)0));
-
-    try {
-      userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-          () -> cluster.getNameNodeRpc().getContentSummary(
-              foo.toString()));
-      fail("Should've fail due to access control exception.");
-    } catch (AccessControlException e) {
-      assertTrue(e.getMessage().contains("Permission denied"));
-    }
-
-    // restore the permission of subdir to READ_EXECUTE. enable
-    // getContentSummary again for root
-    dfs.setPermission(bar,
-        new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
-
-    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-        () -> cluster.getNameNodeRpc().getContentSummary(
-            foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-
-    // permission of files under the directory does not affect
-    // getContentSummary
-    dfs.setPermission(baz, new FsPermission((short)0));
-    summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>)
-        () -> cluster.getNameNodeRpc().getContentSummary(
-            foo.toString()));
-    verifySummary(summary, 2, 1, 10);
-  }
-
-  private void verifySummary(ContentSummary summary, int dirCount,
-      int fileCount, int length) {
-    assertEquals(dirCount, summary.getDirectoryCount());
-    assertEquals(fileCount, summary.getFileCount());
-    assertEquals(length, summary.getLength());
-  }
-
-}