HDFS-11641. Reduce cost of audit logging by using FileStatus instead of HdfsFileStatus. Contributed by Daryn Sharp.
commit 9b90e52f1e
parent 1d1c52b42f
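In outline, the patch makes the audit path hand a plain org.apache.hadoop.fs.FileStatus to the audit loggers instead of materializing an HdfsFileStatus for every audited call: FSDirectory#getAuditFileInfo now assembles the FileStatus directly from the resolved INode (see the FSDirectory.java hunk below), and the FSNamesystem#logAuditEvent overloads are reshuffled so the HdfsFileStatus-to-FileStatus copy happens only at the few call sites that still hold an HdfsFileStatus. Pluggable audit loggers, registered via dfs.namenode.audit.loggers, already consume FileStatus, so their interface is unchanged. As a minimal sketch of that consumer side (the class below is illustrative, not part of this commit, and its body is an assumption), a custom AuditLogger looks like:

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

// Illustrative sketch only (not from this patch): a trivial audit logger
// that prints each audited command and the FileStatus now built directly
// by FSDirectory#getAuditFileInfo.
public class StdoutAuditLogger implements AuditLogger {
  @Override
  public void initialize(Configuration conf) {
    // Nothing to configure for this sketch.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst,
      FileStatus status) {
    // status is null when the operation did not resolve a target inode.
    System.out.printf("allowed=%b ugi=%s ip=%s cmd=%s src=%s dst=%s perm=%s%n",
        succeeded, userName, addr, cmd, src, dst,
        status == null ? "-" : status.getPermission());
  }
}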
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -26,7 +28,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
 import java.io.IOException;
@@ -34,7 +35,7 @@
 import java.util.List;
 
 class FSDirAclOp {
-  static HdfsFileStatus modifyAclEntries(
+  static FileStatus modifyAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -59,7 +60,7 @@ static HdfsFileStatus modifyAclEntries(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAclEntries(
+  static FileStatus removeAclEntries(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
@@ -84,7 +85,7 @@ static HdfsFileStatus removeAclEntries(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -108,7 +109,7 @@ static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
+  static FileStatus removeAcl(FSDirectory fsd, final String srcArg)
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
@@ -127,7 +128,7 @@ static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setAcl(
+  static FileStatus setAcl(
       FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
       throws IOException {
     String src = srcArg;
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
@@ -28,7 +29,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -50,7 +50,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
 public class FSDirAttrOp {
-  static HdfsFileStatus setPermission(
+  static FileStatus setPermission(
       FSDirectory fsd, final String src, FsPermission permission)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -70,7 +70,7 @@ static HdfsFileStatus setPermission(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setOwner(
+  static FileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
@@ -100,7 +100,7 @@ static HdfsFileStatus setOwner(
     return fsd.getAuditFileInfo(iip);
   }
 
-  static HdfsFileStatus setTimes(
+  static FileStatus setTimes(
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -153,13 +153,13 @@ static boolean setReplication(
     return isFile;
   }
 
-  static HdfsFileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src) throws IOException {
     return setStoragePolicy(fsd, bm, src,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final String policyName) throws IOException {
     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
@@ -171,7 +171,7 @@ static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
     return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
   }
 
-  static HdfsFileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
+  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
       String src, final byte policyId, final String operation)
       throws IOException {
     if (!fsd.isStoragePolicyEnabled()) {
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -21,9 +21,9 @@
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -48,7 +48,7 @@
  */
 class FSDirConcatOp {
 
-  static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
+  static FileStatus concat(FSDirectory fsd, String target, String[] srcs,
       boolean logRetryCache) throws IOException {
     validatePath(target, srcs);
     assert srcs != null;
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -41,7 +42,6 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -143,10 +143,10 @@ static KeyProvider.Metadata ensureKeyIsInitialized(final FSDirectory fsd,
    *          KeyProvider
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
-   * @return HdfsFileStatus
+   * @return FileStatus
    * @throws IOException
    */
-  static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
+  static FileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
     final CipherSuite suite = CipherSuite.convert(cipher);
@@ -177,7 +177,7 @@ static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
    * @param pc permission checker to check fs permission
    * @return the EZ with file status.
    */
-  static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
+  static Map.Entry<EncryptionZone, FileStatus> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
     final INodesInPath iip;
@@ -192,7 +192,7 @@ static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(
     } finally {
       fsd.readUnlock();
     }
-    HdfsFileStatus auditStat = fsd.getAuditFileInfo(iip);
+    FileStatus auditStat = fsd.getAuditFileInfo(iip);
     return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
   }
 
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -32,6 +32,7 @@
 import com.google.common.collect.Lists;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -39,7 +40,6 @@
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
@@ -100,12 +100,12 @@ static ErasureCodingPolicy getErasureCodingPolicyByName(
    *          directory.
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
-   * @return {@link HdfsFileStatus}
+   * @return {@link FileStatus}
    * @throws IOException
    * @throws HadoopIllegalArgumentException if the policy is not enabled
    * @throws AccessControlException if the user does not have write access
    */
-  static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
+  static FileStatus setErasureCodingPolicy(final FSNamesystem fsn,
      final String srcArg, final String ecPolicyName,
      final FSPermissionChecker pc, final boolean logRetryCache)
      throws IOException, AccessControlException {
@@ -179,11 +179,11 @@ private static List<XAttr> setErasureCodingPolicyXAttr(final FSNamesystem fsn,
    * @param srcArg The path of the target directory.
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
-   * @return {@link HdfsFileStatus}
+   * @return {@link FileStatus}
    * @throws IOException
    * @throws AccessControlException if the user does not have write access
    */
-  static HdfsFileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
+  static FileStatus unsetErasureCodingPolicy(final FSNamesystem fsn,
      final String srcArg, final FSPermissionChecker pc,
      final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -19,6 +19,7 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -27,7 +28,6 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -39,7 +39,7 @@
 
 class FSDirMkdirOp {
 
-  static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
+  static FileStatus mkdirs(FSNamesystem fsn, String src,
       PermissionStatus permissions, boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     if(NameNode.stateChangeLog.isDebugEnabled()) {
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -19,13 +19,13 @@
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -781,18 +781,18 @@ private static RenameResult createRenameResult(FSDirectory fsd,
       INodesInPath dst, boolean filesDeleted,
       BlocksMapUpdateInfo collectedBlocks) throws IOException {
     boolean success = (dst != null);
-    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
+    FileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
     return new RenameResult(
         success, auditStat, filesDeleted, collectedBlocks);
   }
 
   static class RenameResult {
     final boolean success;
-    final HdfsFileStatus auditStat;
+    final FileStatus auditStat;
     final boolean filesDeleted;
     final BlocksMapUpdateInfo collectedBlocks;
 
-    RenameResult(boolean success, HdfsFileStatus auditStat,
+    RenameResult(boolean success, FileStatus auditStat,
         boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) {
       this.success = success;
       this.auditStat = auditStat;
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 
@@ -33,7 +33,7 @@
 
 class FSDirSymlinkOp {
 
-  static HdfsFileStatus createSymlinkInt(
+  static FileStatus createSymlinkInt(
       FSNamesystem fsn, String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
       throws IOException {
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -21,11 +21,11 @@
 import java.util.Set;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -348,9 +348,9 @@ private static boolean shouldCopyOnTruncate(FSNamesystem fsn, INodeFile file,
    */
   static class TruncateResult {
     private final boolean result;
-    private final HdfsFileStatus stat;
+    private final FileStatus stat;
 
-    public TruncateResult(boolean result, HdfsFileStatus stat) {
+    public TruncateResult(boolean result, FileStatus stat) {
       this.result = result;
       this.stat = stat;
     }
@@ -366,7 +366,7 @@ boolean getResult() {
     /**
      * @return file information.
      */
-    HdfsFileStatus getFileStatus() {
+    FileStatus getFileStatus() {
       return stat;
     }
   }
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -21,13 +21,13 @@
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
@@ -59,7 +59,7 @@ class FSDirXAttrOp {
    *          - xAttrs flags
    * @throws IOException
    */
-  static HdfsFileStatus setXAttr(
+  static FileStatus setXAttr(
       FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
@@ -153,7 +153,7 @@ static List<XAttr> listXAttrs(
    *          - xAttr to remove
    * @throws IOException
    */
-  static HdfsFileStatus removeXAttr(
+  static FileStatus removeXAttr(
       FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +39,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
@@ -1796,10 +1798,45 @@ void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip)
     }
   }
 
-  HdfsFileStatus getAuditFileInfo(INodesInPath iip)
+  FileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
-    return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip, false) : null;
+    if (!namesystem.isAuditEnabled() || !namesystem.isExternalInvocation()) {
+      return null;
+    }
+
+    final INode inode = iip.getLastINode();
+    if (inode == null) {
+      return null;
+    }
+    final int snapshot = iip.getPathSnapshotId();
+
+    Path symlink = null;
+    long size = 0;     // length is zero for directories
+    short replication = 0;
+    long blocksize = 0;
+
+    if (inode.isFile()) {
+      final INodeFile fileNode = inode.asFile();
+      size = fileNode.computeFileSize(snapshot);
+      replication = fileNode.getFileReplication(snapshot);
+      blocksize = fileNode.getPreferredBlockSize();
+    } else if (inode.isSymlink()) {
+      symlink = new Path(
+          DFSUtilClient.bytes2String(inode.asSymlink().getSymlink()));
+    }
+
+    return new FileStatus(
+        size,
+        inode.isDirectory(),
+        replication,
+        blocksize,
+        inode.getModificationTime(snapshot),
+        inode.getAccessTime(snapshot),
+        inode.getFsPermission(snapshot),
+        inode.getUserName(snapshot),
+        inode.getGroupName(snapshot),
+        symlink,
+        new Path(iip.getPath()));
   }
 
   /**
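The rewritten getAuditFileInfo above is the heart of the patch: the old implementation delegated to FSDirStatAndListingOp.getFileInfo, which builds a full HdfsFileStatus (resolving extras such as storage policy and encryption information) that logAuditEvent then copied field by field into a FileStatus. The new version reads just the INode attributes an audit entry actually needs and allocates a single FileStatus, so the heavier lookup is skipped entirely on the audit path.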
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -351,26 +351,34 @@ private void logAuditEvent(boolean succeeded, String cmd, String src)
   }
 
   private void logAuditEvent(boolean succeeded, String cmd, String src,
-      String dst, HdfsFileStatus stat) throws IOException {
+      String dst, FileStatus stat) throws IOException {
     if (isAuditEnabled() && isExternalInvocation()) {
       logAuditEvent(succeeded, Server.getRemoteUser(), Server.getRemoteIp(),
           cmd, src, dst, stat);
     }
   }
 
-  private void logAuditEvent(boolean succeeded,
-      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
-      String dst, HdfsFileStatus stat) {
+  private void logAuditEvent(boolean succeeded, String cmd, String src,
+      HdfsFileStatus stat) throws IOException {
+    if (!isAuditEnabled() || !isExternalInvocation()) {
+      return;
+    }
     FileStatus status = null;
     if (stat != null) {
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
-      Path path = dst != null ? new Path(dst) : new Path(src);
+      Path path = new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
           stat.getReplication(), stat.getBlockSize(),
           stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
           stat.getGroup(), symlink, path);
     }
+    logAuditEvent(succeeded, cmd, src, null, status);
+  }
+
+  private void logAuditEvent(boolean succeeded,
+      UserGroupInformation ugi, InetAddress addr, String cmd, String src,
+      String dst, FileStatus status) {
     final String ugiStr = ugi.toString();
     for (AuditLogger logger : auditLoggers) {
       if (logger instanceof HdfsAuditLogger) {
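Note the shape of the hunk above: the overload that still accepts an HdfsFileStatus (kept for call sites such as startFile, which already have one in hand) now returns early, before constructing anything, when auditing is off or the call is not an external RPC, and its conversion uses new Path(src) since that overload takes no destination argument. The logger-facing overload at the bottom receives the FileStatus as-is and simply passes it to each registered AuditLogger.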
@@ -1725,7 +1733,7 @@ FsServerDefaults getServerDefaults() throws StandbyException {
    */
   void setPermission(String src, FsPermission permission) throws IOException {
     final String operationName = "setPermission";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1749,7 +1757,7 @@ void setPermission(String src, FsPermission permission) throws IOException {
   void setOwner(String src, String username, String group)
       throws IOException {
     final String operationName = "setOwner";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1886,7 +1894,7 @@ private void sortLocatedBlocks(String clientMachine, LocatedBlocks blocks) {
   void concat(String target, String [] srcs, boolean logRetryCache)
       throws IOException {
     final String operationName = "concat";
-    HdfsFileStatus stat = null;
+    FileStatus stat = null;
     boolean success = false;
     writeLock();
     try {
@@ -1914,7 +1922,7 @@ void concat(String target, String [] srcs, boolean logRetryCache)
    */
   void setTimes(String src, long mtime, long atime) throws IOException {
     final String operationName = "setTimes";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -1989,7 +1997,7 @@ void createSymlink(String target, String link,
     if (!FileSystem.areSymlinksEnabled()) {
       throw new UnsupportedOperationException("Symlinks not supported");
     }
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2050,7 +2058,7 @@ boolean setReplication(final String src, final short replication)
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     final String operationName = "setStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2075,7 +2083,7 @@ void setStoragePolicy(String src, String policyName) throws IOException {
    */
   void unsetStoragePolicy(String src) throws IOException {
     final String operationName = "unsetStoragePolicy";
-    HdfsFileStatus auditStat;
+    FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -2195,7 +2203,7 @@ HdfsFileStatus startFile(String src, PermissionStatus permissions,
       logAuditEvent(false, "create", src);
       throw e;
     }
-    logAuditEvent(true, "create", src, null, status);
+    logAuditEvent(true, "create", src, status);
     return status;
   }
 
@@ -2954,7 +2962,7 @@ boolean isFileClosed(final String src) throws IOException {
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean createParent) throws IOException {
     final String operationName = "mkdirs";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6589,7 +6597,7 @@ BatchedListEntries<CachePoolEntry> listCachePools(String prevKey)
   void modifyAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "modifyAclEntries";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6610,7 +6618,7 @@ void removeAclEntries(final String src, List<AclEntry> aclSpec)
       throws IOException {
     final String operationName = "removeAclEntries";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6628,7 +6636,7 @@ void removeAclEntries(final String src, List<AclEntry> aclSpec)
 
   void removeDefaultAcl(final String src) throws IOException {
     final String operationName = "removeDefaultAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6647,7 +6655,7 @@ void removeDefaultAcl(final String src) throws IOException {
 
   void removeAcl(final String src) throws IOException {
     final String operationName = "removeAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6666,7 +6674,7 @@ void removeAcl(final String src) throws IOException {
 
   void setAcl(final String src, List<AclEntry> aclSpec) throws IOException {
     final String operationName = "setAcl";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
     writeLock();
     try {
@@ -6722,7 +6730,7 @@ void createEncryptionZone(final String src, final String keyName,
     checkSuperuserPrivilege();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    final HdfsFileStatus resultingStat;
+    final FileStatus resultingStat;
     writeLock();
     try {
       checkSuperuserPrivilege();
@@ -6753,7 +6761,7 @@ void createEncryptionZone(final String src, final String keyName,
   EncryptionZone getEZForPath(final String srcArg)
       throws AccessControlException, UnresolvedLinkException, IOException {
     final String operationName = "getEZForPath";
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     boolean success = false;
     EncryptionZone encryptionZone;
     final FSPermissionChecker pc = getPermissionChecker();
@@ -6761,7 +6769,7 @@ EncryptionZone getEZForPath(final String srcArg)
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      Entry<EncryptionZone, HdfsFileStatus> ezForPath = FSDirEncryptionZoneOp
+      Entry<EncryptionZone, FileStatus> ezForPath = FSDirEncryptionZoneOp
           .getEZForPath(dir, srcArg, pc);
       success = true;
       resultingStat = ezForPath.getValue();
@@ -6810,7 +6818,7 @@ void setErasureCodingPolicy(final String srcArg, final String ecPolicyName,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     final String operationName = "setErasureCodingPolicy";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     final FSPermissionChecker pc = getPermissionChecker();
     boolean success = false;
     writeLock();
@@ -6868,7 +6876,7 @@ void unsetErasureCodingPolicy(final String srcArg,
       UnresolvedLinkException, SafeModeException, AccessControlException {
     final String operationName = "unsetErasureCodingPolicy";
     checkOperation(OperationCategory.WRITE);
-    HdfsFileStatus resultingStat = null;
+    FileStatus resultingStat = null;
     final FSPermissionChecker pc = getPermissionChecker();
     boolean success = false;
     writeLock();
@@ -6926,7 +6934,7 @@ void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
       boolean logRetryCache)
       throws IOException {
     final String operationName = "setXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -6982,7 +6990,7 @@ List<XAttr> listXAttrs(String src) throws IOException {
   void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
       throws IOException {
     final String operationName = "removeXAttr";
-    HdfsFileStatus auditStat = null;
+    FileStatus auditStat = null;
     writeLock();
     try {
      checkOperation(OperationCategory.WRITE);