diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 5b456b1eff..a9a19cdc29 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -43,6 +43,7 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.security.AccessControlException;
@@ -803,6 +804,18 @@ public abstract FileStatus getFileStatus(final Path f)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
+ /**
+ * The specification of this method matches that of
+ * {@link FileContext#access(Path, FsAction)}
+ * except that an UnresolvedLinkException may be thrown if a symlink is
+ * encountered in the path.
+ */
+ @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException {
+ FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
+ }
+
/**
* The specification of this method matches that of
* {@link FileContext#getFileLinkStatus(Path)}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 808709859a..c9c8fa8ffd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -44,6 +44,7 @@
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public FileStatus next(final AbstractFileSystem fs, final Path p)
}.resolve(this, absF);
}
+ /**
+ * Checks if the user can access a path. The mode specifies which access
+ * checks to perform. If the requested permissions are granted, then the
+ * method returns normally. If access is denied, then the method throws an
+ * {@link AccessControlException}.
+ *
+ * The default implementation of this method calls {@link #getFileStatus(Path)}
+ * and checks the returned permissions against the requested permissions.
+ * Note that the getFileStatus call will be subject to authorization checks.
+ * Typically, this requires search (execute) permissions on each directory in
+ * the path's prefix, but this is implementation-defined. Any file system
+ * that provides a richer authorization model (such as ACLs) may override the
+ * default implementation so that it checks against that model instead.
+ *
+ * In general, applications should avoid using this method, due to the risk of
+ * time-of-check/time-of-use race conditions. The permissions on a file may
+ * change immediately after the access call returns. Most applications should
+ * prefer running specific file system actions as the desired user represented
+ * by a {@link UserGroupInformation}.
+ *
+ * @param path Path to check
+ * @param mode type of access to check
+ * @throws AccessControlException if access is denied
+ * @throws FileNotFoundException if the path does not exist
+ * @throws UnsupportedFileSystemException if file system for path
+ * is not supported
+ * @throws IOException see specific implementation
+ *
+ * Exceptions applicable to file systems accessed over RPC:
+ * @throws RpcClientException If an exception occurred in the RPC client
+ * @throws RpcServerException If an exception occurred in the RPC server
+ * @throws UnexpectedServerException If server implementation throws
+ * undeclared exception to RPC server
+ */
+ @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+ public void access(final Path path, final FsAction mode)
+ throws AccessControlException, FileNotFoundException,
+ UnsupportedFileSystemException, IOException {
+ final Path absPath = fixRelativePart(path);
+ new FSLinkResolver<Void>() {
+ @Override
+ public Void next(AbstractFileSystem fs, Path p) throws IOException,
+ UnresolvedLinkException {
+ fs.access(p, mode);
+ return null;
+ }
+ }.resolve(this, absPath);
+ }
+
/**
* Return a file status object that represents the path. If the path
* refers to a symlink then the FileStatus of the symlink is returned.
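The javadoc above steers applications toward performing the real operation as the intended user rather than probing permissions first. A hedged sketch of both styles against FileContext follows; it is not part of the patch, and the configuration, the path /data/report.csv, the user name "analyst", and the proxy-user setup are assumptions for illustration.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

public class FileContextAccessSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final Path report = new Path("/data/report.csv");
    FileContext fc = FileContext.getFileContext(conf);

    // Explicit probe: answers "can the current user read this right now?",
    // but the answer can become stale immediately (time-of-check/time-of-use).
    try {
      fc.access(report, FsAction.READ);
    } catch (AccessControlException denied) {
      // the current user lacks READ permission at this moment
    }

    // Pattern recommended by the javadoc: run the action as the target user and
    // let the open itself enforce permissions. Proxy-user privileges are assumed.
    UserGroupInformation analyst = UserGroupInformation.createProxyUser(
        "analyst", UserGroupInformation.getLoginUser());
    FSDataInputStream in = analyst.doAs(
        new PrivilegedExceptionAction<FSDataInputStream>() {
          @Override
          public FSDataInputStream run() throws Exception {
            return FileContext.getFileContext(conf).open(report);
          }
        });
    in.close();
  }
}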
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 1eb54d16a9..1d2270b37e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -25,6 +25,7 @@
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
@@ -50,6 +51,7 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public short getDefaultReplication(Path path) {
*/
public abstract FileStatus getFileStatus(Path f) throws IOException;
+ /**
+ * Checks if the user can access a path. The mode specifies which access
+ * checks to perform. If the requested permissions are granted, then the
+ * method returns normally. If access is denied, then the method throws an
+ * {@link AccessControlException}.
+ *
+ * The default implementation of this method calls {@link #getFileStatus(Path)}
+ * and checks the returned permissions against the requested permissions.
+ * Note that the getFileStatus call will be subject to authorization checks.
+ * Typically, this requires search (execute) permissions on each directory in
+ * the path's prefix, but this is implementation-defined. Any file system
+ * that provides a richer authorization model (such as ACLs) may override the
+ * default implementation so that it checks against that model instead.
+ *
+ * In general, applications should avoid using this method, due to the risk of
+ * time-of-check/time-of-use race conditions. The permissions on a file may
+ * change immediately after the access call returns. Most applications should
+ * prefer running specific file system actions as the desired user represented
+ * by a {@link UserGroupInformation}.
+ *
+ * @param path Path to check
+ * @param mode type of access to check
+ * @throws AccessControlException if access is denied
+ * @throws FileNotFoundException if the path does not exist
+ * @throws IOException see specific implementation
+ */
+ @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, IOException {
+ checkAccessPermissions(this.getFileStatus(path), mode);
+ }
+
+ /**
+ * This method provides the default implementation of
+ * {@link #access(Path, FsAction)}.
+ *
+ * @param stat FileStatus to check
+ * @param mode type of access to check
+ * @throws IOException for any error
+ */
+ @InterfaceAudience.Private
+ static void checkAccessPermissions(FileStatus stat, FsAction mode)
+ throws IOException {
+ FsPermission perm = stat.getPermission();
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ String user = ugi.getShortUserName();
+ List<String> groups = Arrays.asList(ugi.getGroupNames());
+ if (user.equals(stat.getOwner())) {
+ if (perm.getUserAction().implies(mode)) {
+ return;
+ }
+ } else if (groups.contains(stat.getGroup())) {
+ if (perm.getGroupAction().implies(mode)) {
+ return;
+ }
+ } else {
+ if (perm.getOtherAction().implies(mode)) {
+ return;
+ }
+ }
+ throw new AccessControlException(String.format(
+ "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
+ stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
+ }
+
/**
* See {@link FileContext#fixRelativePart}
*/
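The default implementation above consults exactly one permission class: the owner bits if the caller owns the file, otherwise the group bits if the caller belongs to the file's group, otherwise the other bits. A minimal sketch of that mapping, not part of the patch, using FsPermission and FsAction.implies(); the 0640 mode and the owner/group roles are assumptions for illustration.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultAccessCheckSketch {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0640);  // rw-r-----

    // The owner asking for READ_WRITE: user bits rw- imply it, so access is granted.
    System.out.println(perm.getUserAction().implies(FsAction.READ_WRITE));  // true

    // A group member asking for WRITE: group bits r-- do not imply it, so denied.
    System.out.println(perm.getGroupAction().implies(FsAction.WRITE));      // false

    // Anyone else asking for READ: other bits --- do not imply it, so denied.
    System.out.println(perm.getOtherAction().implies(FsAction.READ));       // false
  }
}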
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 139e1430f8..52706f4049 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
@@ -397,6 +398,12 @@ public FileStatus getFileStatus(Path f) throws IOException {
return fs.getFileStatus(f);
}
+ @Override
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, IOException {
+ fs.access(path, mode);
+ }
+
public void createSymlink(final Path target, final Path link,
final boolean createParent) throws AccessControlException,
FileAlreadyExistsException, FileNotFoundException,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 6ffe9214b3..b6e1d96e03 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.security.AccessControlException;
@@ -119,6 +120,13 @@ public FileStatus getFileStatus(Path f)
return myFs.getFileStatus(f);
}
+ @Override
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException {
+ checkPath(path);
+ myFs.access(path, mode);
+ }
+
@Override
public FileStatus getFileLinkStatus(final Path f)
throws IOException, UnresolvedLinkException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 4480da20f3..9650a374d1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -41,7 +41,9 @@
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
/**
@@ -222,6 +224,12 @@ public FileStatus getFileStatus(final Path f)
return super.getFileStatus(fullPath(f));
}
+ @Override
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, IOException {
+ super.access(fullPath(path), mode);
+ }
+
@Override
public FsStatus getStatus(Path p) throws IOException {
return super.getStatus(fullPath(p));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 5d53eb79d0..9569e1089b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -41,7 +41,9 @@
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -200,6 +202,11 @@ public FileStatus getFileStatus(final Path f)
return myFs.getFileStatus(fullPath(f));
}
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException {
+ myFs.access(fullPath(path), mode);
+ }
+
@Override
public FileStatus getFileLinkStatus(final Path f)
throws IOException, UnresolvedLinkException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index b4ac18eb1a..963289f437 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -51,6 +51,7 @@
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -359,7 +360,14 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException,
return new ViewFsFileStatus(status, this.makeQualified(f));
}
-
+ @Override
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, IOException {
+ InodeTree.ResolveResult<FileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.access(res.remainingPath, mode);
+ }
+
@Override
public FileStatus[] listStatus(final Path f) throws AccessControlException,
FileNotFoundException, IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 5cdccd2997..014f488127 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -54,6 +54,7 @@
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -352,6 +353,14 @@ public FileStatus getFileStatus(final Path f) throws AccessControlException,
return new ViewFsFileStatus(status, this.makeQualified(f));
}
+ @Override
+ public void access(Path path, FsAction mode) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(path), true);
+ res.targetFileSystem.access(res.remainingPath, mode);
+ }
+
@Override
public FileStatus getFileLinkStatus(final Path f)
throws AccessControlException, FileNotFoundException,
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 24e712c051..1e86439785 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
@@ -201,6 +202,8 @@ public Map<String, byte[]> getXAttrs(Path path, List<String> names)
public void removeXAttr(Path path, String name) throws IOException;
public AclStatus getAclStatus(Path path) throws IOException;
+
+ public void access(Path path, FsAction mode) throws IOException;
}
@Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f750178561..1e33c194b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,6 +332,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6778. The extended attributes javadoc should simply refer to the
user docs. (clamb via wang)
+ HDFS-6570. Add an API that enables checking if a user has certain permissions
+ on a file. (Jitendra Pandey via cnauroth)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 6e4b66f71e..a0e75f81d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
@@ -448,6 +449,11 @@ public void removeXAttr(Path path, String name) throws IOException {
dfs.removeXAttr(getUriPath(path), name);
}
+ @Override
+ public void access(Path path, final FsAction mode) throws IOException {
+ dfs.checkAccess(getUriPath(path), mode);
+ }
+
/**
* Renew an existing delegation token.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 45a9011a56..b9af35ea25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -122,6 +122,7 @@
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.net.Peer;
@@ -2832,6 +2833,17 @@ public void removeXAttr(String src, String name) throws IOException {
}
}
+ public void checkAccess(String src, FsAction mode) throws IOException {
+ checkOpen();
+ try {
+ namenode.checkAccess(src, mode);
+ } catch (RemoteException re) {
+ throw re.unwrapRemoteException(AccessControlException.class,
+ FileNotFoundException.class,
+ UnresolvedPathException.class);
+ }
+ }
+
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 5ae39d6974..e20c61f518 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -59,6 +59,7 @@
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -1898,4 +1899,23 @@ public Void next(final FileSystem fs, final Path p) throws IOException {
}
}.resolve(this, absF);
}
+
+ @Override
+ public void access(Path path, final FsAction mode) throws IOException {
+ final Path absF = fixRelativePart(path);
+ new FileSystemLinkResolver<Void>() {
+ @Override
+ public Void doCall(final Path p) throws IOException {
+ dfs.checkAccess(getPathName(p), mode);
+ return null;
+ }
+
+ @Override
+ public Void next(final FileSystem fs, final Path p)
+ throws IOException {
+ fs.access(p, mode);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 74eca82fbe..8dbe1f7609 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1327,4 +1328,22 @@ public List<XAttr> listXAttrs(String src)
*/
@AtMostOnce
public void removeXAttr(String src, XAttr xAttr) throws IOException;
+
+ /**
+ * Checks if the user can access a path. The mode specifies which access
+ * checks to perform. If the requested permissions are granted, then the
+ * method returns normally. If access is denied, then the method throws an
+ * {@link AccessControlException}.
+ * In general, applications should avoid using this method, due to the risk of
+ * time-of-check/time-of-use race conditions. The permissions on a file may
+ * change immediately after the access call returns.
+ *
+ * @param path Path to check
+ * @param mode type of access to check
+ * @throws AccessControlException if access is denied
+ * @throws FileNotFoundException if the path does not exist
+ * @throws IOException see specific implementation
+ */
+ @Idempotent
+ public void checkAccess(String path, FsAction mode) throws IOException;
}
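For context, here is a hedged sketch (not part of the patch) of how a client-side caller reaches this RPC through the DFSClient.checkAccess wrapper added above; the directory path and the already-constructed client are assumptions.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.security.AccessControlException;

public class CheckAccessRpcSketch {
  /** Returns true only if the NameNode grants read+traverse on the directory right now. */
  static boolean canReadAndTraverse(DFSClient dfs, String dir) throws IOException {
    try {
      dfs.checkAccess(dir, FsAction.READ_EXECUTE);  // single checkAccess RPC to the NameNode
      return true;
    } catch (AccessControlException denied) {
      return false;   // rejected by FSNamesystem.checkAccess / FSPermissionChecker
    } catch (FileNotFoundException missing) {
      return false;   // the path does not exist
    }
  }
}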
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index df0d1b0006..c4211b1d79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -174,6 +174,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -320,6 +322,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
private static final RemoveXAttrResponseProto
VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
+ private static final CheckAccessResponseProto
+ VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance();
+
/**
* Constructor
*
@@ -1338,4 +1343,15 @@ public RemoveXAttrResponseProto removeXAttr(RpcController controller,
}
return VOID_REMOVEXATTR_RESPONSE;
}
+
+ @Override
+ public CheckAccessResponseProto checkAccess(RpcController controller,
+ CheckAccessRequestProto req) throws ServiceException {
+ try {
+ server.checkAccess(req.getPath(), PBHelper.convert(req.getMode()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return VOID_CHECKACCESS_RESPONSE;
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 0f8eba970c..85dbb7d718 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -144,6 +145,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -1346,4 +1348,15 @@ public void removeXAttr(String src, XAttr xAttr) throws IOException {
throw ProtobufHelper.getRemoteException(e);
}
}
+
+ @Override
+ public void checkAccess(String path, FsAction mode) throws IOException {
+ CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
+ .setPath(path).setMode(PBHelper.convert(mode)).build();
+ try {
+ rpcProxy.checkAccess(null, req);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 5706aab062..9ca93a5ae2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -2107,11 +2107,11 @@ private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
return castEnum(v, XATTR_NAMESPACE_VALUES);
}
- private static FsActionProto convert(FsAction v) {
+ public static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
}
- private static FsAction convert(FsActionProto v) {
+ public static FsAction convert(FsActionProto v) {
return castEnum(v, FSACTION_VALUES);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f12c4e2cfe..37f8c4b23d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8458,6 +8458,29 @@ private void checkXAttrChangeAccess(String src, XAttr xAttr,
}
}
+ void checkAccess(String src, FsAction mode) throws AccessControlException,
+ FileNotFoundException, UnresolvedLinkException, IOException {
+ checkOperation(OperationCategory.READ);
+ byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+ readLock();
+ try {
+ checkOperation(OperationCategory.READ);
+ src = FSDirectory.resolvePath(src, pathComponents, dir);
+ if (dir.getINode(src) == null) {
+ throw new FileNotFoundException("Path not found");
+ }
+ if (isPermissionEnabled) {
+ FSPermissionChecker pc = getPermissionChecker();
+ checkPathAccess(pc, src, mode);
+ }
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "checkAccess", src);
+ throw e;
+ } finally {
+ readUnlock();
+ }
+ }
+
/**
* Default AuditLogger implementation; used when no access logger is
* defined in the config file. It can also be explicitly listed in the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 2c2cd4f227..6800fcde17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -54,6 +54,7 @@
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -1443,5 +1444,10 @@ public List<XAttr> listXAttrs(String src) throws IOException {
public void removeXAttr(String src, XAttr xAttr) throws IOException {
namesystem.removeXAttr(src, xAttr);
}
+
+ @Override
+ public void checkAccess(String path, FsAction mode) throws IOException {
+ namesystem.checkAccess(path, mode);
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index d7235b3872..991885b2e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -57,6 +57,7 @@
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -112,6 +113,7 @@
import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
@@ -755,10 +757,12 @@ public Response getRoot(
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
- final ExcludeDatanodesParam excludeDatanodes
+ final ExcludeDatanodesParam excludeDatanodes,
+ @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+ final FsActionParam fsAction
) throws IOException, InterruptedException {
return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
- renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
+ renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
}
/** Handle HTTP GET request. */
@@ -789,11 +793,13 @@ public Response get(
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
- final ExcludeDatanodesParam excludeDatanodes
+ final ExcludeDatanodesParam excludeDatanodes,
+ @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+ final FsActionParam fsAction
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, offset, length,
- renewer, bufferSize, xattrEncoding, excludeDatanodes);
+ renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -801,7 +807,7 @@ public Response run() throws IOException, URISyntaxException {
try {
return get(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
- xattrNames, xattrEncoding, excludeDatanodes);
+ xattrNames, xattrEncoding, excludeDatanodes, fsAction);
} finally {
reset();
}
@@ -822,7 +828,8 @@ private Response get(
final BufferSizeParam bufferSize,
final List xattrNames,
final XAttrEncodingParam xattrEncoding,
- final ExcludeDatanodesParam excludeDatanodes
+ final ExcludeDatanodesParam excludeDatanodes,
+ final FsActionParam fsAction
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
@@ -919,6 +926,10 @@ private Response get(
final String js = JsonUtil.toJsonString(xAttrs);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
+ case CHECKACCESS: {
+ np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
+ return Response.ok().build();
+ }
default:
throw new UnsupportedOperationException(op + " is not supported");
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 78062ad0b5..cf6233f5a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -54,6 +54,7 @@
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -1356,6 +1357,12 @@ BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
}.run();
}
+ @Override
+ public void access(final Path path, final FsAction mode) throws IOException {
+ final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
+ new FsPathRunner(op, path, new FsActionParam(mode)).run();
+ }
+
@Override
public ContentSummary getContentSummary(final Path p) throws IOException {
statistics.incrementReadOps(1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
new file mode 100644
index 0000000000..c840196003
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import java.util.regex.Pattern;
+
+/** {@link FsAction} Parameter */
+public class FsActionParam extends StringParam {
+
+ /** Parameter name. */
+ public static final String NAME = "fsaction";
+
+ /** Default parameter value. */
+ public static final String DEFAULT = NULL;
+
+ private static final String FS_ACTION_PATTERN = "[rwx-]{3}";
+
+ private static final Domain DOMAIN = new Domain(NAME,
+ Pattern.compile(FS_ACTION_PATTERN));
+
+ /**
+ * Constructor.
+ * @param str a string representation of the parameter value.
+ */
+ public FsActionParam(final String str) {
+ super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+ }
+
+ /**
+ * Constructor.
+ * @param value the parameter value.
+ */
+ public FsActionParam(final FsAction value) {
+ super(DOMAIN, value == null? null: value.SYMBOL);
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+}
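A small sketch, not part of the patch, of how this parameter renders an FsAction as the fsaction query argument; constructing the param directly like this is purely illustrative.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;

public class FsActionParamSketch {
  public static void main(String[] args) {
    FsActionParam p = new FsActionParam(FsAction.READ_EXECUTE);
    // FsAction.READ_EXECUTE.SYMBOL is "r-x", which matches the [rwx-]{3} domain pattern,
    // so this prints: fsaction=r-x
    System.out.println(p.getName() + "=" + p.getValue());
  }
}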
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index bf5a6a23e5..f63ed44392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -39,7 +39,9 @@ public static enum Op implements HttpOpParam.Op {
GETXATTRS(false, HttpURLConnection.HTTP_OK),
LISTXATTRS(false, HttpURLConnection.HTTP_OK),
- NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+ NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
+
+ CHECKACCESS(false, HttpURLConnection.HTTP_OK);
final boolean redirect;
final int expectedHttpResponseCode;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index d2f92d64d0..cd291a6860 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -654,6 +654,14 @@ message DeleteSnapshotRequestProto {
message DeleteSnapshotResponseProto { // void response
}
+message CheckAccessRequestProto {
+ required string path = 1;
+ required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
service ClientNamenodeProtocol {
rpc getBlockLocations(GetBlockLocationsRequestProto)
returns(GetBlockLocationsResponseProto);
@@ -783,4 +791,6 @@ service ClientNamenodeProtocol {
returns(ListXAttrsResponseProto);
rpc removeXAttr(RemoveXAttrRequestProto)
returns(RemoveXAttrResponseProto);
+ rpc checkAccess(CheckAccessRequestProto)
+ returns(CheckAccessResponseProto);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
index 51bc574095..c3f6a6b813 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
@@ -82,6 +82,9 @@ WebHDFS REST API
* {{{List all XAttrs}<<<LISTXATTRS>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
+ * {{{Check access}<<<CHECKACCESS>>>}}
+ (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
+
* HTTP PUT
* {{{Create and Write to a File}<<>>}}
@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
+** {Check access}
+
+ * Submit a HTTP GET request.
+
++---------------------------------
+curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+ &fsaction=<FSACTION>"
++---------------------------------
+
+ The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+ []
+
+ See also:
+ {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
+
+
* {Extended Attributes(XAttrs) Operations}
** {Set XAttr}
@@ -2166,6 +2191,25 @@ var tokenProperties =
{{Proxy Users}}
+** {Fs Action}
+
+*----------------+-------------------------------------------------------------------+
|| Name | <<<fsaction>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description | File system operation read/write/execute |
+*----------------+-------------------------------------------------------------------+
+|| Type | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | null (an invalid value) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values | Strings matching regex pattern \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax | \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+
+ See also:
+ {{{Check access}<<<CHECKACCESS>>>}},
+
** {Group}
*----------------+-------------------------------------------------------------------+
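As a concrete illustration of the CHECKACCESS operation documented above (the host, port, path, and user name below are assumptions for illustration, not values from the patch):

  curl -i "http://namenode.example.com:50070/webhdfs/v1/user/alice/data?op=CHECKACCESS&fsaction=r-x&user.name=alice"

An HTTP 200 response with Content-Length: 0 means the requested access is granted. If it is denied, WebHDFS reports the AccessControlException through its usual JSON RemoteException body (typically HTTP 403), and a nonexistent path yields a FileNotFoundException (HTTP 404).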
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 7d2b0ff704..68349a2ac6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -20,8 +20,11 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import java.io.FileNotFoundException;
import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
@@ -36,6 +39,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public void testPermissionChecking() throws Exception {
}
}
+ @Test
+ public void testAccessOwner() throws IOException, InterruptedException {
+ FileSystem rootFs = FileSystem.get(conf);
+ Path p1 = new Path("/p1");
+ rootFs.mkdirs(p1);
+ rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+ fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws Exception {
+ return FileSystem.get(conf);
+ }
+ });
+ fs.setPermission(p1, new FsPermission((short) 0444));
+ fs.access(p1, FsAction.READ);
+ try {
+ fs.access(p1, FsAction.WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+
+ Path badPath = new Path("/bad/bad");
+ try {
+ fs.access(badPath, FsAction.READ);
+ fail("The access call should have failed");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testAccessGroupMember() throws IOException, InterruptedException {
+ FileSystem rootFs = FileSystem.get(conf);
+ Path p2 = new Path("/p2");
+ rootFs.mkdirs(p2);
+ rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+ rootFs.setPermission(p2, new FsPermission((short) 0740));
+ fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws Exception {
+ return FileSystem.get(conf);
+ }
+ });
+ fs.access(p2, FsAction.READ);
+ try {
+ fs.access(p2, FsAction.EXECUTE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testAccessOthers() throws IOException, InterruptedException {
+ FileSystem rootFs = FileSystem.get(conf);
+ Path p3 = new Path("/p3");
+ rootFs.mkdirs(p3);
+ rootFs.setPermission(p3, new FsPermission((short) 0774));
+ fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws Exception {
+ return FileSystem.get(conf);
+ }
+ });
+ fs.access(p3, FsAction.READ);
+ try {
+ fs.access(p3, FsAction.READ_WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+ }
+
/* Check if namenode performs permission checking correctly
* for the given user for operations mkdir, open, setReplication,
* getFileInfo, isDirectory, exists, getContentLength, list, rename,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 25ec8c9eb0..bda95c0752 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -26,6 +26,7 @@
import static org.junit.Assert.fail;
import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -36,6 +37,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -47,6 +49,8 @@
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
@@ -297,7 +301,8 @@ public void runFsFun(String msg, FSRun f) {
* assert that they are either allowed or fail as expected.
*/
@Test
- public void testOperationsWhileInSafeMode() throws IOException {
+ public void testOperationsWhileInSafeMode() throws IOException,
+ InterruptedException {
final Path file1 = new Path("/file1");
assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
@@ -407,6 +412,22 @@ public void run(FileSystem fs) throws IOException {
fail("getAclStatus failed while in SM");
}
+ // Test access
+ UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
+ FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ @Override
+ public FileSystem run() throws IOException {
+ return FileSystem.get(conf);
+ }
+ });
+ myfs.access(file1, FsAction.READ);
+ try {
+ myfs.access(file1, FsAction.WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+
assertFalse("Could not leave SM",
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index f36483e642..1ddc774c84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -1256,6 +1257,33 @@ public void testGetAclStatusRequiresTraverseOrSuper() throws Exception {
fsAsDiana.getAclStatus(bruceFile);
}
+ @Test
+ public void testAccess() throws IOException, InterruptedException {
+ Path p1 = new Path("/p1");
+ fs.mkdirs(p1);
+ fs.setOwner(p1, BRUCE.getShortUserName(), "groupX");
+ fsAsBruce.setAcl(p1, Lists.newArrayList(
+ aclEntry(ACCESS, USER, READ),
+ aclEntry(ACCESS, USER, "bruce", READ),
+ aclEntry(ACCESS, GROUP, NONE),
+ aclEntry(ACCESS, OTHER, NONE)));
+ fsAsBruce.access(p1, FsAction.READ);
+ try {
+ fsAsBruce.access(p1, FsAction.WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+
+ Path badPath = new Path("/bad/bad");
+ try {
+ fsAsBruce.access(badPath, FsAction.READ);
+ fail("The access call should have failed");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ }
+
/**
* Creates a FileSystem for the super-user.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 704bc1669d..a739b7aa6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -45,6 +45,7 @@
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSClient;
@@ -581,6 +582,7 @@ public void testInodeIdBasedPaths() throws Exception {
fs.getAclStatus(testFileInodePath);
fs.getXAttrs(testFileInodePath);
fs.listXAttrs(testFileInodePath);
+ fs.access(testFileInodePath, FsAction.READ_WRITE);
}
// symbolic link related tests
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
index 3deb47ff3a..3be1d36ca5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -674,6 +675,13 @@ private static void assertDirPermissionDenied(FileSystem fs,
} catch (AccessControlException e) {
// expected
}
+
+ try {
+ fs.access(pathToCheck, FsAction.READ);
+ fail("The access call should have failed for "+pathToCheck);
+ } catch (AccessControlException e) {
+ // expected
+ }
}
/**
@@ -689,6 +697,7 @@ private static void assertDirPermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
+ fs.access(pathToCheck, FsAction.READ);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index 09f025c65a..46e433d6df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -49,6 +50,7 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert;
+import org.junit.Test;
public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
private static final Configuration conf = new Configuration();
@@ -530,4 +532,35 @@ public void testResponseCode() throws IOException {
}
}
}
+
+ @Test
+ public void testAccess() throws IOException, InterruptedException {
+ Path p1 = new Path("/pathX");
+ try {
+ UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha",
+ new String[]{"beta"});
+ WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf,
+ WebHdfsFileSystem.SCHEME);
+
+ fs.mkdirs(p1);
+ fs.setPermission(p1, new FsPermission((short) 0444));
+ fs.access(p1, FsAction.READ);
+ try {
+ fs.access(p1, FsAction.WRITE);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+
+ Path badPath = new Path("/bad");
+ try {
+ fs.access(badPath, FsAction.READ);
+ fail("The access call should have failed");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ } finally {
+ fs.delete(p1, true);
+ }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
index a84918e13d..45cd8fe3af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -40,6 +41,7 @@
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
@@ -283,6 +285,28 @@ public void testSecureProxyAuthParamsInUrl() throws IOException {
},
fileStatusUrl);
}
+
+ @Test(timeout=60000)
+ public void testCheckAccessUrl() throws IOException {
+ Configuration conf = new Configuration();
+
+ UserGroupInformation ugi =
+ UserGroupInformation.createRemoteUser("test-user");
+ UserGroupInformation.setLoginUser(ugi);
+
+ WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
+ Path fsPath = new Path("/p1");
+
+ URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
+ fsPath, new FsActionParam(FsAction.READ_WRITE));
+ checkQueryParams(
+ new String[]{
+ GetOpParam.Op.CHECKACCESS.toQueryString(),
+ new UserParam(ugi.getShortUserName()).toString(),
+ FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
+ },
+ checkAccessUrl);
+ }
private void checkQueryParams(String[] expected, URL url) {
Arrays.sort(expected);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
index 13a9610a34..bc41edc110 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
@@ -27,6 +27,7 @@
import static org.junit.Assert.fail;
import java.io.IOException;
+import java.io.FileNotFoundException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
@@ -39,6 +40,7 @@
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -393,4 +395,37 @@ public Object run() throws IOException {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
}
+
+ @Test
+ public void testAccess() throws Exception {
+ fs.setPermission(target, new FsPermission((short) 0002));
+ fs.setAcl(target, Arrays.asList(
+ aclEntry(ACCESS, USER, ALL),
+ aclEntry(ACCESS, GROUP, NONE),
+ aclEntry(ACCESS, USER, user.getShortUserName(), WRITE),
+ aclEntry(ACCESS, OTHER, WRITE)));
+ FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {
+ @Override
+ public FileContext run() throws IOException {
+ return FileContext.getFileContext(conf);
+ }
+ });
+
+ // Path to targetChild via symlink
+ myfc.access(link, FsAction.WRITE);
+ try {
+ myfc.access(link, FsAction.ALL);
+ fail("The access call should have failed.");
+ } catch (AccessControlException e) {
+ // expected
+ }
+
+ Path badPath = new Path(link, "bad");
+ try {
+ myfc.access(badPath, FsAction.READ);
+ fail("The access call should have failed");
+ } catch (FileNotFoundException e) {
+ // expected
+ }
+ }
}