HDFS-5136 MNT EXPORT should give the full group list which can mount the exports. Contributed by Brandon Li
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519222 13f79535-47bb-0310-9956-ffa450edef68
commit cbca166831 (parent 76cb07ee20)
MountResponse.java
@@ -19,6 +19,7 @@
 
 import java.util.List;
 
+import org.apache.hadoop.nfs.security.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor;
@@ -59,15 +60,28 @@ public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
     xdr.writeBoolean(false); // Value follows no
     return xdr;
   }
 
   /** Response for RPC call {@link MountInterface.MNTPROC#EXPORT} */
-  public static XDR writeExportList(XDR xdr, int xid, List<String> exports) {
+  public static XDR writeExportList(XDR xdr, int xid, List<String> exports,
+      List<NfsExports> hostMatcher) {
+    assert (exports.size() == hostMatcher.size());
     RpcAcceptedReply.voidReply(xdr, xid);
-    for (String export : exports) {
+    for (int i = 0; i < exports.size(); i++) {
       xdr.writeBoolean(true); // Value follows - yes
-      xdr.writeString(export);
-      xdr.writeInt(0);
+      xdr.writeString(exports.get(i));
+
+      // List host groups
+      String[] hostGroups = hostMatcher.get(i).getHostGroupList();
+      if (hostGroups.length > 0) {
+        for (int j = 0; j < hostGroups.length; j++) {
+          xdr.writeBoolean(true); // Value follows - yes
+          xdr.writeVariableOpaque(hostGroups[j].getBytes());
+        }
+      }
+      xdr.writeBoolean(false); // Value follows - no more group
     }
 
     xdr.writeBoolean(false); // Value follows - no
     return xdr;
   }
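Note (reviewer sketch, not part of the patch): the new overload pairs every export path with an NfsExports matcher so the MNT EXPORT reply can carry the group list. A minimal caller, assuming an NfsExports instance obtained from configuration elsewhere and the no-arg XDR constructor used in the oncrpc code:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.mount.MountResponse;
import org.apache.hadoop.nfs.security.NfsExports;
import org.apache.hadoop.oncrpc.XDR;

class ExportReplySketch {
  // Serialize an EXPORT reply for the single export "/" whose allowed
  // hosts are described by the given matcher (construction not shown).
  static XDR buildReply(int xid, NfsExports hostsMatcher) {
    List<String> exports = Collections.singletonList("/");
    List<NfsExports> matchers = Collections.singletonList(hostsMatcher);
    return MountResponse.writeExportList(new XDR(), xid, exports, matchers);
  }
}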
Nfs3Constant.java
@@ -192,13 +192,13 @@ public static WriteStableHow fromValue(int id) {
 
   public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   /** Allowed hosts for nfs exports */
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "hdfs.nfs.exports.allowed.hosts";
+  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "dfs.nfs.exports.allowed.hosts";
   public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
   /** Size for nfs exports cache */
-  public static final String EXPORTS_CACHE_SIZE_KEY = "hdfs.nfs.exports.cache.size";
+  public static final String EXPORTS_CACHE_SIZE_KEY = "dfs.nfs.exports.cache.size";
   public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512;
   /** Expiration time for nfs exports cache entry */
-  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "hdfs.nfs.exports.cache.expirytime.millis";
+  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.nfs.exports.cache.expirytime.millis";
   public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
 
   public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir";
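Note (illustrative, not part of the patch): the export keys move from the hdfs.nfs.* prefix to dfs.nfs.*. Entries in the allowed-hosts value take the form "host-spec privilege" and are separated by EXPORTS_ALLOWED_HOSTS_SEPARATOR (";"), with the default being "* rw". A hedged configuration sketch; the subnet value is made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

class ExportsConfigSketch {
  // One read-write subnet, read-only for everyone else (example values only).
  static Configuration allowSubnetRwOthersRo() {
    Configuration conf = new Configuration();
    conf.set(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY, "192.168.0.0/22 rw ; * ro");
    return conf;
  }
}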
AccessPrivilege.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.nfs.security;
+package org.apache.hadoop.nfs.security;
 
 public enum AccessPrivilege {
   READ_ONLY,
NfsExports.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.nfs.security;
+package org.apache.hadoop.nfs.security;
 
 import java.net.InetAddress;
 import java.util.ArrayList;
@@ -153,6 +153,19 @@ public long getExpirationTime() {
     }
   }
 
+  /**
+   * Return the configured group list
+   */
+  public String[] getHostGroupList() {
+    int listSize = mMatches.size();
+    String[] hostGroups = new String[listSize];
+
+    for (int i = 0; i < mMatches.size(); i++) {
+      hostGroups[i] = mMatches.get(i).getHostGroup();
+    }
+    return hostGroups;
+  }
+
   public AccessPrivilege getAccessPrivilege(InetAddress addr) {
     return getAccessPrivilege(addr.getHostAddress(),
         addr.getCanonicalHostName());
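Note (sketch, not in the patch): getHostGroupList() mirrors the configured entries in order, one group string per Match; for an allowed-hosts value like "192.168.0.0/22 rw ; * ro" it would yield the CIDR group (rendered as address/netmask by the subnet-based matcher further down, e.g. "192.168.0.0/255.255.252.0") followed by "*":

import org.apache.hadoop.nfs.security.NfsExports;

class HostGroupSketch {
  // Prints each configured host group; the NfsExports instance is assumed
  // to be built from configuration elsewhere (construction not shown here).
  static void printGroups(NfsExports matcher) {
    for (String group : matcher.getHostGroupList()) {
      System.out.println("allowed group: " + group);
    }
  }
}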
@@ -191,6 +204,7 @@ private Match(AccessPrivilege accessPrivilege) {
   }
 
   public abstract boolean isIncluded(String address, String hostname);
+  public abstract String getHostGroup();
 }
 
 /**
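Note (hypothetical example, not in the patch): every concrete matcher now reports the host-group string it was parsed from as well as deciding membership. Since Match's constructor is private, implementations live as nested classes of NfsExports; a new one would take this shape (LoopbackMatch is invented for illustration):

// Hypothetical nested matcher illustrating the expanded Match contract.
class LoopbackMatch extends Match {
  LoopbackMatch(AccessPrivilege accessPrivilege) {
    super(accessPrivilege);
  }

  @Override
  public boolean isIncluded(String address, String hostname) {
    return "127.0.0.1".equals(address); // membership decision
  }

  @Override
  public String getHostGroup() {
    return "127.0.0.1"; // the group string shown in MNT EXPORT replies
  }
}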
@@ -202,9 +216,14 @@ private AnonymousMatch(AccessPrivilege accessPrivilege) {
   }
 
   @Override
-  public boolean isIncluded(String ip, String hostname) {
+  public boolean isIncluded(String address, String hostname) {
     return true;
   }
+
+  @Override
+  public String getHostGroup() {
+    return "*";
+  }
 }
 
 /**
@@ -235,6 +254,11 @@ public boolean isIncluded(String address, String hostname) {
     }
     return false;
   }
+
+  @Override
+  public String getHostGroup() {
+    return subnetInfo.getAddress() + "/" + subnetInfo.getNetmask();
+  }
 }
 
 /**
@@ -264,6 +288,11 @@ public boolean isIncluded(String address, String hostname) {
     }
     return false;
   }
+
+  @Override
+  public String getHostGroup() {
+    return ipOrHost;
+  }
 }
 
 /**
@@ -293,6 +322,11 @@ public boolean isIncluded(String address, String hostname) {
     }
     return false;
   }
+
+  @Override
+  public String getHostGroup() {
+    return pattern.toString();
+  }
 }
 
 /**
TestNfsExports.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.nfs.security;
+package org.apache.hadoop.nfs.security;
 
 import junit.framework.Assert;
 
-import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
-import org.apache.hadoop.hdfs.nfs.security.NfsExports;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.junit.Test;
 
RpcProgramMountd.java
@@ -27,8 +27,6 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
-import org.apache.hadoop.hdfs.nfs.security.NfsExports;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
@@ -36,6 +34,8 @@
 import org.apache.hadoop.mount.MountResponse;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.security.AccessPrivilege;
+import org.apache.hadoop.nfs.security.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.RpcProgram;
@@ -184,7 +184,10 @@ public XDR handleInternal(RpcCall rpcCall, XDR xdr, XDR out,
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      out = MountResponse.writeExportList(out, xid, exports);
+      // Currently only support one NFS export "/"
+      List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
+      hostsMatchers.add(hostsMatcher);
+      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
     } else {
       // Invalid procedure
       RpcAcceptedReply.voidReply(out, xid,
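Note: the gateway currently exports only "/", so a singleton matcher list pairs that export with the gateway-wide hostsMatcher. The expected effect is that a client-side showmount -e against the gateway now lists the configured host groups for "/" instead of an empty group list, which is the point of HDFS-5136.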
RpcProgramNfs3.java
@@ -26,10 +26,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem.Statistics;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options;
@@ -38,8 +38,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege;
-import org.apache.hadoop.hdfs.nfs.security.NfsExports;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -98,6 +96,8 @@
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
+import org.apache.hadoop.nfs.security.AccessPrivilege;
+import org.apache.hadoop.nfs.security.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor;
 import org.apache.hadoop.oncrpc.RpcAuthSys;
CHANGES.txt
@@ -313,6 +313,9 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5078 Support file append in NFSv3 gateway to enable data streaming
     to HDFS (brandonli)
 
+    HDFS-5136 MNT EXPORT should give the full group list which can mount the
+    exports (brandonli)
+
   IMPROVEMENTS
 
     HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may