diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8dd5eb9d1a..e4783037e3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -417,6 +417,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9016. HarFsInputStream.skip(long) must never return negative value.
     (Ivan A. Veselovsky via jeagles)
 
+    HADOOP-10088. copy-nativedistlibs.sh needs to quote snappy lib dir.
+    (Raja Aluri via cnauroth)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
index ab75ef974e..d73122b59d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountEntry.java
@@ -21,9 +21,9 @@
  * Represents a mount entry.
  */
 public class MountEntry {
-  /** Host correspoinding to the mount entry */
+  /** Host corresponding to the mount entry */
   private final String host;
-  /** Path correspoinding to the mount entry */
+  /** Path corresponding to the mount entry */
   private final String path;
 
   public MountEntry(String host, String path) {
@@ -31,11 +31,11 @@ public MountEntry(String host, String path) {
     this.path = path;
   }
 
-  public String host() {
+  public String getHost() {
     return this.host;
   }
 
-  public String path() {
+  public String getPath() {
     return this.path;
   }
 
@@ -49,7 +49,7 @@ public boolean equals(Object o) {
     }
 
     MountEntry m = (MountEntry) o;
-    return host().equals(m.host()) && path().equals(m.path());
+    return getHost().equals(m.getHost()) && getPath().equals(m.getPath());
   }
 
   @Override
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
index 88b023c681..5e101c1de8 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
@@ -54,8 +54,8 @@ public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
     RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
-      xdr.writeString(mountEntry.host());
-      xdr.writeString(mountEntry.path());
+      xdr.writeString(mountEntry.getHost());
+      xdr.writeString(mountEntry.getPath());
     }
     xdr.writeBoolean(false); // Value follows no
     return xdr;
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 3878cbc74a..1203e893ef 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.mount;
 
 import java.io.IOException;
-import java.util.List;
 
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
@@ -34,6 +33,8 @@
  */
 abstract public class MountdBase {
   private final RpcProgram rpcProgram;
+  private int udpBoundPort; // Will be set after server starts
+  private int tcpBoundPort; // Will be set after server starts
 
   public RpcProgram getRpcProgram() {
     return rpcProgram;
   }
@@ -41,10 +42,10 @@ public RpcProgram getRpcProgram() {
 
   /**
    * Constructor
-   * @param exports
+   * @param program
    * @throws IOException
    */
-  public MountdBase(List<String> exports, RpcProgram program) throws IOException {
+  public MountdBase(RpcProgram program) throws IOException {
     rpcProgram = program;
   }
 
@@ -54,6 +55,7 @@ private void startUDPServer() {
         rpcProgram, 1);
     rpcProgram.startDaemons();
     udpServer.run();
+    udpBoundPort = udpServer.getBoundPort();
   }
 
   /* Start TCP server */
@@ -62,14 +64,15 @@ private void startTCPServer() {
         rpcProgram, 1);
     rpcProgram.startDaemons();
     tcpServer.run();
+    tcpBoundPort = tcpServer.getBoundPort();
   }
 
   public void start(boolean register) {
     startUDPServer();
     startTCPServer();
     if (register) {
-      rpcProgram.register(PortmapMapping.TRANSPORT_UDP);
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+      rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
+      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
     }
   }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 0089bb0f3e..2b6943aada 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -33,6 +33,7 @@ public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final RpcProgram rpcProgram;
   private final int nfsPort;
+  private int nfsBoundPort; // Will be set after server starts
 
   public RpcProgram getRpcProgram() {
     return rpcProgram;
@@ -40,20 +41,16 @@ public RpcProgram getRpcProgram() {
 
   protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
     this.rpcProgram = rpcProgram;
-    this.nfsPort = conf.getInt("nfs3.server.port", Nfs3Constant.PORT);
+    this.nfsPort = conf.getInt(Nfs3Constant.NFS3_SERVER_PORT,
+        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT);
     LOG.info("NFS server port set to: " + nfsPort);
   }
 
-  protected Nfs3Base(RpcProgram rpcProgram) {
-    this.rpcProgram = rpcProgram;
-    this.nfsPort = Nfs3Constant.PORT;
-  }
-
   public void start(boolean register) {
     startTCPServer(); // Start TCP server
     if (register) {
-      rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
     }
   }
 
@@ -62,5 +59,6 @@ private void startTCPServer() {
         rpcProgram, 0);
     rpcProgram.startDaemons();
     tcpServer.run();
+    nfsBoundPort = tcpServer.getBoundPort();
   }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
index d7a0b03735..b49aef462c 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
@@ -26,7 +26,8 @@ public class Nfs3Constant {
   public final static int SUN_RPCBIND = 111;
 
   // The IP port number for NFS.
-  public final static int PORT = 2049;
+  public final static String NFS3_SERVER_PORT = "nfs3.server.port";
+  public final static int NFS3_SERVER_PORT_DEFAULT = 2049;
 
   // The RPC program number for NFS.
   public final static int PROGRAM = 100003;
@@ -213,4 +214,7 @@ public static WriteStableHow fromValue(int id) {
 
   public final static String UNKNOWN_USER = "nobody";
   public final static String UNKNOWN_GROUP = "nobody";
+
+  public final static String EXPORT_POINT = "dfs.nfs3.export.point";
+  public final static String EXPORT_POINT_DEFAULT = "/";
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index a802417298..69adcedc7b 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -40,7 +40,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   public static final int RPCB_PORT = 111;
   private final String program;
   private final String host;
-  private final int port;
+  private int port; // Ephemeral port is chosen later
   private final int progNumber;
   private final int lowProgVersion;
   private final int highProgVersion;
@@ -68,22 +68,20 @@ protected RpcProgram(String program, String host, int port, int progNumber,
   /**
    * Register this program with the local portmapper.
    */
-  public void register(int transport) {
+  public void register(int transport, int boundPort) {
+    if (boundPort != port) {
+      LOG.info("The bound port is " + boundPort
+          + ", different from the configured port " + port);
+      port = boundPort;
+    }
     // Register all the program versions with portmapper for a given transport
     for (int vers = lowProgVersion; vers <= highProgVersion; vers++) {
-      register(vers, transport);
+      PortmapMapping mapEntry = new PortmapMapping(progNumber, vers, transport,
+          port);
+      register(mapEntry);
     }
   }
 
-  /**
-   * Register this program with the local portmapper.
-   */
-  private void register(int progVersion, int transport) {
-    PortmapMapping mapEntry = new PortmapMapping(progNumber, progVersion,
-        transport, port);
-    register(mapEntry);
-  }
-
   /**
    * Register the program with Portmap or Rpcbind
    */
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index 57ef77a95f..949fdca5dc 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -23,6 +23,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFactory;
 import org.jboss.netty.channel.ChannelPipeline;
 import org.jboss.netty.channel.ChannelPipelineFactory;
@@ -36,6 +37,7 @@
 public class SimpleTcpServer {
   public static final Log LOG = LogFactory.getLog(SimpleTcpServer.class);
   protected final int port;
+  protected int boundPort = -1; // Will be set after server starts
   protected final SimpleChannelUpstreamHandler rpcProgram;
 
   /** The maximum number of I/O worker threads */
@@ -79,9 +81,16 @@ public ChannelPipeline getPipeline() throws Exception {
     bootstrap.setOption("child.keepAlive", true);
 
     // Listen to TCP port
-    bootstrap.bind(new InetSocketAddress(port));
-
-    LOG.info("Started listening to TCP requests at port " + port + " for "
+    Channel ch = bootstrap.bind(new InetSocketAddress(port));
+    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
+    boundPort = socketAddr.getPort();
+
+    LOG.info("Started listening to TCP requests at port " + boundPort + " for "
         + rpcProgram + " with workerCount " + workerCount);
   }
+
+  // boundPort will be set only after server starts
+  public int getBoundPort() {
+    return this.boundPort;
+  }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
index 438eebc537..8e77fce36b 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServer.java
@@ -23,6 +23,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ConnectionlessBootstrap;
+import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
 import org.jboss.netty.channel.socket.DatagramChannelFactory;
@@ -39,6 +40,7 @@ public class SimpleUdpServer {
   protected final int port;
   protected final SimpleChannelUpstreamHandler rpcProgram;
   protected final int workerCount;
+  protected int boundPort = -1; // Will be set after server starts
 
   public SimpleUdpServer(int port, SimpleChannelUpstreamHandler program,
       int workerCount) {
     this.port = port;
@@ -61,9 +63,16 @@ public void run() {
     b.setOption("receiveBufferSize", RECEIVE_BUFFER_SIZE);
 
     // Listen to the UDP port
-    b.bind(new InetSocketAddress(port));
-
-    LOG.info("Started listening to UDP requests at port " + port + " for "
+    Channel ch = b.bind(new InetSocketAddress(port));
+    InetSocketAddress socketAddr = (InetSocketAddress) ch.getLocalAddress();
+    boundPort = socketAddr.getPort();
+
+    LOG.info("Started listening to UDP requests at port " + boundPort + " for "
         + rpcProgram + " with workerCount " + workerCount);
   }
+
+  // boundPort will be set only after server starts
+  public int getBoundPort() {
+    return this.boundPort;
+  }
 }
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
index 6a3e86c13d..21e99b6a95 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/Portmap.java
@@ -31,14 +31,14 @@ public class Portmap {
   public static final Log LOG = LogFactory.getLog(Portmap.class);
 
   private static void startUDPServer(RpcProgramPortmap rpcProgram) {
-    rpcProgram.register(PortmapMapping.TRANSPORT_UDP);
+    rpcProgram.register(PortmapMapping.TRANSPORT_UDP, RpcProgram.RPCB_PORT);
     SimpleUdpServer udpServer = new SimpleUdpServer(RpcProgram.RPCB_PORT,
         rpcProgram, 1);
     udpServer.run();
   }
 
   private static void startTCPServer(final RpcProgramPortmap rpcProgram) {
-    rpcProgram.register(PortmapMapping.TRANSPORT_TCP);
+    rpcProgram.register(PortmapMapping.TRANSPORT_TCP, RpcProgram.RPCB_PORT);
     SimpleTcpServer tcpServer = new SimpleTcpServer(RpcProgram.RPCB_PORT,
         rpcProgram, 1);
     tcpServer.run();
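Note on the oncrpc changes above: every server now follows the same ephemeral-port pattern — bind to the configured port (which may be 0), read the actual port back from the bound socket, and pass that value to register() instead of the configured one. A minimal standalone sketch of that pattern in plain java.net (illustrative only, not code from this patch; the class name is invented):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws IOException {
        ServerSocket server = new ServerSocket();
        server.bind(new InetSocketAddress(0)); // port 0: the OS picks a free port
        // The real port is known only after bind() returns; this is the value
        // that must be registered with the portmapper, not the configured one.
        int boundPort = server.getLocalPort();
        System.out.println("Bound to ephemeral port " + boundPort);
        server.close();
      }
    }

This is why SimpleTcpServer and SimpleUdpServer expose getBoundPort(), and why Portmap itself still registers RPCB_PORT: the portmapper listens on the well-known port 111 and never binds ephemerally.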
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
index 9fca31f258..98ec9f661d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.nfs.mount;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mount.MountdBase;
@@ -32,23 +30,14 @@
  * handle for requested directory and returns it to the client.
  */
 public class Mountd extends MountdBase {
-  /**
-   * Constructor
-   * @param exports
-   * @throws IOException
-   */
-  public Mountd(List<String> exports) throws IOException {
-    super(exports, new RpcProgramMountd(exports));
-  }
 
-  public Mountd(List<String> exports, Configuration config) throws IOException {
-    super(exports, new RpcProgramMountd(exports, config));
+  public Mountd(Configuration config) throws IOException {
+    super(new RpcProgramMountd(config));
   }
 
   public static void main(String[] args) throws IOException {
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Mountd mountd = new Mountd(exports);
+    Configuration config = new Configuration();
+    Mountd mountd = new Mountd(config);
     mountd.start(true);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index f8ac1dc1e4..b7e669a698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
@@ -49,6 +50,8 @@
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.ChannelHandlerContext;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * RPC program corresponding to mountd daemon. See {@link Mountd}.
  */
@@ -71,23 +74,15 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
 
   private final NfsExports hostsMatcher;
 
-  public RpcProgramMountd() throws IOException {
-    this(new ArrayList<String>(0));
-  }
-
-  public RpcProgramMountd(List<String> exports) throws IOException {
-    this(exports, new Configuration());
-  }
-
-  public RpcProgramMountd(List<String> exports, Configuration config)
-      throws IOException {
+  public RpcProgramMountd(Configuration config) throws IOException {
     // Note that RPC cache is not enabled
     super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
         PROGRAM, VERSION_1, VERSION_3);
-
+    exports = new ArrayList<String>();
+    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
+        Nfs3Constant.EXPORT_POINT_DEFAULT));
     this.hostsMatcher = NfsExports.getInstance(config);
     this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
-    this.exports = Collections.unmodifiableList(exports);
     this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
   }
 
@@ -200,7 +195,7 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export "/"
+      // Currently only supports one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
       hostsMatchers.add(hostsMatcher);
       out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
@@ -220,4 +215,9 @@ protected boolean isIdempotent(RpcCall call) {
     // Not required, because cache is turned off
     return false;
   }
+
+  @VisibleForTesting
+  public List<String> getExports() {
+    return this.exports;
+  }
 }
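Note: with this change the mountd export list is driven entirely by configuration; the old hard-coded "/" export survives only as the default value of dfs.nfs3.export.point. A hedged sketch of how the new key is consumed, mirroring the constructor above (the export path here is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

    public class ExportPointSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Export a sub-directory instead of the default root export "/".
        conf.set(Nfs3Constant.EXPORT_POINT, "/user/shared");
        String export = conf.get(Nfs3Constant.EXPORT_POINT,
            Nfs3Constant.EXPORT_POINT_DEFAULT);
        System.out.println("mountd will export: " + export);
      }
    }

Only a single export point is supported, so the exports list built in the constructor always has exactly one entry; TestExportsTable below verifies this.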
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
index 420ab8825f..08fd77c34d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/AsyncDataService.java
@@ -46,6 +46,7 @@ public class AsyncDataService {
 
   public AsyncDataService() {
     threadFactory = new ThreadFactory() {
+      @Override
       public Thread newThread(Runnable r) {
         return new Thread(threadGroup, r);
       }
@@ -129,6 +130,7 @@ public String toString() {
           + openFileCtx.getNextOffset();
     }
 
+    @Override
     public void run() {
       try {
         openFileCtx.executeWriteBack();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
index c7265ea2dd..8ec99de486 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
@@ -118,6 +118,7 @@ public DFSClient load(String userName) throws Exception {
 
     // Guava requires CacheLoader never returns null.
     return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
+      @Override
       public DFSClient run() throws IOException {
         return new DFSClient(NameNode.getAddress(config), config);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
index b81504bddc..241090528f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
-import org.apache.hadoop.mount.MountdBase;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
 
@@ -42,28 +39,24 @@ public class Nfs3 extends Nfs3Base {
     Configuration.addDefaultResource("hdfs-site.xml");
   }
 
-  public Nfs3(List<String> exports) throws IOException {
-    super(new RpcProgramNfs3());
-    mountd = new Mountd(exports);
-  }
-
-  @VisibleForTesting
-  public Nfs3(List<String> exports, Configuration config) throws IOException {
-    super(new RpcProgramNfs3(config), config);
-    mountd = new Mountd(exports, config);
+  public Nfs3(Configuration conf) throws IOException {
+    super(new RpcProgramNfs3(conf), conf);
+    mountd = new Mountd(conf);
   }
 
   public Mountd getMountd() {
     return mountd;
   }
 
+  @VisibleForTesting
+  public void startServiceInternal(boolean register) throws IOException {
+    mountd.start(register); // Start mountd
+    start(register);
+  }
+
   public static void main(String[] args) throws IOException {
-    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-
-    final Nfs3 nfsServer = new Nfs3(exports);
-    nfsServer.mountd.start(true); // Start mountd
-    nfsServer.start(true);
+    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+    final Nfs3 nfsServer = new Nfs3(new Configuration());
+    nfsServer.startServiceInternal(true);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 5941238257..91ce8ef24d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -163,12 +163,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   private final RpcCallCache rpcCallCache;
 
-  public RpcProgramNfs3() throws IOException {
-    this(new Configuration());
-  }
-
   public RpcProgramNfs3(Configuration config) throws IOException {
-    super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
+    super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
+        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
         Nfs3Constant.VERSION, Nfs3Constant.VERSION);
 
     config.set(FsPermission.UMASK_LABEL, "000");
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
index 977902a684..1a31998f35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
@@ -20,8 +20,6 @@
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,11 +43,13 @@ public void testStart() throws IOException {
         .build();
     cluster.waitActive();
 
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
     // Start nfs
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Nfs3 nfs3 = new Nfs3(exports, config);
-    nfs3.start(false);
+    Nfs3 nfs3 = new Nfs3(config);
+    nfs3.startServiceInternal(false);
 
     RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
         .getRpcProgram();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
index 1f0ba43f39..578539886d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
@@ -23,6 +23,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
@@ -154,7 +155,9 @@ public static void main(String[] args) throws InterruptedException {
     Arrays.fill(data3, (byte) 9);
 
     // NFS3 Create request
-    WriteClient client = new WriteClient("localhost", Nfs3Constant.PORT,
+    Configuration conf = new Configuration();
+    WriteClient client = new WriteClient("localhost", conf.getInt(
+        Nfs3Constant.NFS3_SERVER_PORT, Nfs3Constant.NFS3_SERVER_PORT_DEFAULT),
         create(), false);
     client.run();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
index 8fdf4bb7f2..e1493b60ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
@@ -22,7 +22,6 @@
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -67,11 +66,13 @@ public static void setup() throws Exception {
     hdfs = cluster.getFileSystem();
     nn = cluster.getNameNode();
 
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
     // Start nfs
-    List<String> exports = new ArrayList<String>();
-    exports.add("/");
-    Nfs3 nfs3 = new Nfs3(exports, config);
-    nfs3.start(false);
+    Nfs3 nfs3 = new Nfs3(config);
+    nfs3.startServiceInternal(false);
 
     nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
new file mode 100644
index 0000000000..5180d3ad38
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.nfs.nfs3;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.mount.Mountd;
+import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.junit.Test;
+
+public class TestExportsTable {
+
+  @Test
+  public void testExportPoint() throws IOException {
+    Configuration config = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    String exportPoint = "/myexport1";
+    config.setStrings(Nfs3Constant.EXPORT_POINT, exportPoint);
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
+    try {
+      cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+      cluster.waitActive();
+
+      // Start nfs
+      final Nfs3 nfsServer = new Nfs3(config);
+      nfsServer.startServiceInternal(false);
+
+      Mountd mountd = nfsServer.getMountd();
+      RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
+      assertTrue(rpcMount.getExports().size() == 1);
+
+      String exportInMountd = rpcMount.getExports().get(0);
+      assertTrue(exportInMountd.equals(exportPoint));
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
index e721db69c1..c445136456 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
@@ -23,10 +23,7 @@
 import java.io.IOException;
 import java.net.InetAddress;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 
 import junit.framework.Assert;
@@ -215,11 +212,13 @@ public void testWriteStableHow() throws IOException, InterruptedException {
       cluster.waitActive();
       client = new DFSClient(NameNode.getAddress(config), config);
 
+      // Use ephemeral ports in case tests are running in parallel
+      config.setInt("nfs3.mountd.port", 0);
+      config.setInt("nfs3.server.port", 0);
+
       // Start nfs
-      List<String> exports = new ArrayList<String>();
-      exports.add("/");
-      Nfs3 nfs3 = new Nfs3(exports, config);
-      nfs3.start(false);
+      Nfs3 nfs3 = new Nfs3(config);
+      nfs3.startServiceInternal(false);
       nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
 
       HdfsFileStatus status = client.getFileInfo("/");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index de8f452562..1918cac2e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -361,6 +361,9 @@ Trunk (Unreleased)
 
     HDFS-5394. Fix race conditions in DN caching and uncaching (cmccabe)
 
+    HDFS-5482. DistributedFileSystem#listPathBasedCacheDirectives must support
+    relative paths. (Colin Patrick McCabe via cnauroth)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -610,6 +613,9 @@ Release 2.2.1 - UNRELEASED
 
     HDFS-5364. Add OpenFileCtx cache. (brandonli)
 
+    HDFS-5469. Add configuration property for the sub-directory export path
+    (brandonli)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ed235aab66..c285dd574c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1637,9 +1637,8 @@ public RemoteIterator<PathBasedCacheDirective> listPathBasedCacheDirectives(
     }
     if (filter.getPath() != null) {
       filter = new PathBasedCacheDirective.Builder(filter).
-          setPath(filter.getPath().
-              makeQualified(getUri(), filter.getPath())).
-          build();
+          setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).
+          build();
     }
     final RemoteIterator<PathBasedCacheDirective> iter =
         dfs.listPathBasedCacheDirectives(filter);
@@ -1651,8 +1650,11 @@ public boolean hasNext() throws IOException {
       @Override
       public PathBasedCacheDirective next() throws IOException {
+        // Although the paths we get back from the NameNode should always be
+        // absolute, we call makeQualified to add the scheme and authority of
+        // this DistributedFileSystem.
         PathBasedCacheDirective desc = iter.next();
-        Path p = desc.getPath().makeQualified(getUri(), desc.getPath());
+        Path p = desc.getPath().makeQualified(getUri(), getWorkingDirectory());
         return new PathBasedCacheDirective.Builder(desc).setPath(p).build();
       }
     };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
index 8d389b4b74..54dd7a97b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml
@@ -335,7 +335,7 @@
         <command>-addDirective -path /bar -pool pool1</command>
         <command>-addDirective -path /foo -pool pool2</command>
         <command>-addDirective -path /bar -pool pool2</command>
-        <command>-removeDirectives -path /foo</command>
+        <command>-removeDirectives -path ../../foo</command>
         <command>-listDirectives</command>
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 103bcc52a0..dcb3875a12 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -346,7 +346,7 @@
                       cd $${LIB_DIR}
                       $$TAR lib* | (cd $${TARGET_DIR}/; $$UNTAR)
                       if [ "${bundle.snappy}" = "true" ] ; then
-                        cd ${snappy.lib}
+                        cd "${snappy.lib}"
                         $$TAR *snappy* | (cd $${TARGET_DIR}/; $$UNTAR)
                       fi
                     fi
@@ -358,7 +358,7 @@
                       $$TAR * | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
                       if [ "${bundle.snappy.in.bin}" = "true" ] ; then
                         if [ "${bundle.snappy}" = "true" ] ; then
-                          cd ${snappy.lib}
+                          cd "${snappy.lib}"
                           $$TAR *snappy* | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
                         fi
                       fi
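Note on the HDFS-5482 change above: it hinges on Path.makeQualified() resolving a relative path against the working directory and stamping it with the filesystem's scheme and authority. A standalone sketch (the URI and working directory are invented for illustration):

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class QualifyPathSketch {
      public static void main(String[] args) {
        URI fsUri = URI.create("hdfs://namenode:8020");
        Path workingDir = new Path("/user/alice");
        Path relative = new Path("../../foo");
        // Resolves against /user/alice, walking up two levels:
        // prints hdfs://namenode:8020/foo
        System.out.println(relative.makeQualified(fsUri, workingDir));
      }
    }

This is also what the testCacheAdminConf.xml change exercises: with a working directory two levels deep (e.g. /user/<name>), the relative path ../../foo names the same directive as the absolute /foo.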