diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index 3944ad1282..32a8cd3d97 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -162,7 +162,7 @@ public synchronized Text getKind() {
   /**
    * Set the token kind. This is only intended to be used by services that
-   * wrap another service's token, such as HFTP wrapping HDFS.
+   * wrap another service's token.
    * @param newKind
    */
   @InterfaceAudience.Private
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6740c8bac7..73b4ff5751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -9,6 +9,9 @@ Trunk (Unreleased)
     HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from
     DatanodeProtocolProtos. (Tao Luo via shv)
 
+    HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
+    (wheat9)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
index 293611e377..e9b24ca012 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSelector.java
@@ -41,7 +41,7 @@ public class DelegationTokenSelector
    * Select the delegation token for hdfs. The port will be rewritten to
    * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port.
    * This method should only be called by non-hdfs filesystems that do not
-   * use the rpc port to acquire tokens. Ex. webhdfs, hftp
+   * use the rpc port to acquire tokens. Ex. webhdfs
    * @param nnUri of the remote namenode
    * @param tokens as a collection
    * @param conf hadoop configuration
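A minimal sketch (not part of this change) of the pattern the Token#setKind javadoc above still describes once the hftp example is gone: a filesystem that wraps another service's token resets the token kind so the token machinery can route it back to the right scheme. The TOKEN_KIND constants are referenced elsewhere in this patch (see TokenAspect below); the helper class and method here are hypothetical.

    import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
    import org.apache.hadoop.security.token.Token;

    class TokenKindSketch {
      // Rebrand a wrapped token so TokenAspect#getSchemeByKind (later in this
      // patch) maps it back to webhdfs:// or swebhdfs://.
      static void setTransportKind(Token<?> token, boolean secure) {
        token.setKind(secure ? SWebHdfsFileSystem.TOKEN_KIND
                             : WebHdfsFileSystem.TOKEN_KIND);
      }
    }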
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d05a0ebe42..56a046f890 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -58,8 +58,6 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.resources.DatanodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
-import org.apache.hadoop.hdfs.server.namenode.StreamFile;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -361,10 +359,6 @@ private void startInfoServer(Configuration conf) throws IOException {
 
     this.infoServer = builder.build();
 
-    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
-    this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
-        FileChecksumServlets.GetServlet.class);
-
     this.infoServer.setAttribute("datanode", this);
     this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
     this.infoServer.addServlet(null, "/blockScannerReport",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index a2d247dcaa..87abc6b858 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -781,7 +781,7 @@ public InetSocketAddress getServiceRpcAddress() {
 
   /**
    * @return NameNode HTTP address, used by the Web UI, image transfer,
-   *    and HTTP-based file system clients like Hftp and WebHDFS
+   *    and HTTP-based file system clients like WebHDFS
    */
   public InetSocketAddress getHttpAddress() {
     return httpServer.getHttpAddress();
@@ -789,7 +789,7 @@ public InetSocketAddress getHttpAddress() {
 
   /**
    * @return NameNode HTTPS address, used by the Web UI, image transfer,
-   *    and HTTP-based file system clients like Hftp and WebHDFS
+   *    and HTTP-based file system clients like WebHDFS
    */
   public InetSocketAddress getHttpsAddress() {
     return httpServer.getHttpsAddress();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index eb21197b2f..afcdf26ac4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -229,27 +229,10 @@ void setStartupProgress(StartupProgress prog) {
   private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
     httpServer.addInternalServlet("startupProgress",
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
-    httpServer.addInternalServlet("getDelegationToken",
-        GetDelegationTokenServlet.PATH_SPEC,
-        GetDelegationTokenServlet.class, true);
-    httpServer.addInternalServlet("renewDelegationToken",
-        RenewDelegationTokenServlet.PATH_SPEC,
-        RenewDelegationTokenServlet.class, true);
-    httpServer.addInternalServlet("cancelDelegationToken",
-        CancelDelegationTokenServlet.PATH_SPEC,
-        CancelDelegationTokenServlet.class, true);
     httpServer.addInternalServlet("fsck", "/fsck",
         FsckServlet.class, true);
     httpServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
         ImageServlet.class, true);
-    httpServer.addInternalServlet("listPaths", "/listPaths/*",
-        ListPathsServlet.class, false);
-    httpServer.addInternalServlet("data", "/data/*",
-        FileDataServlet.class, false);
-    httpServer.addInternalServlet("checksum", "/fileChecksum/*",
-        FileChecksumServlets.RedirectServlet.class, false);
-    httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
-        ContentSummaryServlet.class, false);
   }
 
   static FSImage getFsImageFromContext(ServletContext context) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 438712abcc..4b35a757f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -17,17 +17,11 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.PrintStream;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Date;
@@ -43,23 +37,16 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
-import org.apache.hadoop.hdfs.web.HsftpFileSystem;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
+
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 
-import com.google.common.base.Charsets;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Fetch a DelegationToken from the current Namenode and store it in the
@@ -67,61 +54,38 @@
  */
 @InterfaceAudience.Private
 public class DelegationTokenFetcher {
-  private static final Log LOG =
-      LogFactory.getLog(DelegationTokenFetcher.class);
   private static final String WEBSERVICE = "webservice";
-  private static final String RENEWER = "renewer";
   private static final String CANCEL = "cancel";
-  private static final String RENEW = "renew";
-  private static final String PRINT = "print";
   private static final String HELP = "help";
   private static final String HELP_SHORT = "h";
+  private static final Log LOG = LogFactory
+      .getLog(DelegationTokenFetcher.class);
+  private static final String PRINT = "print";
+  private static final String RENEW = "renew";
+  private static final String RENEWER = "renewer";
 
-  private static void printUsage(PrintStream err) {
-    err.println("fetchdt retrieves delegation tokens from the NameNode");
-    err.println();
-    err.println("fetchdt ");
-    err.println("Options:");
-    err.println(" --webservice Url to contact NN on");
-    err.println(" --renewer Name of the delegation token renewer");
-    err.println(" --cancel Cancel the delegation token");
-    err.println(" --renew Renew the delegation token. Delegation "
-        + "token must have been fetched using the --renewer option.");
-    err.println(" --print Print the delegation token");
-    err.println();
-    GenericOptionsParser.printGenericCommandUsage(err);
-    ExitUtil.terminate(1);
-  }
-
-  private static Collection<Token<?>> readTokens(Path file, Configuration conf)
-      throws IOException {
-    Credentials creds = Credentials.readTokenStorageFile(file, conf);
-    return creds.getAllTokens();
-  }
-
   /**
    * Command-line interface
    */
   public static void main(final String[] args) throws Exception {
     final Configuration conf = new HdfsConfiguration();
     Options fetcherOptions = new Options();
-    fetcherOptions.addOption(WEBSERVICE, true,
-        "HTTP url to reach the NameNode at");
-    fetcherOptions.addOption(RENEWER, true,
-        "Name of the delegation token renewer");
-    fetcherOptions.addOption(CANCEL, false, "cancel the token");
-    fetcherOptions.addOption(RENEW, false, "renew the token");
-    fetcherOptions.addOption(PRINT, false, "print the token");
-    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
+    fetcherOptions
+        .addOption(WEBSERVICE, true, "HTTP url to reach the NameNode at")
+        .addOption(RENEWER, true, "Name of the delegation token renewer")
+        .addOption(CANCEL, false, "cancel the token")
+        .addOption(RENEW, false, "renew the token")
+        .addOption(PRINT, false, "print the token")
+        .addOption(HELP_SHORT, HELP, false, "print out help information");
+
     GenericOptionsParser parser = new GenericOptionsParser(conf,
-        fetcherOptions, args);
+      fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
-
-    // get options
+
     final String webUrl = cmd.hasOption(WEBSERVICE) ? cmd
-        .getOptionValue(WEBSERVICE) : null;
-    final String renewer = cmd.hasOption(RENEWER) ?
-        cmd.getOptionValue(RENEWER) : null;
+      .getOptionValue(WEBSERVICE) : null;
+    final String renewer = cmd.hasOption(RENEWER) ? cmd.getOptionValue
+      (RENEWER) : null;
     final boolean cancel = cmd.hasOption(CANCEL);
     final boolean renew = cmd.hasOption(RENEW);
     final boolean print = cmd.hasOption(PRINT);
@@ -133,8 +97,9 @@ public static void main(final String[] args) throws Exception {
       printUsage(System.out);
       System.exit(0);
     }
-    if (cancel && renew || cancel && print || renew && print || cancel && renew
-        && print) {
+
+    int commandCount = (cancel ? 1 : 0) + (renew ? 1 : 0) + (print ? 1 : 0);
+    if (commandCount > 1) {
       System.err.println("ERROR: Only specify cancel, renew or print.");
       printUsage(System.err);
     }
@@ -145,248 +110,118 @@ public static void main(final String[] args) throws Exception {
     // default to using the local file system
     FileSystem local = FileSystem.getLocal(conf);
     final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
-    final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
 
     // Login the current user
-    UserGroupInformation.getCurrentUser().doAs(
-        new PrivilegedExceptionAction() {
-          @Override
-          public Object run() throws Exception {
-
-            if (print) {
-              DelegationTokenIdentifier id = new DelegationTokenSecretManager(
-                  0, 0, 0, 0, null).createIdentifier();
-              for (Token token : readTokens(tokenFile, conf)) {
-                DataInputStream in = new DataInputStream(
-                    new ByteArrayInputStream(token.getIdentifier()));
-                id.readFields(in);
-                System.out.println("Token (" + id + ") for " +
-                    token.getService());
-              }
-            } else if (cancel) {
-              for(Token token: readTokens(tokenFile, conf)) {
-                if (token.isManaged()) {
-                  token.cancel(conf);
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token for " + token.getService());
-                  }
-                }
-              }
-            } else if (renew) {
-              for (Token token : readTokens(tokenFile, conf)) {
-                if (token.isManaged()) {
-                  long result = token.renew(conf);
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug("Renewed token for " + token.getService()
-                        + " until: " + new Date(result));
-                  }
-                }
-              }
-            } else {
-              // otherwise we are fetching
-              if (webUrl != null) {
-                Credentials creds = getDTfromRemote(connectionFactory, new URI(
-                    webUrl), renewer, null);
-                creds.writeTokenStorageFile(tokenFile, conf);
-                for (Token token : creds.getAllTokens()) {
-                  if(LOG.isDebugEnabled()) {
-                    LOG.debug("Fetched token via " + webUrl + " for "
-                        + token.getService() + " into " + tokenFile);
-                  }
-                }
-              } else {
-                FileSystem fs = FileSystem.get(conf);
-                Credentials cred = new Credentials();
-                Token tokens[] = fs.addDelegationTokens(renewer, cred);
-                cred.writeTokenStorageFile(tokenFile, conf);
-                if(LOG.isDebugEnabled()) {
-                  for (Token token : tokens) {
-                    LOG.debug("Fetched token for " + token.getService()
-                        + " into " + tokenFile);
-                  }
-                }
-              }
-            }
-            return null;
-          }
-        });
+    UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction() {
+      @Override
+      public Object run() throws Exception {
+        if (print) {
+          printTokens(conf, tokenFile);
+        } else if (cancel) {
+          cancelTokens(conf, tokenFile);
+        } else if (renew) {
+          renewTokens(conf, tokenFile);
+        } else {
+          // otherwise we are fetching
+          FileSystem fs = getFileSystem(conf, webUrl);
+          saveDelegationToken(conf, fs, renewer, tokenFile);
+        }
+        return null;
+      }
+    });
   }
-
-  static public Credentials getDTfromRemote(URLConnectionFactory factory,
-      URI nnUri, String renewer, String proxyUser) throws IOException {
-    StringBuilder buf = new StringBuilder(nnUri.toString())
-        .append(GetDelegationTokenServlet.PATH_SPEC);
-    String separator = "?";
-    if (renewer != null) {
-      buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
-          .append(renewer);
-      separator = "&";
-    }
-    if (proxyUser != null) {
-      buf.append(separator).append("doas=").append(proxyUser);
+
+  private static FileSystem getFileSystem(Configuration conf, String url)
+      throws IOException {
+    if (url == null) {
+      return FileSystem.get(conf);
     }
-    boolean isHttps = nnUri.getScheme().equals("https");
+    // For backward compatibility
+    URI fsUri = URI.create(
+        url.replaceFirst("^http://", WebHdfsFileSystem.SCHEME + "://")
+            .replaceFirst("^https://", SWebHdfsFileSystem.SCHEME + "://"));
 
-    HttpURLConnection conn = null;
-    DataInputStream dis = null;
-    InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnUri
-        .getAuthority());
+    return FileSystem.get(fsUri, conf);
   }
 
-    try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Retrieving token from: " + buf);
-      }
-
-      conn = run(factory, new URL(buf.toString()));
-      InputStream in = conn.getInputStream();
-      Credentials ts = new Credentials();
-      dis = new DataInputStream(in);
-      ts.readFields(dis);
-      for (Token token : ts.getAllTokens()) {
-        token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
-        SecurityUtil.setTokenService(token, serviceAddr);
-      }
-      return ts;
-    } catch (Exception e) {
-      throw new IOException("Unable to obtain remote token", e);
-    } finally {
-      IOUtils.cleanup(LOG, dis);
-      if (conn != null) {
-        conn.disconnect();
+  @VisibleForTesting
+  static void cancelTokens(final Configuration conf, final Path tokenFile)
+      throws IOException, InterruptedException {
+    for (Token token : readTokens(tokenFile, conf)) {
+      if (token.isManaged()) {
+        token.cancel(conf);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Cancelled token for " + token.getService());
+        }
       }
     }
   }
 
-  /**
-   * Cancel a Delegation Token.
-   * @param nnAddr the NameNode's address
-   * @param tok the token to cancel
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  static public void cancelDelegationToken(URLConnectionFactory factory,
-      URI nnAddr, Token tok) throws IOException,
-      AuthenticationException {
-    StringBuilder buf = new StringBuilder(nnAddr.toString())
-        .append(CancelDelegationTokenServlet.PATH_SPEC).append("?")
-        .append(CancelDelegationTokenServlet.TOKEN).append("=")
-        .append(tok.encodeToUrlString());
-    HttpURLConnection conn = run(factory, new URL(buf.toString()));
-    conn.disconnect();
-  }
-
-  /**
-   * Renew a Delegation Token.
-   * @param nnAddr the NameNode's address
-   * @param tok the token to renew
-   * @return the Date that the token will expire next.
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  static public long renewDelegationToken(URLConnectionFactory factory,
-      URI nnAddr, Token tok) throws IOException,
-      AuthenticationException {
-    StringBuilder buf = new StringBuilder(nnAddr.toString())
-        .append(RenewDelegationTokenServlet.PATH_SPEC).append("?")
-        .append(RenewDelegationTokenServlet.TOKEN).append("=")
-        .append(tok.encodeToUrlString());
-
-    HttpURLConnection connection = null;
-    BufferedReader in = null;
-    try {
-      connection = run(factory, new URL(buf.toString()));
-      in = new BufferedReader(new InputStreamReader(
-          connection.getInputStream(), Charsets.UTF_8));
-      long result = Long.parseLong(in.readLine());
-      return result;
-    } catch (IOException ie) {
-      LOG.info("error in renew over HTTP", ie);
-      IOException e = getExceptionFromResponse(connection);
-
-      if (e != null) {
-        LOG.info("rethrowing exception from HTTP request: "
-            + e.getLocalizedMessage());
-        throw e;
-      }
-      throw ie;
-    } finally {
-      IOUtils.cleanup(LOG, in);
-      if (connection != null) {
-        connection.disconnect();
+  @VisibleForTesting
+  static void renewTokens(final Configuration conf, final Path tokenFile)
+      throws IOException, InterruptedException {
+    for (Token token : readTokens(tokenFile, conf)) {
+      if (token.isManaged()) {
+        long result = token.renew(conf);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Renewed token for " + token.getService() + " until: "
+              + new Date(result));
+        }
       }
     }
   }
 
-  // parse the message and extract the name of the exception and the message
-  static private IOException getExceptionFromResponse(HttpURLConnection con) {
-    IOException e = null;
-    String resp;
-    if(con == null)
-      return null;
-
-    try {
-      resp = con.getResponseMessage();
-    } catch (IOException ie) { return null; }
-    if(resp == null || resp.isEmpty())
-      return null;
+  @VisibleForTesting
+  static void saveDelegationToken(Configuration conf, FileSystem fs,
+      final String renewer, final Path tokenFile)
+      throws IOException {
+    Token token = fs.getDelegationToken(renewer);
 
-    String exceptionClass = "", exceptionMsg = "";
-    String[] rs = resp.split(";");
-    if(rs.length < 2)
-      return null;
-    exceptionClass = rs[0];
-    exceptionMsg = rs[1];
-    LOG.info("Error response from HTTP request=" + resp +
-        ";ec=" + exceptionClass + ";em="+exceptionMsg);
-
-    if(exceptionClass == null || exceptionClass.isEmpty())
-      return null;
-
-    // recreate exception objects
-    try {
-      Class ec =
-          Class.forName(exceptionClass).asSubclass(Exception.class);
-      // we are interested in constructor with String arguments
-      java.lang.reflect.Constructor constructor =
-          ec.getConstructor (new Class[] {String.class});
+    Credentials cred = new Credentials();
+    cred.addToken(token.getKind(), token);
+    cred.writeTokenStorageFile(tokenFile, conf);
 
-      // create an instance
-      e = (IOException) constructor.newInstance (exceptionMsg);
-
-    } catch (Exception ee) {
-      LOG.warn("failed to create object of this class", ee);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Fetched token " + fs.getUri() + " for " + token.getService()
+          + " into " + tokenFile);
    }
-    if(e == null)
-      return null;
-
-    e.setStackTrace(new StackTraceElement[0]); // local stack is not relevant
-    LOG.info("Exception from HTTP response=" + e.getLocalizedMessage());
-    return e;
   }
 
-  private static HttpURLConnection run(URLConnectionFactory factory, URL url)
-      throws IOException, AuthenticationException {
-    HttpURLConnection conn = null;
-
-    try {
-      conn = (HttpURLConnection) factory.openConnection(url, true);
-      if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
-        String msg = conn.getResponseMessage();
-
-        throw new IOException("Error when dealing remote token: " + msg);
-      }
-    } catch (IOException ie) {
-      LOG.info("Error when dealing remote token:", ie);
-      IOException e = getExceptionFromResponse(conn);
-
-      if (e != null) {
-        LOG.info("rethrowing exception from HTTP request: "
-            + e.getLocalizedMessage());
-        throw e;
-      }
-      throw ie;
+  private static void printTokens(final Configuration conf,
+      final Path tokenFile)
+      throws IOException {
+    DelegationTokenIdentifier id = new DelegationTokenSecretManager(0, 0, 0,
+        0, null).createIdentifier();
+    for (Token token : readTokens(tokenFile, conf)) {
+      DataInputStream in = new DataInputStream(new ByteArrayInputStream(token
+          .getIdentifier()));
+      id.readFields(in);
+      System.out.println("Token (" + id + ") for " + token.getService());
     }
-    return conn;
+  }
+
+  private static void printUsage(PrintStream err) {
+    err.println("fetchdt retrieves delegation tokens from the NameNode");
+    err.println();
+    err.println("fetchdt ");
+    err.println("Options:");
+    err.println(" --webservice Url to contact NN on (starts with "
+        + "http:// or https://)");
+    err.println(" --renewer Name of the delegation token renewer");
+    err.println(" --cancel Cancel the delegation token");
+    err.println(" --renew Renew the delegation token. "
+        + "Delegation " + "token must have been fetched using the --renewer"
+        + " option.");
+    err.println(" --print Print the delegation token");
+    err.println();
+    GenericOptionsParser.printGenericCommandUsage(err);
+    ExitUtil.terminate(1);
+  }
+
+  private static Collection<Token<?>> readTokens(Path file, Configuration conf)
+      throws IOException {
+    Credentials creds = Credentials.readTokenStorageFile(file, conf);
+    return creds.getAllTokens();
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index 1a258f062a..fc18dbe72c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -57,9 +57,7 @@ public void cancel(Token token, Configuration conf) throws IOException {
 
     @Override
     public boolean handleKind(Text kind) {
-      return kind.equals(HftpFileSystem.TOKEN_KIND)
-          || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+      return kind.equals(WebHdfsFileSystem.TOKEN_KIND)
           || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
@@ -89,11 +87,7 @@ private TokenManagementDelegator getInstance(Token token,
   }
 
   private static String getSchemeByKind(Text kind) {
-    if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
-      return HftpFileSystem.SCHEME;
-    } else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
-      return HsftpFileSystem.SCHEME;
-    } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
+    if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
       return WebHdfsFileSystem.SCHEME;
     } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
      return SWebHdfsFileSystem.SCHEME;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
index 78fd925f1b..abe2bfc4dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -14,7 +14,5 @@
 # limitations under the License.
 
 org.apache.hadoop.hdfs.DistributedFileSystem
-org.apache.hadoop.hdfs.web.HftpFileSystem
-org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
 org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index df545cbc5d..6c2a5a6d83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -93,7 +93,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
@@ -1889,36 +1888,6 @@ public String getHttpUri(int nnIndex) {
         + nameNodes[nnIndex].conf
             .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
   }
-
-  /**
-   * @return a {@link HftpFileSystem} object.
-   */
-  public HftpFileSystem getHftpFileSystem(int nnIndex) throws IOException {
-    String uri = "hftp://"
-        + nameNodes[nnIndex].conf
-            .get(DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    try {
-      return (HftpFileSystem)FileSystem.get(new URI(uri), conf);
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * @return a {@link HftpFileSystem} object as specified user.
-   */
-  public HftpFileSystem getHftpFileSystemAs(final String username,
-      final Configuration conf, final int nnIndex, final String... groups)
-      throws IOException, InterruptedException {
-    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
-        username, groups);
-    return ugi.doAs(new PrivilegedExceptionAction() {
-      @Override
-      public HftpFileSystem run() throws Exception {
-        return getHftpFileSystem(nnIndex);
-      }
-    });
-  }
 
   /**
    * Get the directories where the namenode stores its image.
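With getHftpFileSystem removed from MiniDFSCluster, tests reach the NameNode over HTTP through webhdfs instead. A minimal sketch of the equivalent helper, following the same pattern as the removed code (scheme plus the NameNode HTTP address from DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); the helper class itself is hypothetical and not part of this patch:

    import java.io.IOException;
    import java.net.URI;
    import java.net.URISyntaxException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    class WebHdfsTestHelperSketch {
      // Build a webhdfs:// URI against the NameNode HTTP address and hand it
      // to FileSystem.get(), mirroring the removed hftp helper.
      static FileSystem getWebHdfsFileSystem(Configuration conf) throws IOException {
        String uri = WebHdfsFileSystem.SCHEME + "://"
            + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
        try {
          return FileSystem.get(new URI(uri), conf);
        } catch (URISyntaxException e) {
          throw new IOException(e);
        }
      }
    }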
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f4bd2a3736..0982ee2388 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -63,7 +63,6 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -495,8 +494,6 @@ private void checkStatistics(FileSystem fs, int readOps, int writeOps, int large
 
   @Test
   public void testFileChecksum() throws Exception {
-    ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
-
     final long seed = RAN.nextLong();
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
@@ -530,17 +527,6 @@ public void testFileChecksum() throws Exception {
       assertTrue("Not throwing the intended exception message", e.getMessage()
           .contains("Path is not a file: /test/TestExistingDir"));
     }
-
-    //hftp
-    final String hftpuri = "hftp://" + nnAddr;
-    System.out.println("hftpuri=" + hftpuri);
-    final FileSystem hftp = ugi.doAs(
-        new PrivilegedExceptionAction() {
-      @Override
-      public FileSystem run() throws Exception {
-        return new Path(hftpuri).getFileSystem(conf);
-      }
-    });
 
     //webhdfs
     final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr;
@@ -578,14 +564,6 @@ public FileSystem run() throws Exception {
       final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
       System.out.println("hdfsfoocs=" + hdfsfoocs);
 
-      //hftp
-      final FileChecksum hftpfoocs = hftp.getFileChecksum(foo);
-      System.out.println("hftpfoocs=" + hftpfoocs);
-
-      final Path qualified = new Path(hftpuri + dir, "foo" + n);
-      final FileChecksum qfoocs = hftp.getFileChecksum(qualified);
-      System.out.println("qfoocs=" + qfoocs);
-
       //webhdfs
       final FileChecksum webhdfsfoocs = webhdfs.getFileChecksum(foo);
       System.out.println("webhdfsfoocs=" + webhdfsfoocs);
@@ -624,13 +602,6 @@ public FileSystem run() throws Exception {
       assertEquals(hdfsfoocs.hashCode(), barhashcode);
       assertEquals(hdfsfoocs, barcs);
 
-      //hftp
-      assertEquals(hftpfoocs.hashCode(), barhashcode);
-      assertEquals(hftpfoocs, barcs);
-
-      assertEquals(qfoocs.hashCode(), barhashcode);
-      assertEquals(qfoocs, barcs);
-
       //webhdfs
       assertEquals(webhdfsfoocs.hashCode(), barhashcode);
       assertEquals(webhdfsfoocs, barcs);
@@ -640,14 +611,6 @@ public FileSystem run() throws Exception {
     }
 
     hdfs.setPermission(dir, new FsPermission((short)0));
-    { //test permission error on hftp
-      try {
-        hftp.getFileChecksum(qualified);
-        fail();
-      } catch(IOException ioe) {
-        FileSystem.LOG.info("GOOD: getting an exception", ioe);
-      }
-    }
 
     { //test permission error on webhdfs
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index 2644420ee1..55a871e734 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -39,7 +39,6 @@
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -64,7 +63,6 @@ public class TestFileStatus {
   private static MiniDFSCluster cluster;
   private static FileSystem fs;
   private static FileContext fc;
-  private static HftpFileSystem hftpfs;
   private static DFSClient dfsClient;
   private static Path file1;
 
@@ -75,7 +73,6 @@ public static void testSetUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fc = FileContext.getFileContext(cluster.getURI(0), conf);
-    hftpfs = cluster.getHftpFileSystem(0);
     dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
     file1 = new Path("filestatus.dat");
     DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
@@ -208,8 +205,6 @@ public void testGetFileStatusOnDir() throws Exception {
     assertEquals(dir + " should be empty", 0, stats.length);
     assertEquals(dir + " should be zero size ",
         0, fs.getContentSummary(dir).getLength());
-    assertEquals(dir + " should be zero size using hftp",
-        0, hftpfs.getContentSummary(dir).getLength());
 
     RemoteIterator itor = fc.listStatus(dir);
     assertFalse(dir + " should be empty", itor.hasNext());
@@ -239,9 +234,7 @@ public void testGetFileStatusOnDir() throws Exception {
     final int expected = blockSize/2;
     assertEquals(dir + " size should be " + expected,
         expected, fs.getContentSummary(dir).getLength());
-    assertEquals(dir + " size should be " + expected + " using hftp",
-        expected, hftpfs.getContentSummary(dir).getLength());
-
+
     // Test listStatus on a non-empty directory
     stats = fs.listStatus(dir);
     assertEquals(dir + " should have two entries", 2, stats.length);
@@ -290,19 +283,9 @@ public void testGetFileStatusOnDir() throws Exception {
     assertEquals(dir5.toString(), itor.next().getPath().toString());
     assertEquals(file2.toString(), itor.next().getPath().toString());
     assertEquals(file3.toString(), itor.next().getPath().toString());
+    assertFalse(itor.hasNext());
 
-    { //test permission error on hftp
-      fs.setPermission(dir, new FsPermission((short)0));
-      try {
-        final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "1";
-        final HftpFileSystem hftp2 = cluster.getHftpFileSystemAs(username, conf, 0, "somegroup");
-        hftp2.getContentSummary(dir);
-        fail();
-      } catch(IOException ioe) {
-        FileSystem.LOG.info("GOOD: getting an exception", ioe);
-      }
-    }
     fs.delete(dir, true);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 969ccc6edb..94950caa5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.AccessControlException;
@@ -221,30 +220,6 @@ public void testAuditWebHdfsStat() throws Exception {
     assertTrue("failed to stat file", st != null && st.isFile());
   }
 
-  /** test that access via Hftp puts proper entry in audit log */
-  @Test
-  public void testAuditHftp() throws Exception {
-    final Path file = new Path(fnames[0]);
-
-    final String hftpUri =
-      "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-
-    HftpFileSystem hftpFs = null;
-
-    setupAuditLogs();
-    try {
-      hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(conf);
-      InputStream istream = hftpFs.open(file);
-      @SuppressWarnings("unused")
-      int val = istream.read();
-      istream.close();
-
-      verifyAuditLogs(true);
-    } finally {
-      if (hftpFs != null) hftpFs.close();
-    }
-  }
-
   /** test that denied access via webhdfs puts proper entry in audit log */
   @Test
   public void testAuditWebHdfsDenied() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
index bfd2697071..7612de341b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
@@ -82,16 +82,6 @@ public static void tearDown() throws Exception {
     KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
   }
 
-  @Test
-  public void testHsftpFileSystem() throws Exception {
-    FileSystem fs = FileSystem.get(new URI("hsftp://" + nnAddr), conf);
-    Assert.assertTrue(fs.exists(new Path("/test")));
-    InputStream is = fs.open(new Path("/test"));
-    Assert.assertEquals(23, is.read());
-    is.close();
-    fs.close();
-  }
-
   @Test
   public void testSWebHdfsFileSystem() throws Exception {
     FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, "swebhdfs");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
index 00f9815537..da50174c47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java
@@ -27,7 +27,7 @@ public class FakeRenewer extends TokenRenewer {
   static Token lastRenewed = null;
   static Token lastCanceled = null;
-  static final Text KIND = new Text("TESTING-TOKEN-KIND");
+  public static final Text KIND = new Text("TESTING-TOKEN-KIND");
 
   @Override
   public boolean handleKind(Text kind) {
@@ -54,4 +54,12 @@ public static void reset() {
     lastRenewed = null;
     lastCanceled = null;
   }
+
+  public static Token getLastRenewed() {
+    return lastRenewed;
+  }
+
+  public static Token getLastCanceled() {
+    return lastCanceled;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
index 0bff22cdf6..e34518c609 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml
@@ -182,7 +182,6 @@
 <property><name>file.stream-buffer-size</name><value>4096</value></property>
 <property><name>dfs.namenode.fs-limits.max-directory-items</name><value>0</value></property>
 <property><name>io.mapfile.bloom.size</name><value>1048576</value></property>
-<property><name>fs.hsftp.impl</name><value>org.apache.hadoop.hdfs.HsftpFileSystem</value></property>
 <property><name>yarn.nodemanager.container-executor.class</name><value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value></property>
 <property><name>mapreduce.map.maxattempts</name><value>4</value></property>
 <property><name>mapreduce.jobtracker.jobhistory.block.size</name><value>3145728</value></property>
@@ -224,7 +223,6 @@
 <property><name>hadoop.http.authentication.simple.anonymous.allowed</name><value>true</value></property>
 <property><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value></property>
 <property><name>mapreduce.job.submithostname</name><value>localhost</value></property>
-<property><name>fs.hftp.impl</name><value>org.apache.hadoop.hdfs.HftpFileSystem</value></property>
 <property><name>dfs.namenode.handler.count</name><value>10</value></property>
 <property><name>fs.automatic.close</name><value>true</value></property>
 <property><name>fs.kfs.impl</name><value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value></property>
@@ -392,6 +390,7 @@
 <property><name>mapreduce.job.counters.limit</name><value>120</value></property>
 <property><name>dfs.datanode.ipc.address</name><value>0.0.0.0:50020</value></property>
 <property><name>fs.webhdfs.impl</name><value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value></property>
+<property><name>fs.swebhdfs.impl</name><value>org.apache.hadoop.hdfs.web.SWebHdfsFileSystem</value></property>
 <property><name>yarn.nodemanager.delete.debug-delay-sec</name><value>0</value></property>
 <property><name>dfs.datanode.max.transfer.threads</name><value>4096</value></property>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
index 60c1ba6b84..13e27cd9e1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestFileSystem.java
@@ -565,18 +565,6 @@ public void testFsClose() throws Exception {
       new Path("file:///").getFileSystem(conf);
       FileSystem.closeAll();
     }
-
-    {
-      Configuration conf = new Configuration();
-      new Path("hftp://localhost:12345/").getFileSystem(conf);
-      FileSystem.closeAll();
-    }
-
-    {
-      Configuration conf = new Configuration();
-      FileSystem fs = new Path("hftp://localhost:12345/").getFileSystem(conf);
-      FileSystem.closeAll();
-    }
   }
 
   public void testFsShutdownHook() throws Exception {
@@ -617,12 +605,12 @@ public void testCacheKeysAreCaseInsensitive()
     Configuration conf = new Configuration();
 
     // check basic equality
-    FileSystem.Cache.Key lowercaseCachekey1 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf);
-    FileSystem.Cache.Key lowercaseCachekey2 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf);
+    FileSystem.Cache.Key lowercaseCachekey1 = new FileSystem.Cache.Key(new URI("hdfs://localhost:12345/"), conf);
+    FileSystem.Cache.Key lowercaseCachekey2 = new FileSystem.Cache.Key(new URI("hdfs://localhost:12345/"), conf);
     assertEquals( lowercaseCachekey1, lowercaseCachekey2 );
 
     // check insensitive equality
-    FileSystem.Cache.Key uppercaseCachekey = new FileSystem.Cache.Key(new URI("HFTP://Localhost:12345/"), conf);
+    FileSystem.Cache.Key uppercaseCachekey = new FileSystem.Cache.Key(new URI("HDFS://Localhost:12345/"), conf);
     assertEquals( lowercaseCachekey2, uppercaseCachekey );
 
     // check behaviour with collections
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
index 9568e6131a..4ba95ec99b 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
@@ -313,7 +313,7 @@ public void testMakeDirFailure() {
         = stubContext.getContext();
 
     Configuration configuration = context.getConfiguration();
-    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
+    String workPath = new Path("webhdfs://localhost:1234/*/*/*/?/")
         .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
     configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
         workPath);
diff --git a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
index ca7f36fca9..73bba6616a 100644
--- a/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
+++ b/hadoop-tools/hadoop-extras/src/test/java/org/apache/hadoop/tools/TestCopyFiles.java
@@ -891,64 +891,6 @@ static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi
     return home;
   }
 
-  public void testHftpAccessControl() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      final UserGroupInformation DFS_UGI = createUGI("dfs", true);
-      final UserGroupInformation USER_UGI = createUGI("user", false);
-
-      //start cluster by DFS_UGI
-      final Configuration dfsConf = new Configuration();
-      cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
-      cluster.waitActive();
-
-      final String httpAdd = dfsConf.get("dfs.http.address");
-      final URI nnURI = FileSystem.getDefaultUri(dfsConf);
-      final String nnUri = nnURI.toString();
-      FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction() {
-        public FileSystem run() throws IOException {
-          return FileSystem.get(nnURI, dfsConf);
-        }
-      });
-      final Path home =
-        createHomeDirectory(fs1, USER_UGI);
-
-      //now, login as USER_UGI
-      final Configuration userConf = new Configuration();
-      final FileSystem fs =
-        USER_UGI.doAs(new PrivilegedExceptionAction() {
-          public FileSystem run() throws IOException {
-            return FileSystem.get(nnURI, userConf);
-          }
-        });
-
-      final Path srcrootpath = new Path(home, "src_root");
-      final String srcrootdir = srcrootpath.toString();
-      final Path dstrootpath = new Path(home, "dst_root");
-      final String dstrootdir = dstrootpath.toString();
-      final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction() {
-        public DistCpV1 run() {
-          return new DistCpV1(userConf);
-        }
-      });
-
-      FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
-      final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};
-
-      { //copy with permission 000, should fail
-        fs.setPermission(srcrootpath, new FsPermission((short)0));
-        USER_UGI.doAs(new PrivilegedExceptionAction() {
-          public Void run() throws Exception {
-            assertEquals(-3, ToolRunner.run(distcp, args));
-            return null;
-          }
-        });
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
   /** test -delete */
   public void testDelete() throws Exception {
     final Configuration conf = new Configuration();
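Taken together, the client-facing effect of this patch is a scheme migration: URLs that previously used hftp:// or hsftp:// point at the same NameNode HTTP or HTTPS address under webhdfs:// or swebhdfs://, and fetchdt --webservice keeps accepting plain http:// and https:// URLs by rewriting them as in DelegationTokenFetcher.getFileSystem above. A minimal sketch of that mapping, using the SCHEME constants from this patch; the wrapper class itself is hypothetical:

    import java.net.URI;

    import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    class SchemeMigrationSketch {
      // http://<nn>:<http-port>   -> webhdfs://<nn>:<http-port>
      // https://<nn>:<https-port> -> swebhdfs://<nn>:<https-port>
      static URI toWebHdfs(String nnHttpUrl) {
        return URI.create(nnHttpUrl
            .replaceFirst("^http://", WebHdfsFileSystem.SCHEME + "://")
            .replaceFirst("^https://", SWebHdfsFileSystem.SCHEME + "://"));
      }
    }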