diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ae2d266258..64a4fdf568 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -114,6 +114,9 @@ Trunk (unreleased changes)
     HDFS-3789. JournalManager#format() should be able to throw IOException
     (Ivan Kelly via todd)
 
+    HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+    suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 8d59ae6588..d263acd590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -18,8 +18,21 @@
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+
 import java.io.IOException;
+import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -33,10 +46,17 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -57,8 +77,7 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -424,7 +443,6 @@ private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
    *
    * @param conf configuration
    * @return list of InetSocketAddresses
-   * @throws IOException if no addresses are configured
    */
  public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
      Configuration conf) {
@@ -1073,4 +1091,44 @@ public static String getOnlyNameServiceIdOrNull(Configuration conf) {
       return null;
     }
   }
+
+  public static Options helpOptions = new Options();
+  public static Option helpOpt = new Option("h", "help", false,
+      "get help information");
+
+  static {
+    helpOptions.addOption(helpOpt);
+  }
+
+  /**
+   * Parse the arguments for commands
+   *
+   * @param args the arguments to be parsed
+   * @param helpDescription help information to be printed out
+   * @param out the output stream to print to
+   * @param printGenericCommandUsage whether to print the
+   *              generic command usage defined in ToolRunner
+   * @return true when the argument matches the help option, false otherwise
+   */
+  public static boolean parseHelpArgument(String[] args,
+      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
+    if (args.length == 1) {
+      try {
+        CommandLineParser parser = new PosixParser();
+        CommandLine cmdLine = parser.parse(helpOptions, args);
+        if (cmdLine.hasOption(helpOpt.getOpt())
+            || cmdLine.hasOption(helpOpt.getLongOpt())) {
+          // should print out the help information
+          out.println(helpDescription + "\n");
+          if (printGenericCommandUsage) {
+            ToolRunner.printGenericCommandUsage(out);
+          }
+          return true;
+        }
+      } catch (ParseException pe) {
+        return false;
+      }
+    }
+    return false;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index f949c924a1..577d73b76b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -26,6 +27,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.Socket;
 import java.net.URI;
 import java.text.DateFormat;
@@ -68,7 +70,6 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;
@@ -79,7 +80,6 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import static com.google.common.base.Preconditions.checkArgument;
 
 /** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
  * when some datanodes become full or when new empty nodes join the cluster.
@@ -189,6 +189,13 @@ public class Balancer {
    */
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
 
+  private static final String USAGE = "Usage: java "
+      + Balancer.class.getSimpleName()
+      + "\n\t[-policy <policy>]\tthe balancing policy: "
+      + BalancingPolicy.Node.INSTANCE.getName() + " or "
+      + BalancingPolicy.Pool.INSTANCE.getName()
+      + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
+
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
   private final double threshold;
@@ -1550,7 +1557,7 @@ static Parameters parse(String[] args) {
           }
         }
       } catch(RuntimeException e) {
-        printUsage();
+        printUsage(System.err);
         throw e;
       }
     }
@@ -1558,13 +1565,8 @@ static Parameters parse(String[] args) {
 
       return new Parameters(policy, threshold);
     }
 
-    private static void printUsage() {
-      System.out.println("Usage: java " + Balancer.class.getSimpleName());
-      System.out.println("    [-policy <policy>]\tthe balancing policy: "
-          + BalancingPolicy.Node.INSTANCE.getName() + " or "
-          + BalancingPolicy.Pool.INSTANCE.getName());
-      System.out.println(
-          "    [-threshold <threshold>]\tPercentage of disk capacity");
+    private static void printUsage(PrintStream out) {
+      out.println(USAGE + "\n");
     }
   }
 
@@ -1573,6 +1575,10 @@ private static void printUsage() {
    * @param args Command line arguments
    */
   public static void main(String[] args) {
+    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
     try {
       System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index a456d133d4..83219cca48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -46,6 +46,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -55,6 +56,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -98,8 +100,8 @@
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -124,9 +126,6 @@
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -171,9 +170,9 @@
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 /**********************************************************
@@ -230,6 +229,8 @@ public class DataNode extends Configured
   static final Log ClientTraceLog =
     LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+
+  private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
 
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
@@ -1541,7 +1542,7 @@ public static DataNode instantiateDataNode(String args [], Configuration conf,
     }
 
     if (!parseArguments(args, conf)) {
-      printUsage();
+      printUsage(System.err);
       return null;
     }
     Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1655,9 +1656,8 @@ public String toString() {
         + xmitsInProgress.get() + "}";
   }
 
-  private static void printUsage() {
-    System.err.println("Usage: java DataNode");
-    System.err.println(" [-rollback]");
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
   }
 
   /**
@@ -1742,6 +1742,10 @@ public static void secureMain(String args[], SecureResources resources) {
   }
 
   public static void main(String args[]) {
+    if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
     secureMain(args, null);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 20e7aafba9..2df693b3c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
@@ -38,6 +39,8 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -69,12 +72,9 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ExitUtil.ExitException;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -188,6 +188,22 @@ public static enum OperationCategory {
     DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
 
+  private static final String USAGE = "Usage: java NameNode ["
+      + StartupOption.BACKUP.getName() + "] | ["
+      + StartupOption.CHECKPOINT.getName() + "] | ["
+      + StartupOption.FORMAT.getName() + " ["
+      + StartupOption.CLUSTERID.getName() + " cid ] ["
+      + StartupOption.FORCE.getName() + "] ["
+      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+      + StartupOption.UPGRADE.getName() + "] | ["
+      + StartupOption.ROLLBACK.getName() + "] | ["
+      + StartupOption.FINALIZE.getName() + "] | ["
+      + StartupOption.IMPORT.getName() + "] | ["
+      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
+      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
+      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
+      + " ] ]";
+
   public long getProtocolVersion(String protocol,
                                  long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
@@ -895,25 +911,8 @@ private static boolean finalize(Configuration conf,
     return false;
   }
 
-  private static void printUsage() {
-    System.err.println(
-      "Usage: java NameNode [" +
-      StartupOption.BACKUP.getName() + "] | [" +
-      StartupOption.CHECKPOINT.getName() + "] | [" +
-      StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +
-      " cid ] [" + StartupOption.FORCE.getName() + "] [" +
-      StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
-      StartupOption.UPGRADE.getName() + "] | [" +
-      StartupOption.ROLLBACK.getName() + "] | [" +
-      StartupOption.FINALIZE.getName() + "] | [" +
-      StartupOption.IMPORT.getName() + "] | [" +
-      StartupOption.INITIALIZESHAREDEDITS.getName() +
-      " [" + StartupOption.FORCE.getName() + "] [" +
-      StartupOption.NONINTERACTIVE.getName() + "]" +
-      "] | [" +
-      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" +
-      StartupOption.RECOVER.getName() + " [ " +
-      StartupOption.FORCE.getName() + " ] ]");
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -1061,7 +1060,7 @@ public static NameNode createNameNode(String argv[], Configuration conf)
       conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
-      printUsage();
+      printUsage(System.err);
       return null;
     }
     setStartupOption(conf, startOpt);
@@ -1175,6 +1174,10 @@ protected String getNameServiceId(Configuration conf) {
   /**
    */
   public static void main(String argv[]) throws Exception {
+    if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 8057955dfa..47d09ef993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -562,6 +562,9 @@ public static void main(String[] argv) throws Exception {
     if (opts == null) {
       LOG.fatal("Failed to parse options");
       terminate(1);
+    } else if (opts.shouldPrintHelp()) {
+      opts.usage();
+      System.exit(0);
     }
 
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -595,6 +598,7 @@ static class CommandLineOpts {
     private final Option geteditsizeOpt;
     private final Option checkpointOpt;
     private final Option formatOpt;
+    private final Option helpOpt;
 
     Command cmd;
 
@@ -605,6 +609,7 @@ enum Command {
     private boolean shouldForce;
     private boolean shouldFormat;
+    private boolean shouldPrintHelp;
 
     CommandLineOpts() {
       geteditsizeOpt = new Option("geteditsize",
@@ -612,20 +617,32 @@ enum Command {
       checkpointOpt = OptionBuilder.withArgName("force")
      .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
       formatOpt = new Option("format", "format the local storage during startup");
+      helpOpt = new Option("h", "help", false, "get help information");
 
       options.addOption(geteditsizeOpt);
       options.addOption(checkpointOpt);
       options.addOption(formatOpt);
+      options.addOption(helpOpt);
     }
 
     public boolean shouldFormat() {
       return shouldFormat;
     }
 
+    public boolean shouldPrintHelp() {
+      return shouldPrintHelp;
+    }
+
     public void parse(String ... argv) throws ParseException {
       CommandLineParser parser = new PosixParser();
       CommandLine cmdLine = parser.parse(options, argv);
 
+      if (cmdLine.hasOption(helpOpt.getOpt())
+          || cmdLine.hasOption(helpOpt.getLongOpt())) {
+        shouldPrintHelp = true;
+        return;
+      }
+
       boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
       boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
       if (hasGetEdit && hasCheckpoint) {
@@ -662,8 +679,13 @@ public boolean shouldForceCheckpoint() {
     }
 
     void usage() {
+      String header = "The Secondary NameNode is a helper "
+          + "to the primary NameNode. The Secondary is responsible "
+          + "for supporting periodic checkpoints of the HDFS metadata. "
+          + "The current design allows only one Secondary NameNode "
+          + "per HDFS cluster.";
       HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp("secondarynamenode", options);
+      formatter.printHelp("secondarynamenode", header, options, "", false);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index d4397276ea..47c852d596 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -42,6 +42,10 @@ public class DFSHAAdmin extends HAAdmin {
   protected void setErrOut(PrintStream errOut) {
     this.errOut = errOut;
   }
+
+  protected void setOut(PrintStream out) {
+    this.out = out;
+  }
 
   @Override
   public void setConf(Configuration conf) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index b1163d6885..e18c9a86ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -162,6 +162,10 @@ protected String getScopeInsideParentNode() {
 
   public static void main(String args[])
       throws Exception {
+    if (DFSUtil.parseHelpArgument(args,
+        ZKFailoverController.USAGE, System.out, true)) {
+      System.exit(0);
+    }
 
     GenericOptionsParser parser = new GenericOptionsParser(
         new HdfsConfiguration(), args);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 566d77a5fb..c3238f0de3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -73,6 +73,25 @@ public class DFSck extends Configured implements Tool {
     HdfsConfiguration.init();
   }
 
+  private static final String USAGE = "Usage: DFSck <path> "
+      + "[-list-corruptfileblocks | "
+      + "[-move | -delete | -openforwrite] "
+      + "[-files [-blocks [-locations | -racks]]]]\n"
+      + "\t<path>\tstart checking from this path\n"
+      + "\t-move\tmove corrupted files to /lost+found\n"
+      + "\t-delete\tdelete corrupted files\n"
+      + "\t-files\tprint out files being checked\n"
+      + "\t-openforwrite\tprint out files opened for write\n"
+      + "\t-list-corruptfileblocks\tprint out list of missing "
+      + "blocks and files they belong to\n"
+      + "\t-blocks\tprint out block report\n"
+      + "\t-locations\tprint out locations for every block\n"
+      + "\t-racks\tprint out network topology for data-node locations\n"
+      + "\t\tBy default fsck ignores files opened for write, "
+      + "use -openforwrite to report such files. They are usually "
+      + " tagged CORRUPT or HEALTHY depending on their block "
+      + "allocation status";
+
   private final UserGroupInformation ugi;
   private final PrintStream out;
 
@@ -93,25 +112,9 @@ public DFSck(Configuration conf, PrintStream out) throws IOException {
   /**
    * Print fsck usage information
    */
-  static void printUsage() {
-    System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
-        "[-move | -delete | -openforwrite] " +
-        "[-files [-blocks [-locations | -racks]]]]");
-    System.err.println("\t<path>\tstart checking from this path");
-    System.err.println("\t-move\tmove corrupted files to /lost+found");
-    System.err.println("\t-delete\tdelete corrupted files");
-    System.err.println("\t-files\tprint out files being checked");
-    System.err.println("\t-openforwrite\tprint out files opened for write");
-    System.err.println("\t-list-corruptfileblocks\tprint out list of missing " +
-        "blocks and files they belong to");
-    System.err.println("\t-blocks\tprint out block report");
-    System.err.println("\t-locations\tprint out locations for every block");
-    System.err.println("\t-racks\tprint out network topology for data-node locations");
-    System.err.println("\t\tBy default fsck ignores files opened for write, " +
-        "use -openforwrite to report such files. They are usually " +
-        " tagged CORRUPT or HEALTHY depending on their block " +
-        "allocation status");
-    ToolRunner.printGenericCommandUsage(System.err);
+  static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
+    ToolRunner.printGenericCommandUsage(out);
   }
   /**
    * @param args
    */
@@ -119,7 +122,7 @@ static void printUsage() {
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
-      printUsage();
+      printUsage(System.err);
       return -1;
     }
 
@@ -258,12 +261,12 @@ else if (args[idx].equals("-list-corruptfileblocks")) {
       } else {
         System.err.println("fsck: can only operate on one path at a time '"
            + args[idx] + "'");
-        printUsage();
+        printUsage(System.err);
         return -1;
       }
     } else {
       System.err.println("fsck: Illegal option '" + args[idx] + "'");
-      printUsage();
+      printUsage(System.err);
       return -1;
     }
   }
@@ -304,10 +307,14 @@ public static void main(String[] args) throws Exception {
     // -files option is also used by GenericOptionsParser
     // Make sure that is not the first argument for fsck
     int res = -1;
-    if ((args.length == 0 ) || ("-files".equals(args[0])))
-      printUsage();
-    else
+    if ((args.length == 0) || ("-files".equals(args[0]))) {
+      printUsage(System.err);
+      ToolRunner.printGenericCommandUsage(System.err);
+    } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+      res = 0;
+    } else {
       res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
+    }
     System.exit(res);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index e0935d475c..f74b4e8896 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -48,9 +47,7 @@
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -71,8 +68,10 @@ public class DelegationTokenFetcher {
   private static final String CANCEL = "cancel";
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
+  private static final String HELP = "help";
+  private static final String HELP_SHORT = "h";
 
-  private static void printUsage(PrintStream err) throws IOException {
+  private static void printUsage(PrintStream err) {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
     err.println("fetchdt <opts> <token file>");
@@ -107,6 +106,7 @@ public static void main(final String[] args) throws Exception {
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
     fetcherOptions.addOption(RENEW, false, "renew the token");
     fetcherOptions.addOption(PRINT, false, "print the token");
token"); + fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information"); GenericOptionsParser parser = new GenericOptionsParser(conf, fetcherOptions, args); CommandLine cmd = parser.getCommandLine(); @@ -119,9 +119,14 @@ public static void main(final String[] args) throws Exception { final boolean cancel = cmd.hasOption(CANCEL); final boolean renew = cmd.hasOption(RENEW); final boolean print = cmd.hasOption(PRINT); + final boolean help = cmd.hasOption(HELP); String[] remaining = parser.getRemainingArgs(); // check option validity + if (help) { + printUsage(System.out); + System.exit(0); + } if (cancel && renew || cancel && print || renew && print || cancel && renew && print) { System.err.println("ERROR: Only specify cancel, renew or print."); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java index adf3293edf..778ac59ee2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java @@ -324,6 +324,10 @@ public Integer run() throws Exception { } public static void main(String[] args) throws Exception { + if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) { + System.exit(0); + } + int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args); System.exit(res); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java index c0e415a843..49d56d534f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -43,6 +44,8 @@ public class GetGroups extends GetGroupsBase { private static final Log LOG = LogFactory.getLog(GetGroups.class); + + static final String USAGE = "Usage: hdfs groups [username ...]"; static{ HdfsConfiguration.init(); @@ -86,6 +89,10 @@ protected GetUserMappingsProtocol getUgmProtocol() throws IOException { } public static void main(String[] argv) throws Exception { + if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) { + System.exit(0); + } + int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv); System.exit(res); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index 61e8ebef5c..666e52b484 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -55,7 +55,9 @@ public class TestDFSHAAdmin { private DFSHAAdmin tool; private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream(); + private ByteArrayOutputStream outBytes = new ByteArrayOutputStream(); private String errOutput; + private String output; private 
   private HAServiceProtocol mockProtocol;
   private ZKFCProtocol mockZkfcProtocol;
 
@@ -111,12 +113,14 @@ protected HAServiceTarget resolveTarget(String nnId) {
     };
     tool.setConf(getHAConf());
     tool.setErrOut(new PrintStream(errOutBytes));
+    tool.setOut(new PrintStream(outBytes));
   }
 
   private void assertOutputContains(String string) {
-    if (!errOutput.contains(string)) {
-      fail("Expected output to contain '" + string + "' but was:\n" +
-          errOutput);
+    if (!errOutput.contains(string) && !output.contains(string)) {
+      fail("Expected output to contain '" + string
+          + "' but err_output was:\n" + errOutput
+          + "\n and output was: \n" + output);
     }
   }
 
@@ -143,7 +147,7 @@ public void testNamenodeResolution() throws Exception {
 
   @Test
   public void testHelp() throws Exception {
-    assertEquals(-1, runTool("-help"));
+    assertEquals(0, runTool("-help"));
     assertEquals(0, runTool("-help", "transitionToActive"));
     assertOutputContains("Transitions the service into Active");
   }
@@ -378,10 +382,12 @@ public void testFencingConfigPerNameNode() throws Exception {
 
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
+    outBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
     int ret = tool.run(args);
     errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    LOG.info("Output:\n" + errOutput);
+    output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+    LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
     return ret;
   }
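
For reference, every entry point touched by this patch follows the same pattern: check for -h/-help before any other argument handling, print the command's USAGE text plus the generic ToolRunner options, and exit 0. A minimal sketch of how a command would wire in the new DFSUtil.parseHelpArgument helper is below; the MyHdfsTool class and its USAGE string are illustrative only and not part of the patch.

import org.apache.hadoop.hdfs.DFSUtil;

public class MyHdfsTool {
  // Illustrative usage text; each real command in the patch defines its own USAGE constant.
  private static final String USAGE = "Usage: java MyHdfsTool [-someOption]";

  public static void main(String[] args) {
    // A single -h or -help argument prints USAGE plus the generic
    // ToolRunner options and exits 0; anything else falls through to
    // the command's normal argument parsing and startup.
    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
      System.exit(0);
    }
    // ... normal command startup goes here ...
  }
}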