HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.
(cherry picked from commit 53e8d0d030525e4c7f3875e23807c6dbe778890f)
parent 81601dac8e
commit 5d63a388d1

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -39,10 +39,11 @@ function hadoop_usage
   hadoop_add_subcommand "debug" admin "run a Debug Admin to execute HDFS debug commands"
   hadoop_add_subcommand "dfs" client "run a filesystem command on the file system"
   hadoop_add_subcommand "dfsadmin" admin "run a DFS admin client"
+  hadoop_add_subcommand "dfsrouter" daemon "run the DFS router"
+  hadoop_add_subcommand "dfsrouteradmin" admin "manage Router-based federation"
   hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node"
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
   hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
-  hadoop_add_subcommand "federation" admin "manage Router-based federation"
   hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
   hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
   hadoop_add_subcommand "getconf" client "get config values from configuration"
@@ -58,7 +59,6 @@ function hadoop_usage
   hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
   hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
-  hadoop_add_subcommand "router" daemon "run the DFS router"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
@@ -107,6 +107,13 @@ function hdfscmd_case
     dfsadmin)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
     ;;
+    dfsrouter)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.DFSRouter'
+    ;;
+    dfsrouteradmin)
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
+    ;;
     diskbalancer)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
     ;;
@@ -178,13 +185,6 @@ function hdfscmd_case
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
     ;;
-    router)
-      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router'
-    ;;
-    federation)
-      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
-    ;;
     secondarynamenode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
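
With the dispatch cases above in place, the renamed entry points can be smoke-tested from any configured client. A minimal illustrative session (commands only; output omitted):

    # run the Router in the foreground; dispatches to
    # org.apache.hadoop.hdfs.server.federation.router.DFSRouter
    [hdfs]$ $HADOOP_PREFIX/bin/hdfs dfsrouter

    # manage the mount table; dispatches to
    # org.apache.hadoop.hdfs.tools.federation.RouterAdmin
    [hdfs]$ $HADOOP_PREFIX/bin/hdfs dfsrouteradmin -ls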

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
   )
 )
 
-set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router federation debug
+set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug
 for %%i in ( %hdfscommands% ) do (
   if %hdfs-command% == %%i set hdfscommand=true
 )
@@ -179,12 +179,12 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
 
-:router
-  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+:dfsrouter
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.DFSRouter
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
 
-:federation
+:dfsrouteradmin
   set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
@@ -229,7 +229,8 @@ goto :eof
 @echo   secondarynamenode    run the DFS secondary namenode
 @echo   namenode             run the DFS namenode
 @echo   journalnode          run the DFS journalnode
-@echo   router               run the DFS router
+@echo   dfsrouter            run the DFS router
+@echo   dfsrouteradmin       manage Router-based federation
 @echo   zkfc                 run the ZK Failover Controller daemon
 @echo   datanode             run a DFS datanode
 @echo   dfsadmin             run a DFS admin client
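
The Windows launcher follows the same dispatch table, so the equivalent illustrative invocations are:

    C:\> hdfs dfsrouter
    C:\> hdfs dfsrouteradmin -ls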

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/DFSRouter.java (new file)
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.service.CompositeService.CompositeServiceShutdownHook;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tool to start the {@link Router} for Router-based federation.
+ */
+public final class DFSRouter {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DFSRouter.class);
+
+
+  /** Usage string for help message. */
+  private static final String USAGE = "Usage: hdfs dfsrouter";
+
+  /** Priority of the Router shutdown hook. */
+  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
+
+  private DFSRouter() {
+    // This is just a class to trigger the Router
+  }
+
+  /**
+   * Main run loop for the router.
+   *
+   * @param argv parameters.
+   */
+  public static void main(String[] argv) {
+    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
+    try {
+      StringUtils.startupShutdownMessage(Router.class, argv, LOG);
+
+      Router router = new Router();
+
+      ShutdownHookManager.get().addShutdownHook(
+          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);
+
+      Configuration conf = new HdfsConfiguration();
+      router.init(conf);
+      router.start();
+    } catch (Throwable e) {
+      LOG.error("Failed to start router", e);
+      terminate(1, e);
+    }
+  }
+}
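
DFSRouter is the class the new `dfsrouter` case daemonizes. For a quick check outside the daemon scripts it can also be run directly; a minimal illustrative session (output omitted):

    # should print the USAGE string and exit
    # (help parsing via DFSUtil.parseHelpArgument above)
    [hdfs]$ $HADOOP_PREFIX/bin/hdfs dfsrouter -h

    # run the Router in the foreground until interrupted
    [hdfs]$ $HADOOP_PREFIX/bin/hdfs dfsrouter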

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -19,7 +19,6 @@
 
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newActiveNamenodeResolver;
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newFileSubclusterResolver;
-import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -35,7 +34,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -44,8 +42,6 @@
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -110,12 +106,6 @@ public class Router extends CompositeService {
   private JvmPauseMonitor pauseMonitor;
 
 
-  /** Usage string for help message. */
-  private static final String USAGE = "Usage: java Router";
-
-  /** Priority of the Router shutdown hook. */
-  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
-
 
   /////////////////////////////////////////////////////////
   // Constructor
@@ -250,35 +240,6 @@ public void run() {
     }.start();
   }
 
-  /**
-   * Main run loop for the router.
-   *
-   * @param argv parameters.
-   */
-  public static void main(String[] argv) {
-    if (DFSUtil.parseHelpArgument(argv, Router.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-
-    try {
-      StringUtils.startupShutdownMessage(Router.class, argv, LOG);
-
-      Router router = new Router();
-
-      ShutdownHookManager.get().addShutdownHook(
-          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);
-
-      Configuration conf = new HdfsConfiguration();
-      router.init(conf);
-      router.start();
-    } catch (Throwable e) {
-      LOG.error("Failed to start router", e);
-      terminate(1, e);
-    }
-  }
-
-
-
   /////////////////////////////////////////////////////////
   // RPC Server
   /////////////////////////////////////////////////////////

hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -164,11 +164,11 @@ The rest of the options are documented in [hdfs-default.xml](./hdfs-default.xml)
 
 Once the Router is configured, it can be started:
 
-    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start router
+    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter
 
 And to stop it:
 
-    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop router
+    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter
 
 ### Mount table management
 
@@ -179,10 +179,10 @@ For example, if we to mount `/data/app1` in the federated namespace, it is recom
 The federation admin tool supports managing the mount table.
 For example, to create three mount points and list them:
 
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /tmp ns1 /tmp
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /data/app1 ns2 /data/app1
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /data/app2 ns3 /data/app2
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -ls
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /tmp ns1 /tmp
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app1 ns2 /data/app1
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app2 ns3 /data/app2
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -ls
 
 If a mount point is not set, the Router will map it to the default namespace `dfs.federation.router.default.nameserviceId`.
 
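
Putting the renamed tooling together, an illustrative end-to-end session (nameservice names such as `ns1` are placeholders, as in the documentation above):

    # start the Router as a daemon under its new subcommand name
    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter

    # create a mount point and confirm it is registered
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /tmp ns1 /tmp
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -ls

    # stop the daemon
    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter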