HDFS-8591. Remove support for deprecated configuration key dfs.namenode.decommission.nodes.per.interval.
commit a3990ca414
parent b039e69bb0
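With this change the NameNode's DecommissionManager throttles its periodic scan only by block count (dfs.namenode.decommission.blocks.per.interval); the old per-node key is read solely to log a warning and is otherwise ignored. A minimal migration sketch, assuming hadoop-common and hadoop-hdfs on the classpath (the class name and the example value 500000 are illustrative, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class DecomConfMigration {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The removed key: after this commit the NameNode logs a warning and ignores it.
        conf.unset("dfs.namenode.decommission.nodes.per.interval");
        // The replacement key: budget the decommission monitor's work per interval in blocks.
        conf.setInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
            500000);
        System.out.println("blocks per interval = " + conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, -1));
      }
    }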
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -31,6 +31,9 @@ Trunk (Unreleased)
 
     HDFS-8135. Remove the deprecated FSConstants class. (Li Lu via wheat9)
 
+    HDFS-8591. Remove support for deprecated configuration key
+    dfs.namenode.decommission.nodes.per.interval. (wang)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -136,29 +136,20 @@ void activate(Configuration conf) {
     checkArgument(intervalSecs >= 0, "Cannot set a negative " +
         "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
 
-    // By default, the new configuration key overrides the deprecated one.
-    // No # node limit is set.
     int blocksPerInterval = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
-    int nodesPerInterval = Integer.MAX_VALUE;
 
-    // If the expected key isn't present and the deprecated one is,
-    // use the deprecated one into the new one. This overrides the
-    // default.
-    //
-    // Also print a deprecation warning.
     final String deprecatedKey =
         "dfs.namenode.decommission.nodes.per.interval";
     final String strNodes = conf.get(deprecatedKey);
     if (strNodes != null) {
-      nodesPerInterval = Integer.parseInt(strNodes);
-      blocksPerInterval = Integer.MAX_VALUE;
-      LOG.warn("Using deprecated configuration key {} value of {}.",
-          deprecatedKey, nodesPerInterval);
+      LOG.warn("Deprecated configuration key {} will be ignored.",
+          deprecatedKey);
       LOG.warn("Please update your configuration to use {} instead.",
           DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
     }
+
     checkArgument(blocksPerInterval > 0,
         "Must set a positive value for "
         + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
@@ -170,15 +161,14 @@ void activate(Configuration conf) {
         "value for "
         + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
 
-    monitor = new Monitor(blocksPerInterval,
-        nodesPerInterval, maxConcurrentTrackedNodes);
+    monitor = new Monitor(blocksPerInterval, maxConcurrentTrackedNodes);
     executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
         TimeUnit.SECONDS);
 
     LOG.debug("Activating DecommissionManager with interval {} seconds, " +
-        "{} max blocks per interval, {} max nodes per interval, " +
+        "{} max blocks per interval, " +
         "{} max concurrently tracked nodes.", intervalSecs,
-        blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes);
+        blocksPerInterval, maxConcurrentTrackedNodes);
   }
 
   /**
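Taken together, the two activate() hunks above leave a single block-based budget plus a stale-key warning. A minimal sketch of that configuration handling, reassembled from the added and context lines (the class and method names below are hypothetical, not part of DecommissionManager):

    import static com.google.common.base.Preconditions.checkArgument;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class DecomConfSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(DecomConfSketch.class);

      // Mirrors the post-patch configuration handling in activate().
      static int readBlocksPerInterval(Configuration conf) {
        int blocksPerInterval = conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);

        // The deprecated key is still looked up, but only to warn that it is ignored.
        final String deprecatedKey = "dfs.namenode.decommission.nodes.per.interval";
        if (conf.get(deprecatedKey) != null) {
          LOG.warn("Deprecated configuration key {} will be ignored.", deprecatedKey);
          LOG.warn("Please update your configuration to use {} instead.",
              DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
        }

        checkArgument(blocksPerInterval > 0,
            "Must set a positive value for "
            + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
        return blocksPerInterval;
      }
    }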
@@ -333,10 +323,6 @@ private class Monitor implements Runnable {
      * The maximum number of blocks to check per tick.
      */
     private final int numBlocksPerCheck;
-    /**
-     * The maximum number of nodes to check per tick.
-     */
-    private final int numNodesPerCheck;
     /**
      * The maximum number of nodes to track in decomNodeBlocks. A value of 0
      * means no limit.
@@ -348,7 +334,7 @@ private class Monitor implements Runnable {
     private int numBlocksChecked = 0;
     /**
      * The number of nodes that have been checked on this tick. Used for
-     * testing.
+     * statistics.
      */
     private int numNodesChecked = 0;
     /**
@@ -357,10 +343,8 @@ private class Monitor implements Runnable {
     private DatanodeDescriptor iterkey = new DatanodeDescriptor(new
         DatanodeID("", "", "", 0, 0, 0, 0));
 
-    Monitor(int numBlocksPerCheck, int numNodesPerCheck, int
-        maxConcurrentTrackedNodes) {
+    Monitor(int numBlocksPerCheck, int maxConcurrentTrackedNodes) {
       this.numBlocksPerCheck = numBlocksPerCheck;
-      this.numNodesPerCheck = numNodesPerCheck;
       this.maxConcurrentTrackedNodes = maxConcurrentTrackedNodes;
     }
 
@@ -369,12 +353,6 @@ private boolean exceededNumBlocksPerCheck() {
       return numBlocksChecked >= numBlocksPerCheck;
     }
 
-    @Deprecated
-    private boolean exceededNumNodesPerCheck() {
-      LOG.trace("Processed {} nodes so far this tick", numNodesChecked);
-      return numNodesChecked >= numNodesPerCheck;
-    }
-
     @Override
     public void run() {
       if (!namesystem.isRunning()) {
@@ -416,9 +394,7 @@ private void check() {
           it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
       final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
 
-      while (it.hasNext()
-          && !exceededNumBlocksPerCheck()
-          && !exceededNumNodesPerCheck()) {
+      while (it.hasNext() && !exceededNumBlocksPerCheck()) {
         numNodesChecked++;
         final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>
             entry = it.next();
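After this hunk the only per-tick throttle in Monitor.check() is the block budget; numNodesChecked is kept purely as a statistic. A toy, self-contained illustration of that loop shape (the names TickThrottleSketch and pending are hypothetical, and the map stands in for the real decomNodeBlocks structure):

    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;

    final class TickThrottleSketch {
      private final int numBlocksPerCheck;
      private int numBlocksChecked = 0;
      private int numNodesChecked = 0; // statistics only after this change

      TickThrottleSketch(int numBlocksPerCheck) {
        this.numBlocksPerCheck = numBlocksPerCheck;
      }

      private boolean exceededNumBlocksPerCheck() {
        return numBlocksChecked >= numBlocksPerCheck;
      }

      // Stand-in for Monitor.check(): each map entry is a node and the blocks
      // still pending replication on it.
      void check(Map<String, List<Long>> pending) {
        Iterator<Map.Entry<String, List<Long>>> it = pending.entrySet().iterator();
        // Post-patch loop shape: bounded only by the per-tick block budget.
        while (it.hasNext() && !exceededNumBlocksPerCheck()) {
          numNodesChecked++;
          numBlocksChecked += it.next().getValue().size();
        }
      }
    }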
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -1045,32 +1045,6 @@ public void testBlocksPerInterval() throws Exception {
     doDecomCheck(datanodeManager, decomManager, 1);
   }
 
-  @Deprecated
-  @Test(timeout=120000)
-  public void testNodesPerInterval() throws Exception {
-    Configuration newConf = new Configuration(conf);
-    org.apache.log4j.Logger.getLogger(DecommissionManager.class)
-        .setLevel(Level.TRACE);
-    // Set the deprecated configuration key which limits the # of nodes per
-    // interval
-    newConf.setInt("dfs.namenode.decommission.nodes.per.interval", 1);
-    // Disable the normal monitor runs
-    newConf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
-        Integer.MAX_VALUE);
-    startCluster(1, 3, newConf);
-    final FileSystem fs = cluster.getFileSystem();
-    final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
-    final DecommissionManager decomManager = datanodeManager.getDecomManager();
-
-    // Write a 3 block file, so each node has one block. Should scan 1 node
-    // each time.
-    DFSTestUtil.createFile(fs, new Path("/file1"), 64, (short) 3, 0xBAD1DEA);
-    for (int i=0; i<3; i++) {
-      doDecomCheck(datanodeManager, decomManager, 1);
-    }
-  }
-
   private void doDecomCheck(DatanodeManager datanodeManager,
       DecommissionManager decomManager, int expectedNumCheckedNodes)
       throws IOException, ExecutionException, InterruptedException {