HDFS-11368. Erasure Coding: Deprecate replication-related config keys. Contributed by Rakesh R.
parent dd98a8005a
commit de4894936a
@@ -43,8 +43,11 @@ The following table lists the configuration property names that are deprecated i
 | dfs.permissions | dfs.permissions.enabled |
 | dfs.permissions.supergroup | dfs.permissions.superusergroup |
 | dfs.read.prefetch.size | dfs.client.read.prefetch.size |
-| dfs.replication.considerLoad | dfs.namenode.replication.considerLoad |
-| dfs.replication.interval | dfs.namenode.replication.interval |
+| dfs.replication.considerLoad | dfs.namenode.redundancy.considerLoad |
+| dfs.namenode.replication.considerLoad | dfs.namenode.redundancy.considerLoad |
+| dfs.namenode.replication.considerLoad.factor | dfs.namenode.redundancy.considerLoad.factor |
+| dfs.replication.interval | dfs.namenode.redundancy.interval |
+| dfs.namenode.replication.interval | dfs.namenode.redundancy.interval |
 | dfs.replication.min | dfs.namenode.replication.min |
 | dfs.replication.pending.timeout.sec | dfs.namenode.reconstruction.pending.timeout-sec |
 | dfs.namenode.replication.pending.timeout-sec | dfs.namenode.reconstruction.pending.timeout-sec |
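The table only records the name mapping; at runtime Hadoop's `Configuration` deprecation machinery makes a value set under an old name visible under the new one. A minimal sketch of that behavior, assuming the deltas above are registered when `HdfsConfiguration` loads (the class name of the sketch itself is hypothetical):

```java
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Sketch: a value written under a deprecated key should be readable under
// the replacement key once the deprecation deltas have been registered.
public class DeprecatedKeyResolutionSketch {
  public static void main(String[] args) {
    // HdfsConfiguration registers the HDFS deprecation deltas on class load.
    HdfsConfiguration conf = new HdfsConfiguration();

    // Old name from the left-hand column of the table above.
    conf.setBoolean("dfs.replication.considerLoad", false);

    // The new name from the right-hand column resolves to the same value.
    boolean considerLoad =
        conf.getBoolean("dfs.namenode.redundancy.considerLoad", true);
    System.out.println("considerLoad resolved to " + considerLoad); // false
  }
}
```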
@@ -115,9 +115,15 @@ private static void addDeprecatedKeys() {
         new DeprecationDelta("dfs.access.time.precision",
             DeprecatedKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY),
         new DeprecationDelta("dfs.replication.considerLoad",
-            DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY),
+            DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY),
+        new DeprecationDelta("dfs.namenode.replication.considerLoad",
+            DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY),
+        new DeprecationDelta("dfs.namenode.replication.considerLoad.factor",
+            DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR),
         new DeprecationDelta("dfs.replication.interval",
-            DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY),
+            DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY),
+        new DeprecationDelta("dfs.namenode.replication.interval",
+            DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY),
         new DeprecationDelta("dfs.replication.min",
             DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY),
         new DeprecationDelta("dfs.replication.pending.timeout.sec",
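Each `DeprecationDelta` pairs a deprecated property name with the key that replaces it; the surrounding code (not shown in this hunk) hands the whole array to `Configuration.addDeprecations`. A hedged sketch of that registration pattern, using one key pair taken from the hunk above; the wrapper class is illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;

// Sketch of the registration pattern used by addDeprecatedKeys():
// each delta maps a deprecated name to the key that replaces it.
public class DeprecationRegistrationSketch {
  static void addDeprecatedKeys() {
    Configuration.addDeprecations(new DeprecationDelta[] {
        // Example pair from the hunk above.
        new DeprecationDelta("dfs.namenode.replication.interval",
            "dfs.namenode.redundancy.interval.seconds"),
    });
  }

  public static void main(String[] args) {
    addDeprecatedKeys();
    Configuration conf = new Configuration();
    // Writing through the deprecated name...
    conf.set("dfs.namenode.replication.interval", "5");
    // ...is readable through the new name.
    System.out.println(conf.get("dfs.namenode.redundancy.interval.seconds"));
  }
}
```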
@@ -204,10 +204,12 @@ interface DeprecatedKeys {
   String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
   String DFS_NAMENODE_ACCESSTIME_PRECISION_KEY =
       "dfs.namenode.accesstime.precision";
-  String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
-      "dfs.namenode.replication.considerLoad";
-  String DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
-      "dfs.namenode.replication.interval";
+  String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY =
+      "dfs.namenode.redundancy.considerLoad";
+  String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR =
+      "dfs.namenode.redundancy.considerLoad.factor";
+  String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY =
+      "dfs.namenode.redundancy.interval.seconds";
   String DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
   String DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
       "dfs.namenode.reconstruction.pending.timeout-sec";
@@ -194,16 +194,17 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_ACCESSTIME_PRECISION_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
   public static final long DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT = 3600000;
-  public static final String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY =
-      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
-  public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true;
-  public static final String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR =
-      "dfs.namenode.replication.considerLoad.factor";
+  public static final String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY =
+      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY;
+  public static final boolean DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_DEFAULT =
+      true;
+  public static final String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR =
+      "dfs.namenode.redundancy.considerLoad.factor";
   public static final double
-      DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
-  public static final String DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
-      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
-  public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
+      DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR_DEFAULT = 2.0;
+  public static final String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY =
+      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
+  public static final int DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT = 3;
   public static final String DFS_NAMENODE_REPLICATION_MIN_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
@@ -673,8 +673,8 @@ static int run(Collection<URI> namenodes, final BalancerParameters p,
             DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT,
             TimeUnit.SECONDS) * 2000 +
         conf.getTimeDuration(
-            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
-            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT,
+            DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT,
             TimeUnit.SECONDS) * 1000;
     LOG.info("namenodes = " + namenodes);
     LOG.info("parameters = " + p);
@@ -230,8 +230,11 @@ public long getNumTimedOutPendingReconstructions() {
     return pendingReconstruction.getNumTimedOuts();
   }

-  /**replicationRecheckInterval is how often namenode checks for new replication work*/
-  private final long replicationRecheckInterval;
+  /**
+   * redundancyRecheckInterval is how often namenode checks for new
+   * reconstruction work.
+   */
+  private final long redundancyRecheckIntervalMs;

   /** How often to check and the limit for the storageinfo efficiency. */
   private final long storageInfoDefragmentInterval;
@@ -244,8 +247,8 @@ public long getNumTimedOutPendingReconstructions() {
    */
   final BlocksMap blocksMap;

-  /** Replication thread. */
-  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
+  /** Redundancy thread. */
+  private final Daemon redundancyThread = new Daemon(new RedundancyMonitor());

   /** StorageInfoDefragmenter thread. */
   private final Daemon storageInfoDefragmenterThread =
@@ -435,10 +438,10 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
     this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
     this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

-    this.replicationRecheckInterval =
-        conf.getTimeDuration(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
-            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT,
-            TimeUnit.SECONDS) * 1000L;
+    this.redundancyRecheckIntervalMs = conf.getTimeDuration(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT,
+        TimeUnit.SECONDS) * 1000;

     this.storageInfoDefragmentInterval =
       conf.getLong(
@@ -493,7 +496,8 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
     LOG.info("maxReplication = " + maxReplication);
     LOG.info("minReplication = " + minReplication);
     LOG.info("maxReplicationStreams = " + maxReplicationStreams);
-    LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
+    LOG.info("redundancyRecheckInterval = " + redundancyRecheckIntervalMs +
+        "ms");
     LOG.info("encryptDataTransfer = " + encryptDataTransfer);
     LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog);
   }
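The new interval key is read through `Configuration.getTimeDuration`, so values with a time-unit suffix in hdfs-site.xml (the default shipped value is `3s`) are parsed into seconds and then converted to the millisecond recheck interval used by the monitor. A small sketch of that conversion, assuming only the key and default shown in the hunk above:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

// Sketch: how a suffixed duration ("3s", "500ms", "1m") becomes the
// millisecond recheck interval used by the RedundancyMonitor.
public class RecheckIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.redundancy.interval.seconds", "3s");

    long seconds = conf.getTimeDuration(
        "dfs.namenode.redundancy.interval.seconds",
        3 /* DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT */,
        TimeUnit.SECONDS);
    long redundancyRecheckIntervalMs = seconds * 1000;

    System.out.println(redundancyRecheckIntervalMs); // 3000
  }
}
```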
@@ -586,7 +590,7 @@ public BlockTokenSecretManager getBlockTokenSecretManager() {
     return blockTokenSecretManager;
   }

-  /** Allow silent termination of replication monitor for testing */
+  /** Allow silent termination of redundancy monitor for testing. */
   @VisibleForTesting
   void enableRMTerminationForTesting() {
     checkNSRunning = false;
@@ -604,8 +608,8 @@ boolean shouldUpdateBlockKey(final long updateTime) throws IOException {
   public void activate(Configuration conf, long blockTotal) {
     pendingReconstruction.start();
     datanodeManager.activate(conf);
-    this.replicationThread.setName("ReplicationMonitor");
-    this.replicationThread.start();
+    this.redundancyThread.setName("RedundancyMonitor");
+    this.redundancyThread.start();
     storageInfoDefragmenterThread.setName("StorageInfoMonitor");
     storageInfoDefragmenterThread.start();
     this.blockReportThread.start();
@@ -616,10 +620,10 @@ public void activate(Configuration conf, long blockTotal) {
   public void close() {
     bmSafeMode.close();
     try {
-      replicationThread.interrupt();
+      redundancyThread.interrupt();
       storageInfoDefragmenterThread.interrupt();
       blockReportThread.interrupt();
-      replicationThread.join(3000);
+      redundancyThread.join(3000);
       storageInfoDefragmenterThread.join(3000);
       blockReportThread.join(3000);
     } catch (InterruptedException ie) {
@@ -880,7 +884,7 @@ public boolean commitOrCompleteLastBlock(BlockCollection bc,

   /**
    * If IBR is not sent from expected locations yet, add the datanodes to
-   * pendingReconstruction in order to keep ReplicationMonitor from scheduling
+   * pendingReconstruction in order to keep RedundancyMonitor from scheduling
    * the block.
    */
   public void addExpectedReplicasToPending(BlockInfo blk) {
@@ -1884,7 +1888,7 @@ private boolean validateReconstructionWork(BlockReconstructionWork rw) {
     if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum)) {
       neededReconstruction.remove(block, priority);
       rw.resetTargets();
-      blockLog.debug("BLOCK* Removing {} from neededReplications as" +
+      blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
           " it has enough replicas", block);
       return false;
     }
@@ -1910,8 +1914,8 @@ private boolean validateReconstructionWork(BlockReconstructionWork rw) {
     // reconstructions that fail after an appropriate amount of time.
     pendingReconstruction.increment(block,
         DatanodeStorageInfo.toDatanodeDescriptors(targets));
-    blockLog.debug("BLOCK* block {} is moved from neededReplications to "
-        + "pendingReplications", block);
+    blockLog.debug("BLOCK* block {} is moved from neededReconstruction to "
+        + "pendingReconstruction", block);

     int numEffectiveReplicas = numReplicas.liveReplicas() + pendingNum;
     // remove from neededReconstruction
@@ -4298,32 +4302,32 @@ public int numOfUnderReplicatedBlocks() {
   /**
    * Periodically calls computeBlockRecoveryWork().
    */
-  private class ReplicationMonitor implements Runnable {
+  private class RedundancyMonitor implements Runnable {

     @Override
     public void run() {
       while (namesystem.isRunning()) {
         try {
-          // Process replication work only when active NN is out of safe mode.
+          // Process recovery work only when active NN is out of safe mode.
           if (isPopulatingReplQueues()) {
             computeDatanodeWork();
             processPendingReconstructions();
             rescanPostponedMisreplicatedBlocks();
           }
-          Thread.sleep(replicationRecheckInterval);
+          TimeUnit.MILLISECONDS.sleep(redundancyRecheckIntervalMs);
         } catch (Throwable t) {
           if (!namesystem.isRunning()) {
-            LOG.info("Stopping ReplicationMonitor.");
+            LOG.info("Stopping RedundancyMonitor.");
             if (!(t instanceof InterruptedException)) {
-              LOG.info("ReplicationMonitor received an exception"
+              LOG.info("RedundancyMonitor received an exception"
                   + " while shutting down.", t);
             }
             break;
           } else if (!checkNSRunning && t instanceof InterruptedException) {
-            LOG.info("Stopping ReplicationMonitor for testing.");
+            LOG.info("Stopping RedundancyMonitor for testing.");
             break;
           }
-          LOG.error("ReplicationMonitor thread received Runtime exception. ",
+          LOG.error("RedundancyMonitor thread received Runtime exception. ",
               t);
           terminate(1, t);
         }
@@ -4692,6 +4696,14 @@ void enqueue(Runnable action) throws InterruptedException {
     }
   }

+  /**
+   * @return redundancy thread.
+   */
+  @VisibleForTesting
+  Daemon getRedundancyThread() {
+    return redundancyThread;
+  }
+
   public BlockIdManager getBlockIdManager() {
     return blockIdManager;
   }
@@ -83,11 +83,11 @@ public void initialize(Configuration conf, FSClusterStats stats,
                          NetworkTopology clusterMap,
                          Host2NodesMap host2datanodeMap) {
     this.considerLoad = conf.getBoolean(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT);
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_DEFAULT);
     this.considerLoadFactor = conf.getDouble(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR,
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT);
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR,
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR_DEFAULT);
     this.stats = stats;
     this.clusterMap = clusterMap;
     this.host2datanodeMap = host2datanodeMap;
@@ -610,8 +610,8 @@ static int run(Map<URI, List<Path>> namenodes, Configuration conf)
             DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT,
             TimeUnit.SECONDS) * 2000 +
         conf.getTimeDuration(
-            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
-            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT,
+            DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT,
             TimeUnit.SECONDS) * 1000;
     AtomicInteger retryCount = new AtomicInteger(0);
     LOG.info("namenodes = " + namenodes);
@@ -469,11 +469,11 @@ public void prepareToStopStandbyServices() throws ServiceFailedException {
    * (not run or not pass any control commands to DataNodes)
    * on BackupNode:
    * {@link LeaseManager.Monitor} protected by SafeMode.
-   * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
+   * {@link BlockManager.RedundancyMonitor} protected by SafeMode.
    * {@link HeartbeatManager.Monitor} protected by SafeMode.
    * {@link DecommissionManager.Monitor} need to prohibit refreshNodes().
    * {@link PendingReconstructionBlocks.PendingReconstructionMonitor}
-   * harmless, because ReplicationMonitor is muted.
+   * harmless, because RedundancyMonitor is muted.
    */
   @Override
   public void startActiveServices() throws IOException {
@@ -296,14 +296,14 @@
 </property>

 <property>
-  <name>dfs.namenode.replication.considerLoad</name>
+  <name>dfs.namenode.redundancy.considerLoad</name>
   <value>true</value>
   <description>Decide if chooseTarget considers the target's load or not
   </description>
 </property>

 <property>
-  <name>dfs.namenode.replication.considerLoad.factor</name>
+  <name>dfs.namenode.redundancy.considerLoad.factor</name>
   <value>2.0</value>
   <description>The factor by which a node's load can exceed the average
     before being rejected for writes, only if considerLoad is true.
@@ -980,10 +980,10 @@
 </property>

 <property>
-  <name>dfs.namenode.replication.interval</name>
+  <name>dfs.namenode.redundancy.interval.seconds</name>
   <value>3s</value>
   <description>The periodicity in seconds with which the namenode computes
-    replication work for datanodes. Support multiple time unit suffix(case insensitive),
+    low redundancy work for datanodes. Support multiple time unit suffix(case insensitive),
     as described in dfs.heartbeat.interval.
   </description>
 </property>
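The description above says a node is rejected for writes when its load exceeds the cluster average by more than the configured factor, and only when considerLoad is enabled. A hedged sketch of that check using the renamed keys; everything except the config key names and defaults is illustrative, not the actual BlockPlacementPolicyDefault code:

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative load check: reject a datanode whose load is more than
// considerLoadFactor times the cluster average, if considerLoad is enabled.
public class ConsiderLoadSketch {
  private final boolean considerLoad;
  private final double considerLoadFactor;

  ConsiderLoadSketch(Configuration conf) {
    this.considerLoad =
        conf.getBoolean("dfs.namenode.redundancy.considerLoad", true);
    this.considerLoadFactor =
        conf.getDouble("dfs.namenode.redundancy.considerLoad.factor", 2.0);
  }

  boolean isOverloaded(double nodeLoad, double avgLoad) {
    return considerLoad && nodeLoad > considerLoadFactor * avgLoad;
  }
}
```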
@ -90,14 +90,14 @@ public void setup() throws IOException {
|
||||
}
|
||||
|
||||
// Setup conf
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
|
||||
200);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
|
||||
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
|
||||
BLOCKREPORT_INTERVAL_MSEC);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
NAMENODE_REPLICATION_INTERVAL);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
|
||||
|
||||
|
@ -58,7 +58,7 @@ public class TestBlockStoragePolicy {
|
||||
static {
|
||||
conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite();
|
||||
DEFAULT_STORAGE_POLICY = POLICY_SUITE.getDefaultPolicy();
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ public void setup() throws IOException {
|
||||
int numDNs = dataBlocks + parityBlocks + 2;
|
||||
conf = new Configuration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
if (ErasureCodeNative.isNativeCodeLoaded()) {
|
||||
|
@ -214,7 +214,7 @@ private void tearDown() {
|
||||
private HdfsConfiguration newHdfsConfiguration() {
|
||||
final HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
|
@ -111,26 +111,26 @@ public void setup() throws IOException {
|
||||
writeConfigFile(excludeFile, null);
|
||||
|
||||
// Setup conf
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
|
||||
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
|
||||
2000);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
|
||||
BLOCKREPORT_INTERVAL_MSEC);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
|
||||
4);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
NAMENODE_REPLICATION_INTERVAL);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setInt(
|
||||
DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
|
||||
cellSize - 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
|
||||
numDNs = dataBlocks + parityBlocks + 2;
|
||||
|
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;

 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;

 import org.apache.hadoop.conf.Configuration;
 import org.junit.Test;
@@ -33,8 +34,23 @@ public void testDeprecatedKeys() throws Exception {
     String scriptFile = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
     assertTrue(scriptFile.equals("xyz")) ;
     conf.setInt("dfs.replication.interval", 1);
-    String alpha = DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY;
-    int repInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3) ;
-    assertTrue(repInterval == 1) ;
+    int redundancyInterval = conf
+        .getInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 3);
+    assertTrue(redundancyInterval == 1);
+    int repInterval = conf.getInt("dfs.replication.interval", 3);
+    assertTrue(repInterval == 1);
+    repInterval = conf.getInt("dfs.namenode.replication.interval", 3);
+    assertTrue(repInterval == 1);
+
+    conf.setBoolean("dfs.replication.considerLoad", false);
+    assertFalse(conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, true));
+    assertFalse(conf.getBoolean("dfs.replication.considerLoad", true));
+
+    conf.setDouble("dfs.namenode.replication.considerLoad.factor", 5.0);
+    assertTrue(5.0 == conf.getDouble(
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 2.0));
+    assertTrue(5.0 == conf
+        .getDouble("dfs.namenode.replication.considerLoad.factor", 2.0));
   }
 }
@ -329,7 +329,7 @@ public void testLongLivedClientPipelineRecovery()
|
||||
// client only retries establishing pipeline with the 4th node.
|
||||
int numDataNodes = 4;
|
||||
// do not consider load factor when selecting a data node
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
setEncryptionConfigKeys();
|
||||
|
||||
|
@ -86,7 +86,7 @@ public void setUp() throws Exception {
|
||||
// handle under-replicated blocks quickly (for replication asserts)
|
||||
conf.setInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 5);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
|
||||
// handle failures in the DFSClient pipeline quickly
|
||||
// (for cluster.shutdown(); fs.close() idiom)
|
||||
|
@ -75,7 +75,7 @@ public void setup() throws IOException {
|
||||
int numDNs = dataBlocks + parityBlocks + 2;
|
||||
conf = new Configuration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
|
@ -85,7 +85,7 @@ public void setup() throws IOException {
|
||||
conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
|
@ -59,7 +59,8 @@ public void testMissingBlocksAlert()
|
||||
try {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
//minimize test delay
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
0);
|
||||
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
|
||||
int fileLen = 10*1024;
|
||||
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
|
||||
|
@ -99,7 +99,8 @@ public void setup() throws IOException {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, 0);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
|
||||
fs = cluster.getFileSystem();
|
||||
|
@ -95,8 +95,8 @@ public void setup() throws IOException {
|
||||
conf.setInt(
|
||||
DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
|
||||
cellSize - 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
if (ErasureCodeNative.isNativeCodeLoaded()) {
|
||||
conf.set(
|
||||
|
@ -118,7 +118,8 @@ public void testDefaultPolicy() throws Exception {
|
||||
public void testReplaceDatanodeOnFailure() throws Exception {
|
||||
final Configuration conf = new HdfsConfiguration();
|
||||
// do not consider load factor when selecting a data node
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
//always replace a datanode
|
||||
ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
|
||||
|
||||
|
@ -228,7 +228,8 @@ public void testBadBlockReportOnTransferMissingBlockFile() throws Exception {
|
||||
*/
|
||||
public void runReplication(boolean simulated) throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
if (simulated) {
|
||||
SimulatedFSDataset.setFactory(conf);
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ public class TestWriteReadStripedFile {
|
||||
@Before
|
||||
public void setup() throws IOException {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
fs = cluster.getFileSystem();
|
||||
|
@ -179,7 +179,8 @@ static void initConf(Configuration conf) {
|
||||
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1L);
|
||||
SimulatedFSDataset.setFactory(conf);
|
||||
|
||||
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
|
||||
@ -212,10 +213,12 @@ static void initConfWithRamDisk(Configuration conf,
|
||||
|
||||
void initConfWithStripe(Configuration conf) {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
SimulatedFSDataset.setFactory(conf);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
|
||||
conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
|
||||
}
|
||||
@ -922,11 +925,13 @@ private void runBalancer(Configuration conf, long totalUsedSpace,
|
||||
private static int runBalancer(Collection<URI> namenodes,
|
||||
final BalancerParameters p,
|
||||
Configuration conf) throws IOException, InterruptedException {
|
||||
final long sleeptime =
|
||||
conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 +
|
||||
conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
|
||||
final long sleeptime = conf.getLong(
|
||||
DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
|
||||
DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000
|
||||
+ conf.getLong(
|
||||
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT)
|
||||
* 1000;
|
||||
LOG.info("namenodes = " + namenodes);
|
||||
LOG.info("parameters = " + p);
|
||||
LOG.info("Print stack trace", new Throwable());
|
||||
@ -1603,7 +1608,7 @@ public void testBalancerDuringUpgrade() throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
|
||||
conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
|
||||
|
||||
@ -1670,7 +1675,8 @@ public void testTwoReplicaShouldNotInSameDN() throws Exception {
|
||||
int blockSize = 5 * 1024 * 1024 ;
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1L);
|
||||
|
||||
conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
|
||||
|
||||
|
@@ -105,25 +105,24 @@ private static int getNumberOfRacks(final BlockManager blockManager,
   }

   /**
-   * @return replication monitor thread instance from block manager.
+   * @return redundancy monitor thread instance from block manager.
    */
-  public static Daemon getReplicationThread(final BlockManager blockManager)
-  {
-    return blockManager.replicationThread;
+  public static Daemon getRedundancyThread(final BlockManager blockManager) {
+    return blockManager.getRedundancyThread();
   }


   /**
-   * Stop the replication monitor thread
+   * Stop the redundancy monitor thread.
    */
-  public static void stopReplicationThread(final BlockManager blockManager)
+  public static void stopRedundancyThread(final BlockManager blockManager)
       throws IOException {
     blockManager.enableRMTerminationForTesting();
-    blockManager.replicationThread.interrupt();
+    blockManager.getRedundancyThread().interrupt();
     try {
-      blockManager.replicationThread.join();
-    } catch(InterruptedException ie) {
+      blockManager.getRedundancyThread().join();
+    } catch (InterruptedException ie) {
       throw new IOException(
-          "Interrupted while trying to stop ReplicationMonitor");
+          "Interrupted while trying to stop RedundancyMonitor");
     }
   }
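`stopRedundancyThread` exists so tests and benchmarks can silence block scheduling before driving the block manager directly; NNThroughputBenchmark does exactly that later in this diff. A hedged usage sketch; the package paths and the helper class wrapping the call are assumptions for illustration:

```java
import java.io.IOException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;

// Sketch of test usage: stop the RedundancyMonitor so the test controls
// when (and whether) reconstruction work is computed.
public class RedundancyThreadTestSketch {
  static void freezeRedundancyWork(BlockManager blockManager)
      throws IOException {
    // enableRMTerminationForTesting() inside lets the monitor exit quietly,
    // then the thread is interrupted and joined.
    BlockManagerTestUtil.stopRedundancyThread(blockManager);
  }
}
```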
@ -62,9 +62,9 @@ private Configuration getConf() {
|
||||
// commands quickly (as replies to heartbeats)
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
|
||||
// Have the NN ReplicationMonitor compute the replication and
|
||||
// Have the NN RedundancyMonitor compute the reconstruction and
|
||||
// invalidation commands to send DNs every second.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
|
||||
// Have the NN check for pending replications every second so it
|
||||
// quickly schedules additional replicas as they are identified.
|
||||
|
@ -56,7 +56,7 @@ public void testNodeCount() throws Exception {
|
||||
60);
|
||||
|
||||
// reduce intervals to make test execution time shorter
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
|
||||
// start a mini dfs cluster of 2 nodes
|
||||
|
@ -65,7 +65,7 @@ public void setUp() throws Exception {
|
||||
// set the block report interval to 2s
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 2000);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
// disable the RPC timeout for debug
|
||||
conf.setLong(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, 0);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
|
||||
|
@ -374,7 +374,7 @@ public void testPendingAndInvalidate() throws Exception {
|
||||
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
|
||||
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
|
||||
DFS_REPLICATION_INTERVAL);
|
||||
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
|
||||
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
DFS_REPLICATION_INTERVAL);
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
|
||||
DATANODE_COUNT).build();
|
||||
|
@ -96,8 +96,8 @@ private static String[] getRacks(int numHosts, int numRacks) {
|
||||
|
||||
@BeforeClass
|
||||
public static void setup() throws Exception {
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
|
||||
}
|
||||
@ -139,7 +139,7 @@ private DataNode getDataNode(String host) {
|
||||
*
|
||||
* In this test, we first need to create a scenario that a striped block has
|
||||
* all the internal blocks but distributed in <6 racks. Then we check if the
|
||||
* replication monitor can correctly schedule the reconstruction work for it.
|
||||
* redundancy monitor can correctly schedule the reconstruction work for it.
|
||||
*/
|
||||
@Test
|
||||
public void testReconstructForNotEnoughRacks() throws Exception {
|
||||
@ -194,7 +194,7 @@ public void testReconstructForNotEnoughRacks() throws Exception {
|
||||
fsn.writeUnlock();
|
||||
}
|
||||
|
||||
// check if replication monitor correctly schedule the replication work
|
||||
// check if redundancy monitor correctly schedule the reconstruction work.
|
||||
boolean scheduled = false;
|
||||
for (int i = 0; i < 5; i++) { // retry 5 times
|
||||
for (DatanodeStorageInfo storage : blockInfo.storages) {
|
||||
|
@ -836,7 +836,7 @@ public void testReplicationWithPriority() throws Exception {
|
||||
int DFS_NAMENODE_REPLICATION_INTERVAL = 1000;
|
||||
int HIGH_PRIORITY = 0;
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
|
||||
.format(true).build();
|
||||
try {
|
||||
|
@ -53,7 +53,7 @@ public static Iterable<Object[]> data() {
|
||||
@Override
|
||||
DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
|
||||
conf.setDouble(DFSConfigKeys
|
||||
.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR, 1.2);
|
||||
.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 1.2);
|
||||
final String[] racks = {
|
||||
"/rack1",
|
||||
"/rack1",
|
||||
|
@ -78,7 +78,7 @@ public void setup() throws IOException {
|
||||
conf = new Configuration();
|
||||
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.waitActive();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
|
||||
|
@ -104,8 +104,8 @@ public void testCorruptFilesAreDiscarded()
|
||||
Thread.sleep(30000L);
|
||||
assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1));
|
||||
|
||||
// Next, wait for the replication monitor to mark the file as corrupt
|
||||
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
|
||||
// Next, wait for the redundancy monitor to mark the file as corrupt.
|
||||
Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
|
||||
|
||||
// Wait for the LazyPersistFileScrubber to run
|
||||
Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
|
||||
@ -137,8 +137,8 @@ public void testDisableLazyPersistFileScrubber()
|
||||
cluster.shutdownDataNodes();
|
||||
Thread.sleep(30000L);
|
||||
|
||||
// Next, wait for the replication monitor to mark the file as corrupt
|
||||
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
|
||||
// Next, wait for the redundancy monitor to mark the file as corrupt.
|
||||
Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
|
||||
|
||||
// Wait for the LazyPersistFileScrubber to run
|
||||
Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000);
|
||||
@ -164,8 +164,8 @@ public void testFileShouldNotDiscardedIfNNRestarted()
|
||||
|
||||
cluster.restartNameNodes();
|
||||
|
||||
// wait for the replication monitor to mark the file as corrupt
|
||||
Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000);
|
||||
// wait for the redundancy monitor to mark the file as corrupt.
|
||||
Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
|
||||
|
||||
Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
|
||||
.getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
|
||||
|
@ -105,7 +105,8 @@ static void initConf(Configuration conf) {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
|
||||
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
|
||||
}
|
||||
|
||||
@ -482,8 +483,10 @@ public void testMoverFailedRetry() throws Exception {
|
||||
void initConfWithStripe(Configuration conf) {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1L);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
}
|
||||
|
||||
@Test(timeout = 300000)
|
||||
|
@ -93,8 +93,8 @@ public class TestStorageMover {
|
||||
static {
|
||||
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
|
||||
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
|
||||
2L);
|
||||
DEFAULT_CONF.setLong(
|
||||
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 2L);
|
||||
DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L);
|
||||
|
||||
DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite();
|
||||
|
@ -1237,8 +1237,8 @@ void printResults() {
|
||||
} // end BlockReportStats
|
||||
|
||||
/**
|
||||
* Measures how fast replication monitor can compute data-node work.
|
||||
*
|
||||
* Measures how fast redundancy monitor can compute data-node work.
|
||||
*
|
||||
* It runs only one thread until no more work can be scheduled.
|
||||
*/
|
||||
class ReplicationStats extends OperationStatsBase {
|
||||
@ -1265,7 +1265,7 @@ class ReplicationStats extends OperationStatsBase {
|
||||
parseArguments(args);
|
||||
// number of operations is 4 times the number of decommissioned
|
||||
// blocks divided by the number of needed replications scanned
|
||||
// by the replication monitor in one iteration
|
||||
// by the redundancy monitor in one iteration
|
||||
numOpsRequired = (totalBlocks*replication*nodesToDecommission*2)
|
||||
/ (numDatanodes*numDatanodes);
|
||||
|
||||
@ -1314,8 +1314,8 @@ void generateInputs(int[] ignore) throws IOException {
|
||||
|
||||
// start data-nodes; create a bunch of files; generate block reports.
|
||||
blockReportObject.generateInputs(ignore);
|
||||
// stop replication monitor
|
||||
BlockManagerTestUtil.stopReplicationThread(namesystem.getBlockManager());
|
||||
// stop redundancy monitor thread.
|
||||
BlockManagerTestUtil.stopRedundancyThread(namesystem.getBlockManager());
|
||||
|
||||
// report blocks once
|
||||
int nrDatanodes = blockReportObject.getNumDatanodes();
|
||||
|
@ -74,7 +74,7 @@ public void setup() throws IOException {
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
// disable block recovery
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
SimulatedFSDataset.setFactory(conf);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
|
@ -78,7 +78,7 @@ public class TestDecommissioningStatus {
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
conf = new HdfsConfiguration();
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
|
||||
// Set up the hosts/exclude files.
|
||||
@ -89,7 +89,8 @@ public void setUp() throws Exception {
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
conf.setInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1000);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
|
||||
conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
|
||||
|
||||
|
@ -156,7 +156,7 @@ public void testReplicationAdjusted() throws Exception {
|
||||
// start a cluster
|
||||
Configuration conf = getConf();
|
||||
// Replicate and heartbeat fast to shave a few seconds off test
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
|
||||
MiniDFSCluster cluster = null;
|
||||
|
@ -76,9 +76,9 @@ private Configuration getConf() {
|
||||
// commands quickly (as replies to heartbeats)
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
|
||||
// Have the NN ReplicationMonitor compute the replication and
|
||||
// Have the NN RedundancyMonitor compute the low redundant blocks and
|
||||
// invalidation commands to send DNs every second.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
|
||||
// Have the NN check for pending replications every second so it
|
||||
// quickly schedules additional replicas as they are identified.
|
||||
|
@ -64,8 +64,9 @@ public void setUp() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
|
||||
// High value of replication interval
|
||||
// so that blocks remain under-replicated
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
|
||||
// so that blocks remain less redundant
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1000);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 5L);
|
||||
|
@ -192,7 +192,8 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy)
|
||||
public void test2RecoveryTasksForSameBlockGroup() throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1000);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
|
||||
.build();
|
||||
@ -255,8 +256,8 @@ private static int getNumberOfBlocksToBeErasureCoded(MiniDFSCluster cluster)
|
||||
@Test
|
||||
public void testCountLiveReplicas() throws Exception {
|
||||
final HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
|
||||
.build();
|
||||
@ -299,7 +300,7 @@ public void testCountLiveReplicas() throws Exception {
|
||||
FSNamesystem fsn = cluster.getNamesystem();
|
||||
BlockManager bm = fsn.getBlockManager();
|
||||
|
||||
Thread.sleep(3000); // wait 3 running cycles of replication monitor
|
||||
Thread.sleep(3000); // wait 3 running cycles of redundancy monitor
|
||||
for (DataNode dn : cluster.getDataNodes()) {
|
||||
DataNodeTestUtils.triggerHeartbeat(dn);
|
||||
}
|
||||
|
@ -307,8 +307,9 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
|
||||
int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
|
||||
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1L);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
|
||||
// start 10 datanodes
|
||||
|
@ -87,9 +87,10 @@ public class TestDNFencing {
|
||||
public void setupCluster() throws Exception {
|
||||
conf = new Configuration();
|
||||
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
|
||||
// Bump up replication interval so that we only run replication
|
||||
// Bump up redundancy interval so that we only run low redundancy
|
||||
// checks explicitly.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 600);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
600);
|
||||
// Increase max streams so that we re-replicate quickly.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
|
||||
// See RandomDeleterPolicy javadoc.
|
||||
@ -167,7 +168,7 @@ public void testDnFencing() throws Exception {
|
||||
// The blocks should no longer be postponed.
|
||||
assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
|
||||
|
||||
// Wait for NN2 to enact its deletions (replication monitor has to run, etc)
|
||||
// Wait for NN2 to enact its deletions (redundancy monitor has to run, etc)
|
||||
BlockManagerTestUtil.computeInvalidationWork(
|
||||
nn2.getNamesystem().getBlockManager());
|
||||
cluster.triggerHeartbeats();
|
||||
@ -258,7 +259,7 @@ public void testNNClearsCommandsOnFailoverAfterStartup()
|
||||
// The block should no longer be postponed.
|
||||
assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
|
||||
|
||||
// Wait for NN2 to enact its deletions (replication monitor has to run, etc)
|
||||
// Wait for NN2 to enact its deletions (redundancy monitor has to run, etc)
|
||||
BlockManagerTestUtil.computeInvalidationWork(
|
||||
nn2.getNamesystem().getBlockManager());
|
||||
|
||||
@ -358,7 +359,7 @@ public void testNNClearsCommandsOnFailoverWithReplChanges()
|
||||
// The block should no longer be postponed.
|
||||
assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
|
||||
|
||||
// Wait for NN2 to enact its deletions (replication monitor has to run, etc)
|
||||
// Wait for NN2 to enact its deletions (redundancy monitor has to run, etc)
|
||||
BlockManagerTestUtil.computeInvalidationWork(
|
||||
nn2.getNamesystem().getBlockManager());
|
||||
|
||||
|
@ -110,7 +110,7 @@ public void testFencingStress() throws Exception {
|
||||
harness.conf.setInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
|
||||
harness.conf.setInt(
|
||||
DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
|
||||
final MiniDFSCluster cluster = harness.startCluster();
|
||||
try {
|
||||
|
@ -142,8 +142,9 @@ private void doWriteOverFailoverTest(TestScenario scenario,
|
||||
MethodToTestIdempotence methodToTest) throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
|
||||
// Don't check replication periodically.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
|
||||
// Don't check low redundancy periodically.
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
1000);
|
||||
|
||||
FSDataOutputStream stm = null;
|
||||
MiniDFSCluster cluster = newMiniCluster(conf, 3);
|
||||
|
@ -45,7 +45,7 @@ public class TestNNMetricFilesInGetListingOps {
|
||||
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
|
||||
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
|
||||
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
|
||||
}
|
||||
|
||||
private MiniDFSCluster cluster;
|
||||
|
@ -78,7 +78,7 @@
|
||||
*/
|
||||
public class TestNameNodeMetrics {
|
||||
private static final Configuration CONF = new HdfsConfiguration();
|
||||
private static final int DFS_REPLICATION_INTERVAL = 1;
|
||||
private static final int DFS_REDUNDANCY_INTERVAL = 1;
|
||||
private static final Path TEST_ROOT_DIR_PATH =
|
||||
new Path("/testNameNodeMetrics");
|
||||
private static final String NN_METRICS = "NameNodeActivity";
|
||||
@ -96,9 +96,9 @@ public class TestNameNodeMetrics {
|
||||
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
|
||||
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
|
||||
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
|
||||
DFS_REPLICATION_INTERVAL);
|
||||
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
|
||||
DFS_REPLICATION_INTERVAL);
|
||||
DFS_REDUNDANCY_INTERVAL);
|
||||
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
|
||||
DFS_REDUNDANCY_INTERVAL);
|
||||
CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
|
||||
"" + PERCENTILES_INTERVAL);
|
||||
// Enable stale DataNodes checking
|
||||
@ -333,7 +333,7 @@ public void testMissingBlock() throws Exception {
|
||||
private void waitForDeletion() throws InterruptedException {
|
||||
// Wait for more than DATANODE_COUNT replication intervals to ensure all
|
||||
// the blocks pending deletion are sent for deletion to the datanodes.
|
||||
Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
|
||||
Thread.sleep(DFS_REDUNDANCY_INTERVAL * (DATANODE_COUNT + 1) * 1000);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -364,7 +364,7 @@ private MetricsRecordBuilder waitForDnMetricValue(String source,
|
||||
rb = getMetrics(source);
|
||||
gauge = MetricsAsserts.getLongGauge(name, rb);
|
||||
while (gauge != expected && (--retries > 0)) {
|
||||
Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
|
||||
Thread.sleep(DFS_REDUNDANCY_INTERVAL * 500);
|
||||
rb = getMetrics(source);
|
||||
gauge = MetricsAsserts.getLongGauge(name, rb);
|
||||
}
|
||||
|