From de4894936a5b581572f35fa5b8979d9f23da0891 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Wed, 23 Nov 2016 16:42:06 -0800
Subject: [PATCH] HDFS-11368. Erasure Coding: Deprecate replication-related
 config keys. Contributed by Rakesh R.

---
 .../src/site/markdown/DeprecatedProperties.md | 7 ++-
 .../apache/hadoop/hdfs/HdfsConfiguration.java | 10 ++-
 .../hdfs/client/HdfsClientConfigKeys.java | 10 +--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 19 +++---
 .../hadoop/hdfs/server/balancer/Balancer.java | 4 +-
 .../server/blockmanagement/BlockManager.java | 62 +++++++++++--------
 .../BlockPlacementPolicyDefault.java | 8 +--
 .../hadoop/hdfs/server/mover/Mover.java | 4 +-
 .../hdfs/server/namenode/BackupNode.java | 4 +-
 .../src/main/resources/hdfs-default.xml | 8 +--
 .../hadoop/hdfs/AdminStatesBaseTest.java | 4 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 2 +-
 .../hdfs/TestDFSStripedOutputStream.java | 2 +-
 ...TestDFSStripedOutputStreamWithFailure.java | 2 +-
 .../hdfs/TestDecommissionWithStriped.java | 10 +-
 .../hadoop/hdfs/TestDeprecatedKeys.java | 22 ++++++-
 .../hadoop/hdfs/TestEncryptedTransfer.java | 2 +-
 .../apache/hadoop/hdfs/TestFileAppend4.java | 2 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java | 2 +-
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java | 2 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java | 3 +-
 .../hdfs/TestReadStripedFileWithDecoding.java | 3 +-
 .../hdfs/TestReconstructStripedFile.java | 4 +-
 .../hdfs/TestReplaceDatanodeOnFailure.java | 3 +-
 .../apache/hadoop/hdfs/TestReplication.java | 3 +-
 .../hadoop/hdfs/TestWriteReadStripedFile.java | 2 +-
 .../hdfs/server/balancer/TestBalancer.java | 26 +++++---
 .../blockmanagement/BlockManagerTestUtil.java | 21 +++----
 .../TestBlocksWithNotEnoughRacks.java | 4 +-
 .../server/blockmanagement/TestNodeCount.java | 2 +-
 .../TestPendingInvalidateBlock.java | 2 +-
 .../TestPendingReconstruction.java | 2 +-
 ...nstructStripedBlocksWithRackAwareness.java | 8 +--
 .../TestReplicationPolicy.java | 2 +-
 .../TestReplicationPolicyConsiderLoad.java | 2 +-
 .../TestDataNodeErasureCodingMetrics.java | 2 +-
 .../fsdataset/impl/TestLazyPersistFiles.java | 12 ++--
 .../hadoop/hdfs/server/mover/TestMover.java | 9 ++-
 .../hdfs/server/mover/TestStorageMover.java | 4 +-
 .../namenode/NNThroughputBenchmark.java | 10 +--
 .../TestAddOverReplicatedStripedBlocks.java | 2 +-
 .../namenode/TestDecommissioningStatus.java | 5 +-
 .../server/namenode/TestFSEditLogLoader.java | 2 +-
 .../hdfs/server/namenode/TestHostsFiles.java | 4 +-
 .../hdfs/server/namenode/TestMetaSave.java | 5 +-
 .../TestReconstructStripedBlocks.java | 9 +--
 .../server/namenode/TestStripedINodeFile.java | 5 +-
 .../server/namenode/ha/TestDNFencing.java | 11 ++--
 .../ha/TestDNFencingWithReplication.java | 2 +-
 .../namenode/ha/TestPipelinesFailover.java | 5 +-
 .../TestNNMetricFilesInGetListingOps.java | 2 +-
 .../namenode/metrics/TestNameNodeMetrics.java | 12 ++--
 52 files changed, 216 insertions(+), 158 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
index 8de1be21f1..38a1954fa5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
@@ -43,8 +43,11 @@ The following table lists the configuration property names that are deprecated i
 | dfs.permissions | dfs.permissions.enabled |
 | dfs.permissions.supergroup | dfs.permissions.superusergroup |
 | 
dfs.read.prefetch.size | dfs.client.read.prefetch.size | -| dfs.replication.considerLoad | dfs.namenode.replication.considerLoad | -| dfs.replication.interval | dfs.namenode.replication.interval | +| dfs.replication.considerLoad | dfs.namenode.redundancy.considerLoad | +| dfs.namenode.replication.considerLoad | dfs.namenode.redundancy.considerLoad | +| dfs.namenode.replication.considerLoad.factor | dfs.namenode.redundancy.considerLoad.factor | +| dfs.replication.interval | dfs.namenode.redundancy.interval | +| dfs.namenode.replication.interval | dfs.namenode.redundancy.interval | | dfs.replication.min | dfs.namenode.replication.min | | dfs.replication.pending.timeout.sec | dfs.namenode.reconstruction.pending.timeout-sec | | dfs.namenode.replication.pending.timeout-sec | dfs.namenode.reconstruction.pending.timeout-sec | diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java index 3f6eae0e9a..4d4ade2229 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java @@ -115,9 +115,15 @@ private static void addDeprecatedKeys() { new DeprecationDelta("dfs.access.time.precision", DeprecatedKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY), new DeprecationDelta("dfs.replication.considerLoad", - DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY), + DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY), + new DeprecationDelta("dfs.namenode.replication.considerLoad", + DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY), + new DeprecationDelta("dfs.namenode.replication.considerLoad.factor", + DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR), new DeprecationDelta("dfs.replication.interval", - DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY), + DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY), + new DeprecationDelta("dfs.namenode.replication.interval", + DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY), new DeprecationDelta("dfs.replication.min", DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY), new DeprecationDelta("dfs.replication.pending.timeout.sec", diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java index 4c754d9a52..7ad79e001b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java @@ -204,10 +204,12 @@ interface DeprecatedKeys { String DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id"; String DFS_NAMENODE_ACCESSTIME_PRECISION_KEY = "dfs.namenode.accesstime.precision"; - String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY = - "dfs.namenode.replication.considerLoad"; - String DFS_NAMENODE_REPLICATION_INTERVAL_KEY = - "dfs.namenode.replication.interval"; + String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY = + "dfs.namenode.redundancy.considerLoad"; + String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR = + "dfs.namenode.redundancy.considerLoad.factor"; + String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY = + "dfs.namenode.redundancy.interval.seconds"; String 
DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min"; String DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.reconstruction.pending.timeout-sec"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b9fd939725..d7d3c9dd6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -194,16 +194,17 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_ACCESSTIME_PRECISION_KEY = HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY; public static final long DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT = 3600000; - public static final String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY = - HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY; - public static final boolean DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT = true; - public static final String DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR = - "dfs.namenode.replication.considerLoad.factor"; + public static final String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY = + HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY; + public static final boolean DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_DEFAULT = + true; + public static final String DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR = + "dfs.namenode.redundancy.considerLoad.factor"; public static final double - DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT = 2.0; - public static final String DFS_NAMENODE_REPLICATION_INTERVAL_KEY = - HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY; - public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3; + DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR_DEFAULT = 2.0; + public static final String DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY = + HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY; + public static final int DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT = 3; public static final String DFS_NAMENODE_REPLICATION_MIN_KEY = HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index 61352f3f54..75eecde1b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -673,8 +673,8 @@ static int run(Collection namenodes, final BalancerParameters p, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS) * 2000 + conf.getTimeDuration( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT, TimeUnit.SECONDS) * 1000; LOG.info("namenodes = " + namenodes); LOG.info("parameters = " + p); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 2f57571ce2..1b744e768f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -230,8 +230,11 @@ public long getNumTimedOutPendingReconstructions() { return pendingReconstruction.getNumTimedOuts(); } - /**replicationRecheckInterval is how often namenode checks for new replication work*/ - private final long replicationRecheckInterval; + /** + * redundancyRecheckInterval is how often namenode checks for new + * reconstruction work. + */ + private final long redundancyRecheckIntervalMs; /** How often to check and the limit for the storageinfo efficiency. */ private final long storageInfoDefragmentInterval; @@ -244,8 +247,8 @@ public long getNumTimedOutPendingReconstructions() { */ final BlocksMap blocksMap; - /** Replication thread. */ - final Daemon replicationThread = new Daemon(new ReplicationMonitor()); + /** Redundancy thread. */ + private final Daemon redundancyThread = new Daemon(new RedundancyMonitor()); /** StorageInfoDefragmenter thread. */ private final Daemon storageInfoDefragmenterThread = @@ -435,10 +438,10 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled, this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf); this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf); - this.replicationRecheckInterval = - conf.getTimeDuration(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT, - TimeUnit.SECONDS) * 1000L; + this.redundancyRecheckIntervalMs = conf.getTimeDuration( + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT, + TimeUnit.SECONDS) * 1000; this.storageInfoDefragmentInterval = conf.getLong( @@ -493,7 +496,8 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled, LOG.info("maxReplication = " + maxReplication); LOG.info("minReplication = " + minReplication); LOG.info("maxReplicationStreams = " + maxReplicationStreams); - LOG.info("replicationRecheckInterval = " + replicationRecheckInterval); + LOG.info("redundancyRecheckInterval = " + redundancyRecheckIntervalMs + + "ms"); LOG.info("encryptDataTransfer = " + encryptDataTransfer); LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog); } @@ -586,7 +590,7 @@ public BlockTokenSecretManager getBlockTokenSecretManager() { return blockTokenSecretManager; } - /** Allow silent termination of replication monitor for testing */ + /** Allow silent termination of redundancy monitor for testing. 
*/ @VisibleForTesting void enableRMTerminationForTesting() { checkNSRunning = false; @@ -604,8 +608,8 @@ boolean shouldUpdateBlockKey(final long updateTime) throws IOException { public void activate(Configuration conf, long blockTotal) { pendingReconstruction.start(); datanodeManager.activate(conf); - this.replicationThread.setName("ReplicationMonitor"); - this.replicationThread.start(); + this.redundancyThread.setName("RedundancyMonitor"); + this.redundancyThread.start(); storageInfoDefragmenterThread.setName("StorageInfoMonitor"); storageInfoDefragmenterThread.start(); this.blockReportThread.start(); @@ -616,10 +620,10 @@ public void activate(Configuration conf, long blockTotal) { public void close() { bmSafeMode.close(); try { - replicationThread.interrupt(); + redundancyThread.interrupt(); storageInfoDefragmenterThread.interrupt(); blockReportThread.interrupt(); - replicationThread.join(3000); + redundancyThread.join(3000); storageInfoDefragmenterThread.join(3000); blockReportThread.join(3000); } catch (InterruptedException ie) { @@ -880,7 +884,7 @@ public boolean commitOrCompleteLastBlock(BlockCollection bc, /** * If IBR is not sent from expected locations yet, add the datanodes to - * pendingReconstruction in order to keep ReplicationMonitor from scheduling + * pendingReconstruction in order to keep RedundancyMonitor from scheduling * the block. */ public void addExpectedReplicasToPending(BlockInfo blk) { @@ -1884,7 +1888,7 @@ private boolean validateReconstructionWork(BlockReconstructionWork rw) { if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum)) { neededReconstruction.remove(block, priority); rw.resetTargets(); - blockLog.debug("BLOCK* Removing {} from neededReplications as" + + blockLog.debug("BLOCK* Removing {} from neededReconstruction as" + " it has enough replicas", block); return false; } @@ -1910,8 +1914,8 @@ private boolean validateReconstructionWork(BlockReconstructionWork rw) { // reconstructions that fail after an appropriate amount of time. pendingReconstruction.increment(block, DatanodeStorageInfo.toDatanodeDescriptors(targets)); - blockLog.debug("BLOCK* block {} is moved from neededReplications to " - + "pendingReplications", block); + blockLog.debug("BLOCK* block {} is moved from neededReconstruction to " + + "pendingReconstruction", block); int numEffectiveReplicas = numReplicas.liveReplicas() + pendingNum; // remove from neededReconstruction @@ -4298,32 +4302,32 @@ public int numOfUnderReplicatedBlocks() { /** * Periodically calls computeBlockRecoveryWork(). */ - private class ReplicationMonitor implements Runnable { + private class RedundancyMonitor implements Runnable { @Override public void run() { while (namesystem.isRunning()) { try { - // Process replication work only when active NN is out of safe mode. + // Process recovery work only when active NN is out of safe mode. 
if (isPopulatingReplQueues()) { computeDatanodeWork(); processPendingReconstructions(); rescanPostponedMisreplicatedBlocks(); } - Thread.sleep(replicationRecheckInterval); + TimeUnit.MILLISECONDS.sleep(redundancyRecheckIntervalMs); } catch (Throwable t) { if (!namesystem.isRunning()) { - LOG.info("Stopping ReplicationMonitor."); + LOG.info("Stopping RedundancyMonitor."); if (!(t instanceof InterruptedException)) { - LOG.info("ReplicationMonitor received an exception" + LOG.info("RedundancyMonitor received an exception" + " while shutting down.", t); } break; } else if (!checkNSRunning && t instanceof InterruptedException) { - LOG.info("Stopping ReplicationMonitor for testing."); + LOG.info("Stopping RedundancyMonitor for testing."); break; } - LOG.error("ReplicationMonitor thread received Runtime exception. ", + LOG.error("RedundancyMonitor thread received Runtime exception. ", t); terminate(1, t); } @@ -4692,6 +4696,14 @@ void enqueue(Runnable action) throws InterruptedException { } } + /** + * @return redundancy thread. + */ + @VisibleForTesting + Daemon getRedundancyThread() { + return redundancyThread; + } + public BlockIdManager getBlockIdManager() { return blockIdManager; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index cb65ba8e30..eb546670ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -83,11 +83,11 @@ public void initialize(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { this.considerLoad = conf.getBoolean( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_DEFAULT); + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_DEFAULT); this.considerLoadFactor = conf.getDouble( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR_DEFAULT); + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR_DEFAULT); this.stats = stats; this.clusterMap = clusterMap; this.host2datanodeMap = host2datanodeMap; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 4fbae76c97..4ab55d3233 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -610,8 +610,8 @@ static int run(Map> namenodes, Configuration conf) DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.SECONDS) * 2000 + conf.getTimeDuration( - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT, TimeUnit.SECONDS) * 1000; AtomicInteger retryCount = new AtomicInteger(0); LOG.info("namenodes = " + namenodes); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index 899f0d7225..b859148a44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -469,11 +469,11 @@ public void prepareToStopStandbyServices() throws ServiceFailedException { * (not run or not pass any control commands to DataNodes) * on BackupNode: * {@link LeaseManager.Monitor} protected by SafeMode. - * {@link BlockManager.ReplicationMonitor} protected by SafeMode. + * {@link BlockManager.RedundancyMonitor} protected by SafeMode. * {@link HeartbeatManager.Monitor} protected by SafeMode. * {@link DecommissionManager.Monitor} need to prohibit refreshNodes(). * {@link PendingReconstructionBlocks.PendingReconstructionMonitor} - * harmless, because ReplicationMonitor is muted. + * harmless, because RedundancyMonitor is muted. */ @Override public void startActiveServices() throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index acb24fc230..c9d74bb9cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -296,14 +296,14 @@ - dfs.namenode.replication.considerLoad + dfs.namenode.redundancy.considerLoad true Decide if chooseTarget considers the target's load or not - dfs.namenode.replication.considerLoad.factor + dfs.namenode.redundancy.considerLoad.factor 2.0 The factor by which a node's load can exceed the average before being rejected for writes, only if considerLoad is true. @@ -980,10 +980,10 @@ - dfs.namenode.replication.interval + dfs.namenode.redundancy.interval.seconds 3s The periodicity in seconds with which the namenode computes - replication work for datanodes. Support multiple time unit suffix(case insensitive), + low redundancy work for datanodes. Support multiple time unit suffix(case insensitive), as described in dfs.heartbeat.interval. 
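
[Reviewer note] The renames above stay backward compatible: the DeprecationDeltas registered in HdfsConfiguration make a value set under an old property name readable through the new redundancy key. A minimal standalone sketch of that behavior (the demo class name is hypothetical and not part of this patch; it mirrors what TestDeprecatedKeys asserts further down):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DeprecatedRedundancyKeysDemo {
  public static void main(String[] args) {
    // Instantiating HdfsConfiguration runs addDeprecatedKeys(), which
    // registers the old-name -> new-name mappings added above.
    Configuration conf = new HdfsConfiguration();
    conf.setInt("dfs.namenode.replication.interval", 5); // deprecated name
    // The same value is visible through the new redundancy key.
    int interval = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 3);
    System.out.println("interval = " + interval); // prints 5
  }
}
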
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java index 534c5e0c8f..0ed01f7491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java @@ -90,14 +90,14 @@ public void setup() throws IOException { } // Setup conf - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 200); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL); conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, NAMENODE_REPLICATION_INTERVAL); conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java index 624fc74569..6d55c46e1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java @@ -58,7 +58,7 @@ public class TestBlockStoragePolicy { static { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite(); DEFAULT_STORAGE_POLICY = POLICY_SUITE.getDefaultPolicy(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java index 47d5e022ec..b686f28b4b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java @@ -67,7 +67,7 @@ public void setup() throws IOException { int numDNs = dataBlocks + parityBlocks + 2; conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); if (ErasureCodeNative.isNativeCodeLoaded()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java index 53059cec6f..cde07a433d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java @@ -214,7 +214,7 @@ private void tearDown() { private HdfsConfiguration 
newHdfsConfiguration() { final HdfsConfiguration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java index 1574a03adb..2eab3092b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java @@ -111,26 +111,26 @@ public void setup() throws IOException { writeConfigFile(excludeFile, null); // Setup conf - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath()); conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath()); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC); conf.setInt(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, NAMENODE_REPLICATION_INTERVAL); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt( DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY, cellSize - 1); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); numDNs = dataBlocks + parityBlocks + 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java index 7152092245..ad5c2a8e2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import org.apache.hadoop.conf.Configuration; import org.junit.Test; @@ -33,8 +34,23 @@ public void testDeprecatedKeys() throws Exception { String scriptFile = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY); assertTrue(scriptFile.equals("xyz")) ; conf.setInt("dfs.replication.interval", 1); - String alpha = DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY; - int repInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3) ; - assertTrue(repInterval == 1) ; + int redundancyInterval = conf 
+ .getInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 3); + assertTrue(redundancyInterval == 1); + int repInterval = conf.getInt("dfs.replication.interval", 3); + assertTrue(repInterval == 1); + repInterval = conf.getInt("dfs.namenode.replication.interval", 3); + assertTrue(repInterval == 1); + + conf.setBoolean("dfs.replication.considerLoad", false); + assertFalse(conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, true)); + assertFalse(conf.getBoolean("dfs.replication.considerLoad", true)); + + conf.setDouble("dfs.namenode.replication.considerLoad.factor", 5.0); + assertTrue(5.0 == conf.getDouble( + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 2.0)); + assertTrue(5.0 == conf + .getDouble("dfs.namenode.replication.considerLoad.factor", 2.0)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java index 78020b262a..cc90863729 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java @@ -329,7 +329,7 @@ public void testLongLivedClientPipelineRecovery() // client only retries establishing pipeline with the 4th node. int numDataNodes = 4; // do not consider load factor when selecting a data node - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); setEncryptionConfigKeys(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index ae0f0c274d..40bc314dee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -86,7 +86,7 @@ public void setUp() throws Exception { // handle under-replicated blocks quickly (for replication asserts) conf.setInt( DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 5); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); // handle failures in the DFSClient pipeline quickly // (for cluster.shutdown(); fs.close() idiom) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java index 1c75f902c7..9d60798784 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java @@ -75,7 +75,7 @@ public void setup() throws IOException { int numDNs = dataBlocks + parityBlocks + 2; conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index ca8f6dbed6..246818bdbc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -85,7 +85,7 @@ public void setup() throws IOException { conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java index 90d42b6da3..e225141685 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java @@ -59,7 +59,8 @@ public void testMissingBlocksAlert() try { Configuration conf = new HdfsConfiguration(); //minimize test delay - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 0); conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10); int fileLen = 10*1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java index 60143327bc..506a005b36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java @@ -99,7 +99,8 @@ public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, 0); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java index a3d9ef6255..4960c5816c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java @@ -95,8 +95,8 @@ public void setup() throws IOException { conf.setInt( DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY, cellSize - 1); - 
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); if (ErasureCodeNative.isNativeCodeLoaded()) { conf.set( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java index 3dda7e8f04..aa5c70faa5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java @@ -118,7 +118,8 @@ public void testDefaultPolicy() throws Exception { public void testReplaceDatanodeOnFailure() throws Exception { final Configuration conf = new HdfsConfiguration(); // do not consider load factor when selecting a data node - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + false); //always replace a datanode ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java index 3e23db3fb8..0f1bedd02e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java @@ -228,7 +228,8 @@ public void testBadBlockReportOnTransferMissingBlockFile() throws Exception { */ public void runReplication(boolean simulated) throws IOException { Configuration conf = new HdfsConfiguration(); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + false); if (simulated) { SimulatedFSDataset.setFactory(conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java index 8ada593f16..712f326856 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java @@ -75,7 +75,7 @@ public class TestWriteReadStripedFile { @Before public void setup() throws IOException { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); fs = cluster.getFileSystem(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 730f655c55..1906d74285 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ 
-179,7 +179,8 @@ static void initConf(Configuration conf) { conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); SimulatedFSDataset.setFactory(conf); conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L); @@ -212,10 +213,12 @@ static void initConfWithRamDisk(Configuration conf, void initConfWithStripe(Configuration conf) { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + false); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); SimulatedFSDataset.setFactory(conf); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L); conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); } @@ -922,11 +925,13 @@ private void runBalancer(Configuration conf, long totalUsedSpace, private static int runBalancer(Collection namenodes, final BalancerParameters p, Configuration conf) throws IOException, InterruptedException { - final long sleeptime = - conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, - DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 + - conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, - DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000; + final long sleeptime = conf.getLong( + DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, + DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 + + conf.getLong( + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT) + * 1000; LOG.info("namenodes = " + namenodes); LOG.info("parameters = " + p); LOG.info("Print stack trace", new Throwable()); @@ -1603,7 +1608,7 @@ public void testBalancerDuringUpgrade() throws Exception { Configuration conf = new HdfsConfiguration(); conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); @@ -1670,7 +1675,8 @@ public void testTwoReplicaShouldNotInSameDN() throws Exception { int blockSize = 5 * 1024 * 1024 ; conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index 5fb3b2500e..1c7442ab1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java @@ -105,25 +105,24 @@ private static int getNumberOfRacks(final BlockManager blockManager, } /** - * @return replication monitor thread instance from block manager. + * @return redundancy monitor thread instance from block manager. */ - public static Daemon getReplicationThread(final BlockManager blockManager) - { - return blockManager.replicationThread; + public static Daemon getRedundancyThread(final BlockManager blockManager) { + return blockManager.getRedundancyThread(); } - + /** - * Stop the replication monitor thread + * Stop the redundancy monitor thread. */ - public static void stopReplicationThread(final BlockManager blockManager) + public static void stopRedundancyThread(final BlockManager blockManager) throws IOException { blockManager.enableRMTerminationForTesting(); - blockManager.replicationThread.interrupt(); + blockManager.getRedundancyThread().interrupt(); try { - blockManager.replicationThread.join(); - } catch(InterruptedException ie) { + blockManager.getRedundancyThread().join(); + } catch (InterruptedException ie) { throw new IOException( - "Interrupted while trying to stop ReplicationMonitor"); + "Interrupted while trying to stop RedundancyMonitor"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java index 197e3a6891..0298cbc937 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java @@ -62,9 +62,9 @@ private Configuration getConf() { // commands quickly (as replies to heartbeats) conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - // Have the NN ReplicationMonitor compute the replication and + // Have the NN RedundancyMonitor compute the reconstruction and // invalidation commands to send DNs every second. - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); // Have the NN check for pending replications every second so it // quickly schedules additional replicas as they are identified. 
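
[Reviewer note] The test diffs above and below repeat one mechanical pattern: swap the deprecated constant for its redundancy-named replacement while keeping the same value. A condensed sketch of that recurring setup (illustrative only; the class and method names are hypothetical and the MiniDFSCluster wiring is omitted):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class FastRedundancyTestConf {
  static Configuration newTestConf() {
    Configuration conf = new HdfsConfiguration();
    // Compute reconstruction and invalidation work every second so tests
    // observe RedundancyMonitor decisions quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    // Ignore datanode load so block placement is deterministic in tests.
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    return conf;
  }

  public static void main(String[] args) {
    System.out.println(newTestConf().get(
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY)); // "1"
  }
}
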
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java index 6058727806..d915b6e6b7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java @@ -56,7 +56,7 @@ public void testNodeCount() throws Exception { 60); // reduce intervals to make test execution time shorter - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); // start a mini dfs cluster of 2 nodes diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java index d856065d81..674f883381 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingInvalidateBlock.java @@ -65,7 +65,7 @@ public void setUp() throws Exception { // set the block report interval to 2s conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 2000); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); // disable the RPC timeout for debug conf.setLong(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java index ba3be914e7..aeaaf4e4d4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java @@ -374,7 +374,7 @@ public void testPendingAndInvalidate() throws Exception { CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024); CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFS_REPLICATION_INTERVAL); - CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, + CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, DFS_REPLICATION_INTERVAL); MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes( DATANODE_COUNT).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java index 94ee623812..ace8732a84 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java @@ -96,8 +96,8 @@ private static String[] getRacks(int numHosts, int numRacks) { @BeforeClass public static void setup() throws Exception { - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false); conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1); } @@ -139,7 +139,7 @@ private DataNode getDataNode(String host) { * * In this test, we first need to create a scenario that a striped block has * all the internal blocks but distributed in <6 racks. Then we check if the - * replication monitor can correctly schedule the reconstruction work for it. + * redundancy monitor can correctly schedule the reconstruction work for it. */ @Test public void testReconstructForNotEnoughRacks() throws Exception { @@ -194,7 +194,7 @@ public void testReconstructForNotEnoughRacks() throws Exception { fsn.writeUnlock(); } - // check if replication monitor correctly schedule the replication work + // check if redundancy monitor correctly schedule the reconstruction work. boolean scheduled = false; for (int i = 0; i < 5; i++) { // retry 5 times for (DatanodeStorageInfo storage : blockInfo.storages) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 9c623833c4..1af013d641 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -836,7 +836,7 @@ public void testReplicationWithPriority() throws Exception { int DFS_NAMENODE_REPLICATION_INTERVAL = 1000; int HIGH_PRIORITY = 0; Configuration conf = new Configuration(); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2) .format(true).build(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java index 8336fec298..bcd8245c53 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java @@ -53,7 +53,7 @@ public static Iterable data() { @Override DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) { conf.setDouble(DFSConfigKeys - .DFS_NAMENODE_REPLICATION_CONSIDERLOAD_FACTOR, 1.2); + .DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_FACTOR, 1.2); final String[] racks = { "/rack1", "/rack1", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java index 98efc13cad..1b0526b2ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java @@ -78,7 +78,7 @@ public void setup() throws IOException { conf = new Configuration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1); + conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build(); cluster.waitActive(); cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java index 8c435923e7..d268d018f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java @@ -104,8 +104,8 @@ public void testCorruptFilesAreDiscarded() Thread.sleep(30000L); assertThat(cluster.getNamesystem().getNumDeadDataNodes(), is(1)); - // Next, wait for the replication monitor to mark the file as corrupt - Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000); + // Next, wait for the redundancy monitor to mark the file as corrupt. + Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000); // Wait for the LazyPersistFileScrubber to run Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000); @@ -137,8 +137,8 @@ public void testDisableLazyPersistFileScrubber() cluster.shutdownDataNodes(); Thread.sleep(30000L); - // Next, wait for the replication monitor to mark the file as corrupt - Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000); + // Next, wait for the redundancy monitor to mark the file as corrupt. + Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000); // Wait for the LazyPersistFileScrubber to run Thread.sleep(2 * LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC * 1000); @@ -164,8 +164,8 @@ public void testFileShouldNotDiscardedIfNNRestarted() cluster.restartNameNodes(); - // wait for the replication monitor to mark the file as corrupt - Thread.sleep(2 * DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT * 1000); + // wait for the redundancy monitor to mark the file as corrupt. 
+ Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000); Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode() .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 4342dab628..20a6959247 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -105,7 +105,8 @@ static void initConf(Configuration conf) { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L); } @@ -482,8 +483,10 @@ public void testMoverFailedRetry() throws Exception { void initConfWithStripe(Configuration conf) { conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L); - conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false); + conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, + 1L); + conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, + false); } @Test(timeout = 300000) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index 1b5bd8107d..077997f68a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -93,8 +93,8 @@ public class TestStorageMover { static { DEFAULT_CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); DEFAULT_CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L); - DEFAULT_CONF.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, - 2L); + DEFAULT_CONF.setLong( + DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 2L); DEFAULT_CONF.setLong(DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY, 2000L); DEFAULT_POLICIES = BlockStoragePolicySuite.createDefaultSuite(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 4b9db07c2b..a3d0be5c74 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -1237,8 +1237,8 @@ void printResults() { } // end BlockReportStats /** - * Measures how fast replication monitor can compute data-node work. - * + * Measures how fast redundancy monitor can compute data-node work. + * * It runs only one thread until no more work can be scheduled. 
   */
  class ReplicationStats extends OperationStatsBase {
@@ -1265,7 +1265,7 @@ class ReplicationStats extends OperationStatsBase {
      parseArguments(args);
      // number of operations is 4 times the number of decommissioned
      // blocks divided by the number of needed replications scanned
-      // by the replication monitor in one iteration
+      // by the redundancy monitor in one iteration
      numOpsRequired = (totalBlocks*replication*nodesToDecommission*2) /
          (numDatanodes*numDatanodes);

@@ -1314,8 +1314,8 @@ void generateInputs(int[] ignore) throws IOException {
      // start data-nodes; create a bunch of files; generate block reports.
      blockReportObject.generateInputs(ignore);

-      // stop replication monitor
-      BlockManagerTestUtil.stopReplicationThread(namesystem.getBlockManager());
+      // stop the redundancy monitor thread.
+      BlockManagerTestUtil.stopRedundancyThread(namesystem.getBlockManager());

      // report blocks once
      int nrDatanodes = blockReportObject.getNumDatanodes();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index 15a8756c73..c8b2556457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -74,7 +74,7 @@ public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     // disable block recovery
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     SimulatedFSDataset.setFactory(conf);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
index 7f1cc9a8ad..8bdaa74e47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
@@ -78,7 +78,7 @@ public class TestDecommissioningStatus {
   @Before
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);

     // Set up the hosts/exclude files.
@@ -89,7 +89,8 @@ public void setUp() throws Exception {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(
         DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 4);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1000);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
     conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index f6e99ee12f..a57a936b08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -156,7 +156,7 @@ public void testReplicationAdjusted() throws Exception {
     // start a cluster
     Configuration conf = getConf();
     // Replicate and heartbeat fast to shave a few seconds off test
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

     MiniDFSCluster cluster = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index cb19c2cc8b..79e7acc260 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -76,9 +76,9 @@ private Configuration getConf() {
     // commands quickly (as replies to heartbeats)
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

-    // Have the NN ReplicationMonitor compute the replication and
+    // Have the NN RedundancyMonitor compute the low-redundancy blocks and
     // invalidation commands to send DNs every second.
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);

     // Have the NN check for pending replications every second so it
     // quickly schedules additional replicas as they are identified.
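Note for reviewers: the old key names keep working because HdfsConfiguration registers a DeprecationDelta from each replication key to its redundancy replacement, so a value written under a deprecated name is read back through the new one. A minimal, self-contained sketch of that behavior (the class below is illustrative only and not part of this patch):

// Illustrative only -- not part of this patch. Shows the deprecation
// deltas registered in HdfsConfiguration resolving old replication key
// names to the new redundancy ones.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class RedundancyKeyMigrationExample {
  public static void main(String[] args) {
    // HdfsConfiguration's static initializer registers the deltas.
    Configuration conf = new HdfsConfiguration();
    // An old-style key, e.g. carried over from a legacy hdfs-site.xml.
    conf.setInt("dfs.namenode.replication.interval", 3);
    // Reads through the new name; prints "3".
    System.out.println(conf.get(
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY));
    // The considerLoad rename resolves the same way; prints "false".
    conf.setBoolean("dfs.replication.considerLoad", false);
    System.out.println(conf.getBoolean(
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, true));
  }
}

Running it prints 3 and false, and Configuration logs a one-time deprecation warning for each old key it sees.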
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index 9816d001aa..0303a5d8d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -64,8 +64,9 @@ public void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();

     // High value of replication interval
-    // so that blocks remain under-replicated
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
+    // so that blocks remain low-redundancy
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1000);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 5L);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index d43804ad9b..cf1585c07d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -192,7 +192,8 @@ private void doTestMissingStripedBlock(int numOfMissed, int numOfBusy)
   public void test2RecoveryTasksForSameBlockGroup() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1000);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
         .build();
@@ -255,8 +256,8 @@ private static int getNumberOfBlocksToBeErasureCoded(MiniDFSCluster cluster)
   @Test
   public void testCountLiveReplicas() throws Exception {
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
         .build();
@@ -299,7 +300,7 @@ public void testCountLiveReplicas() throws Exception {
     FSNamesystem fsn = cluster.getNamesystem();
     BlockManager bm = fsn.getBlockManager();

-    Thread.sleep(3000); // wait 3 running cycles of replication monitor
+    Thread.sleep(3000); // wait for 3 cycles of the redundancy monitor
     for (DataNode dn : cluster.getDataNodes()) {
       DataNodeTestUtils.triggerHeartbeat(dn);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 6c4d73c08a..d92ec1b940 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -307,8 +307,9 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
     int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1L);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);

     // start 10 datanodes
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 15357b05c6..c1521bd562 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -87,9 +87,10 @@ public class TestDNFencing {
   public void setupCluster() throws Exception {
     conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK);
-    // Bump up replication interval so that we only run replication
+    // Bump up the redundancy interval so that we only run low-redundancy
     // checks explicitly.
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 600);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        600);
     // Increase max streams so that we re-replicate quickly.
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
     // See RandomDeleterPolicy javadoc.
@@ -167,7 +168,7 @@ public void testDnFencing() throws Exception {
     // The blocks should no longer be postponed.
     assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());

-    // Wait for NN2 to enact its deletions (replication monitor has to run, etc)
+    // Wait for NN2 to enact its deletions (redundancy monitor has to run, etc)
     BlockManagerTestUtil.computeInvalidationWork(
         nn2.getNamesystem().getBlockManager());
     cluster.triggerHeartbeats();
@@ -258,7 +259,7 @@ public void testNNClearsCommandsOnFailoverAfterStartup()
     // The block should no longer be postponed.
     assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());

-    // Wait for NN2 to enact its deletions (replication monitor has to run, etc)
+    // Wait for NN2 to enact its deletions (redundancy monitor has to run, etc)
     BlockManagerTestUtil.computeInvalidationWork(
         nn2.getNamesystem().getBlockManager());

@@ -358,7 +359,7 @@ public void testNNClearsCommandsOnFailoverWithReplChanges()
     // The block should no longer be postponed.
     assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());

-    // Wait for NN2 to enact its deletions (replication monitor has to run, etc)
+    // Wait for NN2 to enact its deletions (redundancy monitor has to run, etc)
     BlockManagerTestUtil.computeInvalidationWork(
         nn2.getNamesystem().getBlockManager());

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
index e4fe2307de..c91d4de31a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java
@@ -110,7 +110,7 @@ public void testFencingStress() throws Exception {
     harness.conf.setInt(
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
     harness.conf.setInt(
-        DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);

     final MiniDFSCluster cluster = harness.startCluster();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index 4fc5029146..dc7f47a79c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -142,8 +142,9 @@ private void doWriteOverFailoverTest(TestScenario scenario,
       MethodToTestIdempotence methodToTest) throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    // Don't check replication periodically.
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
+    // Don't check for low redundancy periodically.
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        1000);

     FSDataOutputStream stm = null;
     MiniDFSCluster cluster = newMiniCluster(conf, 3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
index 110615d909..c11f8807b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
@@ -45,7 +45,7 @@ public class TestNNMetricFilesInGetListingOps {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
-    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
   }

   private MiniDFSCluster cluster;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 90d61eeee6..0ad613090c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -78,7 +78,7 @@
  */
 public class TestNameNodeMetrics {
   private static final Configuration CONF = new HdfsConfiguration();
-  private static final int DFS_REPLICATION_INTERVAL = 1;
+  private static final int DFS_REDUNDANCY_INTERVAL = 1;
   private static final Path TEST_ROOT_DIR_PATH =
       new Path("/testNameNodeMetrics");
   private static final String NN_METRICS = "NameNodeActivity";
@@ -96,9 +96,9 @@ public class TestNameNodeMetrics {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-        DFS_REPLICATION_INTERVAL);
-    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
-        DFS_REPLICATION_INTERVAL);
+        DFS_REDUNDANCY_INTERVAL);
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+        DFS_REDUNDANCY_INTERVAL);
     CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
         "" + PERCENTILES_INTERVAL);
     // Enable stale DataNodes checking
@@ -333,7 +333,7 @@ public void testMissingBlock() throws Exception {
   private void waitForDeletion() throws InterruptedException {
     // Wait for more than DATANODE_COUNT replication intervals to ensure all
     // the blocks pending deletion are sent for deletion to the datanodes.
-    Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
+    Thread.sleep(DFS_REDUNDANCY_INTERVAL * (DATANODE_COUNT + 1) * 1000);
   }

   /**
@@ -364,7 +364,7 @@ private MetricsRecordBuilder waitForDnMetricValue(String source,
     rb = getMetrics(source);
     gauge = MetricsAsserts.getLongGauge(name, rb);
     while (gauge != expected && (--retries > 0)) {
-      Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
+      Thread.sleep(DFS_REDUNDANCY_INTERVAL * 500);
       rb = getMetrics(source);
       gauge = MetricsAsserts.getLongGauge(name, rb);
     }
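A closing note on units: the renamed key spells out seconds, so the test sleeps above remain plain seconds-to-millis conversions. A small worked sketch, not part of this patch, assuming DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT keeps the old default of 3 from hdfs-default.xml:

// Illustrative sketch, not part of this patch: the seconds-to-millis
// arithmetic the tests above rely on when sleeping out RedundancyMonitor
// cycles. Assumes the default interval stays 3 (seconds).
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RedundancyIntervalSleepSketch {
  public static void main(String[] args) throws InterruptedException {
    long intervalSeconds =
        DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT;
    // Two full monitor cycles: 2 * 3 s * 1000 = 6000 ms, the same wait
    // TestLazyPersistFiles uses before expecting corrupt-block marking.
    Thread.sleep(2 * intervalSeconds * 1000);
  }
}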