From 840612b3b144eeaa28fd880e697c776bb5f00eeb Mon Sep 17 00:00:00 2001
From: Ahmed Hussein <50450311+amahussein@users.noreply.github.com>
Date: Fri, 20 Nov 2020 07:33:45 -0500
Subject: [PATCH] HDFS-15659. MiniDFSCluster dfs.namenode.redundancy.considerLoad
 default to false (#2443). Contributed by Ahmed Hussein.

(cherry picked from commit 747883af4d6cfb41b1f6bd9a4f9c9d246d021048)
---
 .../apache/hadoop/hdfs/MiniDFSCluster.java    | 22 +++++++++++++++++++
 .../ReadStripedFileWithDecodingHelper.java    |  2 --
 .../hadoop/hdfs/TestDFSClientRetries.java     |  4 +---
 .../hdfs/TestDecommissionWithStriped.java     |  2 --
 .../hadoop/hdfs/TestEncryptedTransfer.java    |  4 +---
 .../apache/hadoop/hdfs/TestFileChecksum.java  |  2 --
 .../hadoop/hdfs/TestLeaseRecoveryStriped.java |  2 --
 .../hdfs/TestReconstructStripedFile.java      |  2 --
 .../apache/hadoop/hdfs/TestReplication.java   |  2 --
 .../hadoop/hdfs/TestWriteReadStripedFile.java |  2 --
 .../TestReconstructStripedBlocks.java         |  2 --
 .../server/namenode/TestStripedINodeFile.java |  2 --
 .../hdfs/server/namenode/ha/TestHAAppend.java |  2 --
 13 files changed, 24 insertions(+), 26 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 95de470871..ff38236987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -31,6 +31,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
@@ -184,6 +185,10 @@ public class MiniDFSCluster implements AutoCloseable {
   // Changing this default may break some tests that assume it is 2.
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
 
+  // do not consider load factor when selecting a datanode.
+  private static final boolean DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD =
+      false;
+
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
   public int getStoragesPerDatanode() {
@@ -493,6 +498,19 @@ public Builder useConfiguredTopologyMappingClass(
       return this;
     }
 
+    /**
+     * set the value of DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY in the config
+     * file.
+     *
+     * @param val passed to the flag. This allows overriding the default value
+     *          {@link #DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD}.
+     * @return the builder object.
+     */
+    public Builder setNNRedundancyConsiderLoad(final boolean val) {
+      conf.setBoolean(DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, val);
+      return this;
+    }
+
     /**
      * Construct the actual MiniDFSCluster
      */
@@ -509,6 +527,10 @@ private void initDefaultConfigurations() {
           DEFAULT_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC);
       conf.setLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
           defaultScannerVolumeTimeOut);
+      // default is false. do not consider load factor when selecting a
+      // datanode.
+      conf.setBoolean(DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
+          DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD);
       this.storagesPerDatanode =
           FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
index e0e2c8b402..e1497445b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
@@ -76,8 +76,6 @@ public static MiniDFSCluster initializeCluster() throws IOException {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
         0);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     MiniDFSCluster myCluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(NUM_DATANODES)
         .build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index f992a26685..e377a539e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -839,10 +839,8 @@ class Counter {
   public void testGetFileChecksum() throws Exception {
     final String f = "/testGetFileChecksum";
     final Path p = new Path(f);
-    // HDFS-15461: the number of datanode is higher than the number of replicas.
-    // That way when a DN fails, the pipeline can recover.
     final int numReplicas = 3;
-    final int numDatanodes = numReplicas + 1;
+    final int numDatanodes = numReplicas;
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
index f7dbe50629..c68cb1707c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
@@ -127,8 +127,6 @@ public void setup() throws IOException {
     writeConfigFile(excludeFile, null);
 
     // Setup conf
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index a23c4bf412..73df025013 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -381,9 +381,7 @@ public void testLongLivedClientPipelineRecovery()
     // use 4 datanodes to make sure that after 1 data node is stopped,
     // client only retries establishing pipeline with the 4th node.
     int numDataNodes = 4;
-    // do not consider load factor when selecting a data node
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
+
     setEncryptionConfigKeys();
 
     cluster = new MiniDFSCluster.Builder(conf)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
index 83ac946ac7..6e5b3e4cbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
@@ -88,8 +88,6 @@ public void setup() throws IOException {
     int numDNs = dataBlocks + parityBlocks + 2;
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     customizeConf(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index f6491ef039..d0eccc14a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -92,8 +92,6 @@ public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 60000L);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     final int numDNs = dataBlocks + parityBlocks;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 6c769f90b1..d32b678e9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -140,8 +140,6 @@ public void setup() throws IOException {
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
         cellSize - 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
           CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODERS_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
index 9a9ab1b9e4..4948c6f3b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
@@ -338,8 +338,6 @@ public void testBadBlockReportOnTransferMissingBlockFile() throws Exception {
    */
   public void runReplication(boolean simulated) throws IOException {
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     if (simulated) {
       SimulatedFSDataset.setFactory(conf);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
index 49f578a823..e1f7524546 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
@@ -75,8 +75,6 @@ public class TestWriteReadStripedFile {
   @Before
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     fs = cluster.getFileSystem();
     fs.enableErasureCodingPolicy(ecPolicy.getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index 46907fd64e..94a9ae1a4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -277,8 +277,6 @@ private static int getNumberOfBlocksToBeErasureCoded(MiniDFSCluster cluster)
   public void testCountLiveReplicas() throws Exception {
     final HdfsConfiguration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
         .build();
     cluster.waitActive();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index 8ecf3a16f6..39e272a59e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -393,8 +393,6 @@ public void testUnsuitableStoragePoliciesWithECStripedMode()
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         1L);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
 
     // start 10 datanodes
     int numOfDatanodes = 10;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
index d835e230c6..ca2c98d2a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAAppend.java
@@ -60,8 +60,6 @@ public void testMultipleAppendsDuringCatchupTailing() throws Exception {
     // control the ingest of edits by the standby for this test.
     conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, "5000");
     conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, -1);
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
-        false);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(3).build();
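
Editor's note on usage: the net effect of this patch is that every MiniDFSCluster now starts with dfs.namenode.redundancy.considerLoad set to false, which is why each test above can drop its own conf.setBoolean(...) call (and why TestDFSClientRetries no longer needs the spare datanode added by HDFS-15461). A test that actually wants load-aware block placement opts back in through the new builder method. A minimal sketch follows; the class name, main-method harness, and datanode count are illustrative, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ConsiderLoadUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();

    // With this patch, the Builder defaults
    // dfs.namenode.redundancy.considerLoad to false, so tests no longer
    // need an explicit conf.setBoolean(...) for the common case.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        // Opt back in to load-aware placement for this one cluster;
        // omit this call to keep the new false default.
        .setNNRedundancyConsiderLoad(true)
        .build();
    try {
      cluster.waitActive();
      // ... test body would go here ...
    } finally {
      cluster.shutdown();
    }
  }
}

Because initDefaultConfigurations() writes the default into the Configuration unconditionally, a value placed on conf before the Builder applies its defaults would be overwritten; setNNRedundancyConsiderLoad() runs afterwards, which is why the patch exposes the override as a builder method rather than relying on a pre-set key.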