diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index c23b034ac9..17b48f7ef3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -190,6 +190,7 @@ public static boolean checkStoragePolicySuitableForECStripedMode(
    * Clear and clean up.
    */
   public void clear() {
-    enabledPoliciesByName.clear();
+    // TODO: we should only clear policies loaded from NN metadata.
+    // This is a placeholder for HDFS-7337.
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 5da19a713c..8c2acf6873 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -41,6 +41,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -48,10 +49,13 @@
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -564,7 +568,58 @@ public void testCorruptImageFallback() throws IOException {
     } finally {
       cluster.shutdown();
     }
-}
+  }
+
+  @Test(timeout=30000)
+  public void testCorruptImageFallbackLostECPolicy() throws IOException {
+    final ErasureCodingPolicy defaultPolicy = StripedFileTestUtil
+        .getDefaultECPolicy();
+    final String policy = defaultPolicy.getName();
+    final Path f1 = new Path("/f1");
+    config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, policy);
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
+        .numDataNodes(0)
+        .format(true)
+        .build();
+    try {
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      // set root directory to use the default ec policy
+      Path srcECDir = new Path("/");
+      fs.setErasureCodingPolicy(srcECDir,
+          defaultPolicy.getName());
+
+      // create a file which will use the default ec policy
+      fs.create(f1);
+      FileStatus fs1 = fs.getFileStatus(f1);
+      assertTrue(fs1.isErasureCoded());
+      ErasureCodingPolicy fs1Policy = fs.getErasureCodingPolicy(f1);
+      assertEquals(fs1Policy, defaultPolicy);
+    } finally {
+      cluster.close();
+    }
+
+    // Delete a single md5sum
+    corruptFSImageMD5(false);
+    // Should still be able to start
+    cluster = new MiniDFSCluster.Builder(config)
+        .numDataNodes(0)
+        .format(false)
+        .build();
+    try {
+      cluster.waitActive();
+      ErasureCodingPolicy[] ecPolicies = cluster.getNameNode()
+          .getNamesystem().getErasureCodingPolicyManager().getEnabledPolicies();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      // make sure the ec policy of the file is still correct
+      assertEquals(fs.getErasureCodingPolicy(f1), defaultPolicy);
+      // make sure after fsimage fallback, enabled ec policies are not cleared.
+      assertTrue(ecPolicies.length == 1);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * This test tests hosts include list contains host names. After namenode