diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 2ef8527334..21e4c033da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,8 @@
 BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
 
     HDFS-7347. Configurable erasure coding policy for individual files and
-    directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file
+    directories ( Zhe Zhang via vinayakumarb )
+
+    HDFS-7716. Add a test for BlockGroup support in FSImage.
+    (Takuya Fukudome via szetszwo)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 21df6a993b..16f247add7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -19,6 +19,7 @@
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -31,7 +32,12 @@
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.Assert;
 
@@ -46,6 +52,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -378,4 +385,59 @@ public void testZeroBlockSize() throws Exception {
       FileUtil.fullyDelete(dfsDir);
     }
   }
+
+  /**
+   * Ensure that FSImage supports BlockGroup.
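+   *
+   * The test writes a block of data to a file stored under the EC storage
+   * policy, saves the namespace, restarts the NameNode, and verifies that
+   * the striped block metadata survives the fsimage round trip.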
+   */
+  @Test
+  public void testSupportBlockGroup() throws IOException {
+    final short GROUP_SIZE = HdfsConstants.NUM_DATA_BLOCKS +
+        HdfsConstants.NUM_PARITY_BLOCKS;
+    final int BLOCK_SIZE = 8 * 1024 * 1024;
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
+          .build();
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      // mark the root directory as erasure coded so the new file is striped
+      fs.setStoragePolicy(new Path("/"), HdfsConstants.EC_STORAGE_POLICY_NAME);
+      Path file = new Path("/striped");
+      FSDataOutputStream out = fs.create(file);
+      byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
+      out.write(bytes);
+      out.close();
+
+      // saveNamespace requires safe mode; this forces a new fsimage to disk
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      fs.saveNamespace();
+      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+      // restart so the file metadata is reloaded from the saved fsimage
+      cluster.restartNameNodes();
+      fs = cluster.getFileSystem();
+      assertTrue(fs.exists(file));
+
+      // check the information of striped blocks
+      FSNamesystem fsn = cluster.getNamesystem();
+      INodeFile inode = fsn.dir.getINode(file.toString()).asFile();
+      FileWithStripedBlocksFeature sb = inode.getStripedBlocksFeature();
+      assertNotNull(sb);
+      BlockInfoStriped[] blks = sb.getBlocks();
+      assertEquals(1, blks.length);
+      assertTrue(blks[0].isStriped());
+      assertEquals(HdfsConstants.NUM_DATA_BLOCKS, blks[0].getDataBlockNum());
+      assertEquals(HdfsConstants.NUM_PARITY_BLOCKS, blks[0].getParityBlockNum());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }