diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3c751520d7..596bbcfdd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -152,3 +152,6 @@
 
     HDFS-8183. Erasure Coding: Improve DFSStripedOutputStream closing of
     datastreamer threads. (Rakesh R via Zhe Zhang)
+
+    HDFS-8308. Erasure Coding: NameNode may get blocked in waitForLoadingFSImage()
+    when loading editlog. (jing9)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 8cda289849..14d4e29c04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -79,7 +79,8 @@ ECZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
       for (XAttr xAttr : xAttrs) {
         if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
           String schemaName = new String(xAttr.getValue());
-          ECSchema schema = dir.getFSNamesystem().getECSchema(schemaName);
+          ECSchema schema = dir.getFSNamesystem().getSchemaManager()
+              .getSchema(schemaName);
           return new ECZoneInfo(inode.getFullPathName(), schema);
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a209142787..3fa88184a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7529,9 +7529,9 @@ BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
 
   /**
    * Create an erasure coding zone on directory src.
-   * @param schema  ECSchema for the erasure coding zone
-   * @param src     the path of a directory which will be the root of the
+   * @param srcArg  the path of a directory which will be the root of the
    *                erasure coding zone. The directory must be empty.
+   * @param schema  ECSchema for the erasure coding zone
   *
   * @throws AccessControlException if the caller is not the superuser.
   * @throws UnresolvedLinkException if the path can't be resolved.
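The Javadoc hunk above reorders the @param tags to match the method signature: the zone root path comes first, then the schema. For orientation, a minimal caller-side sketch of the documented operation, assuming the DistributedFileSystem#createErasureCodingZone(Path, ECSchema) API that this branch's tests exercise; the path and null-schema default are illustrative, not taken from this patch:

    // Sketch: create an erasure coding zone rooted at an empty directory.
    // Passing null for the schema is assumed to select the system default ECSchema.
    Path zoneRoot = new Path("/eczone");          // illustrative path
    dfs.mkdirs(zoneRoot);                         // the zone root must be an empty directory
    dfs.createErasureCodingZone(zoneRoot, null);  // superuser-only, per the Javadoc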
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 0c88842f9b..0165189bc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1948,4 +1948,16 @@ public static Block addStripedBlockToFile(List<DataNode> dataNodes,
     lastBlock.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS);
     return lastBlock;
   }
+
+  /**
+   * Because currently DFSStripedOutputStream does not support hflush/hsync,
+   * tests can use this method to flush all the buffered data to DataNodes.
+   */
+  public static void writeAndFlushStripedOutputStream(
+      DFSStripedOutputStream out, int chunkSize) throws IOException {
+    // FSOutputSummer.BUFFER_NUM_CHUNKS == 9
+    byte[] toWrite = new byte[chunkSize * 9 + 1];
+    out.write(toWrite);
+    out.flushInternal();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
index ae2bdd85fb..f1aec821b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingZones.java
@@ -35,8 +35,6 @@
 import static org.junit.Assert.*;
 
 public class TestErasureCodingZones {
-  private final int NUM_OF_DATANODES = 3;
-  private Configuration conf;
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private static final int BLOCK_SIZE = 1024;
@@ -44,10 +42,10 @@ public class TestErasureCodingZones {
 
   @Before
   public void setupCluster() throws IOException {
-    conf = new HdfsConfiguration();
+    Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(NUM_OF_DATANODES).build();
+        numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     namesystem = cluster.getNamesystem();
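TestAddStripedBlocks below stops calling getAdditionalBlock() on the NameNode directly and instead drives block allocation through the normal write path with the new DFSTestUtil helper. A hedged usage sketch, mirroring the calls the patch adds; the file path is illustrative:

    // Write enough data to spill the client-side checksum buffer, then flush
    // the buffered bytes to the DataNodes; by then the NameNode has allocated
    // the striped block group even though hflush/hsync are unsupported.
    FSDataOutputStream out = dfs.create(new Path("/striped-file"), (short) 1);
    DFSTestUtil.writeAndFlushStripedOutputStream(
        (DFSStripedOutputStream) out.getWrappedStream(),
        DFS_BYTES_PER_CHECKSUM_DEFAULT);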
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index d03e938b5f..297db14978 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSStripedOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -55,6 +56,7 @@
 import java.util.List;
 import java.util.UUID;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.NUM_DATA_BLOCKS;
 import static org.junit.Assert.assertEquals;
@@ -103,52 +105,50 @@ public void testAllocateBlockId() throws Exception {
     Assert.assertEquals(firstId + HdfsConstants.MAX_BLOCKS_IN_GROUP, secondId);
   }
 
-  @Test
+  @Test (timeout=60000)
   public void testAddStripedBlock() throws Exception {
     final Path file = new Path("/file1");
     // create an empty file
     FSDataOutputStream out = null;
     try {
       out = dfs.create(file, (short) 1);
+      DFSTestUtil.writeAndFlushStripedOutputStream(
+          (DFSStripedOutputStream) out.getWrappedStream(),
+          DFS_BYTES_PER_CHECKSUM_DEFAULT);
       FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
       INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-      LocatedBlock newBlock = cluster.getNamesystem().getAdditionalBlock(
-          file.toString(), fileNode.getId(), dfs.getClient().getClientName(),
-          null, null, null);
-      assertEquals(GROUP_SIZE, newBlock.getLocations().length);
-      assertEquals(GROUP_SIZE, newBlock.getStorageIDs().length);
       BlockInfo[] blocks = fileNode.getBlocks();
       assertEquals(1, blocks.length);
       Assert.assertTrue(blocks[0].isStriped());
       checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), true);
+
+      // restart NameNode to check editlog
+      cluster.restartNameNode(true);
+      fsdir = cluster.getNamesystem().getFSDirectory();
+      fileNode = fsdir.getINode4Write(file.toString()).asFile();
+      blocks = fileNode.getBlocks();
+      assertEquals(1, blocks.length);
+      Assert.assertTrue(blocks[0].isStriped());
+      checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
+
+      // save namespace, restart namenode, and check
+      dfs = cluster.getFileSystem();
+      dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      dfs.saveNamespace();
+      dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+      cluster.restartNameNode(true);
+      fsdir = cluster.getNamesystem().getFSDirectory();
+      fileNode = fsdir.getINode4Write(file.toString()).asFile();
+      blocks = fileNode.getBlocks();
+      assertEquals(1, blocks.length);
+      Assert.assertTrue(blocks[0].isStriped());
+      checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
     } finally {
       IOUtils.cleanup(null, out);
     }
-
-    // restart NameNode to check editlog
-    cluster.restartNameNode(true);
-    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
-    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-    BlockInfo[] blocks = fileNode.getBlocks();
-    assertEquals(1, blocks.length);
-    Assert.assertTrue(blocks[0].isStriped());
-    checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
-
-    // save namespace, restart namenode, and check
-    dfs = cluster.getFileSystem();
-    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
-    dfs.saveNamespace();
-    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
-    cluster.restartNameNode(true);
-    fsdir = cluster.getNamesystem().getFSDirectory();
-    fileNode = fsdir.getINode4Write(file.toString()).asFile();
-    blocks = fileNode.getBlocks();
-    assertEquals(1, blocks.length);
-    Assert.assertTrue(blocks[0].isStriped());
-    checkStripedBlockUC((BlockInfoStriped) fileNode.getLastBlock(), false);
   }
 
   private void checkStripedBlockUC(BlockInfoStriped block,
@@ -190,11 +190,12 @@ public void testGetLocatedStripedBlocks() throws Exception {
     FSDataOutputStream out = null;
     try {
       out = dfs.create(file, (short) 1);
+      DFSTestUtil.writeAndFlushStripedOutputStream(
+          (DFSStripedOutputStream) out.getWrappedStream(),
+          DFS_BYTES_PER_CHECKSUM_DEFAULT);
       FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
       INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
-      cluster.getNamesystem().getAdditionalBlock(file.toString(),
-          fileNode.getId(), dfs.getClient().getClientName(), null, null, null);
       BlockInfoStripedUnderConstruction lastBlk =
           (BlockInfoStripedUnderConstruction) fileNode.getLastBlock();
       DatanodeInfo[] expectedDNs = DatanodeStorageInfo
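One sizing note on the helper these hunks rely on: per its inline comment, FSOutputSummer buffers BUFFER_NUM_CHUNKS == 9 checksum chunks before handing data to the streamer, so writing chunkSize * 9 + 1 bytes guarantees the buffer spills at least once and flushInternal() then has real data to push to the DataNodes. A hedged arithmetic sketch under the stock configuration (512 bytes is the standard HDFS default for dfs.bytes-per-checksum):

    int chunkSize = DFS_BYTES_PER_CHECKSUM_DEFAULT; // 512 bytes by default
    byte[] toWrite = new byte[chunkSize * 9 + 1];   // 4609 bytes: nine full chunks plus one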