diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8f28285a48..d8f2e9dbcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -107,3 +107,6 @@
 
     HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
     (szetszwo)
+
+    HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to
+    create BlockReader. (szetszwo via Zhe Zhang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 88b7f37dcc..829cf03c86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -165,20 +165,19 @@ public void readAndCheckEOS(BlockReader reader, int length, boolean expectEof)
    */
   public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int lenToRead)
       throws IOException {
-    return getBlockReader(cluster, testBlock, offset, lenToRead);
+    return getBlockReader(cluster.getFileSystem(), testBlock, offset, lenToRead);
   }
 
   /**
    * Get a BlockReader for the given block.
    */
-  public static BlockReader getBlockReader(MiniDFSCluster cluster,
-      LocatedBlock testBlock, int offset, int lenToRead) throws IOException {
+  public static BlockReader getBlockReader(final DistributedFileSystem fs,
+      LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
     InetSocketAddress targetAddr = null;
     ExtendedBlock block = testBlock.getBlock();
     DatanodeInfo[] nodes = testBlock.getLocations();
     targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
 
-    final DistributedFileSystem fs = cluster.getFileSystem();
     return new BlockReaderFactory(fs.getClient().getConf()).
       setInetSocketAddress(targetAddr).
       setBlock(block).
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
index d8acefff24..1a767c320e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
@@ -250,8 +250,8 @@ public void run() {
       LocatedBlock lblock = locatedBlocks.get(0); // first block
       BlockReader blockReader = null;
       try {
-        blockReader = BlockReaderTestUtil.
-            getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+        blockReader = BlockReaderTestUtil.getBlockReader(
+            cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
         Assert.fail("expected getBlockReader to fail the first time.");
       } catch (Throwable t) {
         Assert.assertTrue("expected to see 'TCP reads were disabled " +
@@ -265,8 +265,8 @@ public void run() {
 
       // Second time should succeed.
       try {
-        blockReader = BlockReaderTestUtil.
-            getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+        blockReader = BlockReaderTestUtil.getBlockReader(
+            cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
       } catch (Throwable t) {
         LOG.error("error trying to retrieve a block reader " +
             "the second time.", t);
@@ -474,8 +474,8 @@ public void run() {
         while (true) {
           BlockReader blockReader = null;
           try {
-            blockReader = BlockReaderTestUtil.
-                getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+            blockReader = BlockReaderTestUtil.getBlockReader(
+                cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
             sem.release();
             try {
               blockReader.readAll(buf, 0, TEST_FILE_LEN);
@@ -514,8 +514,8 @@ public void run() {
     // getting a ClosedChannelException.
     BlockReader blockReader = null;
     try {
-      blockReader = BlockReaderTestUtil.
-          getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+      blockReader = BlockReaderTestUtil.getBlockReader(
+          cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
       blockReader.readFully(buf, 0, TEST_FILE_LEN);
     } finally {
       if (blockReader != null) blockReader.close();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 160b190cbe..c2131834b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.Socket;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -29,25 +27,14 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -59,7 +46,6 @@ public class TestDFSStripedOutputStream {
   private int parityBlocks = HdfsConstants.NUM_PARITY_BLOCKS;
 
   private MiniDFSCluster cluster;
-  private Configuration conf = new Configuration();
   private DistributedFileSystem fs;
   private final int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
   private final int stripesPerBlock = 4;
@@ -173,7 +159,11 @@ private void testOneFile(String src, int writeBytes) throws IOException {
     // check file length
     FileStatus status = fs.getFileStatus(testPath);
     Assert.assertEquals(writeBytes, status.getLen());
+
+    checkData(src, writeBytes);
+  }
 
+  void checkData(String src, int writeBytes) throws IOException {
     List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
     LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0L);
 
@@ -199,11 +189,7 @@ private void testOneFile(String src, int writeBytes) throws IOException {
       if (lblock == null) {
         continue;
       }
-      DatanodeInfo[] nodes = lblock.getLocations();
       ExtendedBlock block = lblock.getBlock();
-      InetSocketAddress targetAddr = NetUtils.createSocketAddr(
-          nodes[0].getXferAddr());
-
       byte[] blockBytes = new byte[(int)block.getNumBytes()];
       if (i < dataBlocks) {
         dataBlockBytes[i] = blockBytes;
@@ -215,40 +201,8 @@ private void testOneFile(String src, int writeBytes) throws IOException {
         continue;
       }
 
-      BlockReader blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
-          setFileName(src).
-          setBlock(block).
-          setBlockToken(lblock.getBlockToken()).
-          setInetSocketAddress(targetAddr).
-          setStartOffset(0).
-          setLength(block.getNumBytes()).
-          setVerifyChecksum(true).
-          setClientName("TestStripeLayoutWrite").
-          setDatanodeInfo(nodes[0]).
-          setCachingStrategy(CachingStrategy.newDefaultStrategy()).
-          setClientCacheContext(ClientContext.getFromConf(conf)).
-          setConfiguration(conf).
-          setRemotePeerFactory(new RemotePeerFactory() {
-            @Override
-            public Peer newConnectedPeer(InetSocketAddress addr,
-                Token<BlockTokenIdentifier> blockToken,
-                DatanodeID datanodeId)
-                throws IOException {
-              Peer peer = null;
-              Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
-              try {
-                sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-                sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
-                peer = TcpPeerServer.peerFromSocket(sock);
-              } finally {
-                if (peer == null) {
-                  IOUtils.closeSocket(sock);
-                }
-              }
-              return peer;
-            }
-          }).build();
-
+      final BlockReader blockReader = BlockReaderTestUtil.getBlockReader(
+          fs, lblock, 0, block.getNumBytes());
       blockReader.readAll(blockBytes, 0, (int) block.getNumBytes());
       blockReader.close();
     }
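
For reviewers, a minimal sketch of how a caller uses the consolidated helper after this patch. The method readFirstBlock and its src argument are hypothetical, not part of the patch; the sketch assumes it sits in org.apache.hadoop.hdfs test code (next to BlockReaderTestUtil), with a MiniDFSCluster already running and the file already written, and that java.io.IOException is imported.

  // Hypothetical test helper: reads back the first block of 'src' through the
  // refactored BlockReaderTestUtil.getBlockReader, which now takes the
  // DistributedFileSystem directly (instead of the MiniDFSCluster) and a
  // long length, so striped tests can pass block.getNumBytes() unchanged.
  static byte[] readFirstBlock(MiniDFSCluster cluster, String src)
      throws IOException {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final LocatedBlock lblock =
        fs.getClient().getLocatedBlocks(src, 0L).getLocatedBlocks().get(0);
    final long numBytes = lblock.getBlock().getNumBytes();
    final byte[] buf = new byte[(int) numBytes];
    BlockReader blockReader = null;
    try {
      blockReader = BlockReaderTestUtil.getBlockReader(fs, lblock, 0, numBytes);
      blockReader.readAll(buf, 0, (int) numBytes);
    } finally {
      if (blockReader != null) {
        blockReader.close();
      }
    }
    return buf;
  }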