From dfbde3fc511495ac998f07d674a87355de75fc04 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Tue, 17 Nov 2015 10:48:15 -0800
Subject: [PATCH] HDFS-9252. Change TestFileTruncate to use FsDatasetTestUtils
 to get block file size and genstamp. (Lei (Eddy) Xu via cmccabe)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 ++
 .../fsdataset/impl/FsDatasetUtil.java        |  5 +--
 .../server/datanode/FsDatasetTestUtils.java  | 10 ++++++
 .../impl/FsDatasetImplTestUtils.java         | 22 +++++++++++-
 .../server/namenode/TestFileTruncate.java    | 35 ++++++++++---------
 5 files changed, 55 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2e17e65fad..5968dd4c75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1654,6 +1654,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9369. Use ctest to run tests for hadoop-hdfs-native-client. (wheat9)
 
+    HDFS-9252. Change TestFileTruncate to use FsDatasetTestUtils to get block
+    file size and genstamp. (Lei (Eddy) Xu via cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
index 8c5b4a13b3..f695c8c228 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
@@ -75,14 +75,15 @@ public boolean accept(File dir, String name) {
    * Find the meta-file for the specified block file
    * and then return the generation stamp from the name of the meta-file.
    */
-  static long getGenerationStampFromFile(File[] listdir, File blockFile) {
+  static long getGenerationStampFromFile(File[] listdir, File blockFile)
+      throws IOException {
     String blockName = blockFile.getName();
     for (int j = 0; j < listdir.length; j++) {
       String path = listdir[j].getName();
       if (!path.startsWith(blockName)) {
         continue;
       }
-      if (blockFile == listdir[j]) {
+      if (blockFile.getCanonicalPath().equals(listdir[j].getCanonicalPath())) {
         continue;
       }
       return Block.getGenerationStamp(listdir[j].getName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
index 51cb2bf6e2..07fb7ceffa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java
@@ -232,4 +232,14 @@ Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
    * Obtain the raw capacity of underlying storage per DataNode.
    */
   long getRawCapacity() throws IOException;
+
+  /**
+   * Get the persistently stored length of the block.
+   */
+  long getStoredDataLength(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the persistently stored generation stamp.
+   */
+  long getStoredGenerationStamp(ExtendedBlock block) throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
index 0a32102ce4..8fce163a25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
@@ -175,6 +176,10 @@ public FsDatasetImplTestUtils(DataNode datanode) {
     dataset = (FsDatasetImpl) datanode.getFSDataset();
   }
 
+  private File getBlockFile(ExtendedBlock eb) throws IOException {
+    return dataset.getBlockFile(eb.getBlockPoolId(), eb.getBlockId());
+  }
+
   /**
    * Return a materialized replica from the FsDatasetImpl.
    */
@@ -235,7 +240,6 @@ public Replica createReplicaInPipeline(
     return rip;
   }
 
-
   @Override
   public Replica createRBW(ExtendedBlock eb) throws IOException {
     try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
@@ -343,4 +347,20 @@ public long getRawCapacity() throws IOException {
       return df.getCapacity();
     }
   }
+
+  @Override
+  public long getStoredDataLength(ExtendedBlock block) throws IOException {
+    File f = getBlockFile(block);
+    try (RandomAccessFile raf = new RandomAccessFile(f, "r")) {
+      return raf.length();
+    }
+  }
+
+  @Override
+  public long getStoredGenerationStamp(ExtendedBlock block) throws IOException {
+    File f = getBlockFile(block);
+    File dir = f.getParentFile();
+    File[] files = FileUtil.listFiles(dir);
+    return FsDatasetUtil.getGenerationStampFromFile(files, f);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 7fd99d6bb6..a642492dd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -699,11 +700,11 @@ public void testTruncateWithDataNodesRestart() throws Exception {
     // Wait replicas come to 3
     DFSTestUtil.waitReplication(fs, p, REPLICATION);
     // Old replica is disregarded and replaced with the truncated one
-    assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn,
-        newBlock.getBlock()).getName().endsWith(
-        newBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
+        newBlock.getBlock().getGenerationStamp());
 
     // Validate the file
     FileStatus fileStatus = fs.getFileStatus(p);
@@ -753,15 +754,15 @@ public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
     // Wait replicas come to 3
     DFSTestUtil.waitReplication(fs, p, REPLICATION);
 
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
     // New block is replicated to dn1
-    assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
     // Old replica exists too since there is snapshot
-    assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(),
+    assertEquals(utils.getStoredDataLength(oldBlock.getBlock()),
         oldBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn,
-        oldBlock.getBlock()).getName().endsWith(
-        oldBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(oldBlock.getBlock()),
+        oldBlock.getBlock().getGenerationStamp());
 
     // Validate the file
     FileStatus fileStatus = fs.getFileStatus(p);
@@ -813,18 +814,18 @@ public void testTruncateWithDataNodesRestartImmediately() throws Exception {
     // Wait replicas come to 3
     DFSTestUtil.waitReplication(fs, p, REPLICATION);
     // Old replica is disregarded and replaced with the truncated one on dn0
-    assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(),
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn0);
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
        newBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn0,
-        newBlock.getBlock()).getName().endsWith(
-        newBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
+        newBlock.getBlock().getGenerationStamp());
 
     // Old replica is disregarded and replaced with the truncated one on dn1
-    assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(),
+    utils = cluster.getFsDatasetTestUtils(dn1);
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn1,
-        newBlock.getBlock()).getName().endsWith(
-        newBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
+        newBlock.getBlock().getGenerationStamp());
 
     // Validate the file
     FileStatus fileStatus = fs.getFileStatus(p);
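
Note (not part of the patch): the FsDatasetUtil hunk above replaces a java.io.File
reference comparison ("==") with a canonical-path comparison when skipping the block
file itself in the directory listing. The standalone sketch below is only an
illustration of that rationale; the class name and temp-file prefix are made up. It
shows that two File objects for the same on-disk file are not reference-equal, while
their canonical paths do match.

import java.io.File;
import java.io.IOException;

// Illustration only: why a File reference comparison can miss the same file
// when it is reached through a different File instance (e.g. from listFiles()).
public class FileIdentityDemo {
  public static void main(String[] args) throws IOException {
    File blockFile = File.createTempFile("blk_demo", null);
    blockFile.deleteOnExit();

    // A second File instance pointing at the same path, as a directory
    // listing would return rather than the caller's original object.
    File listedFile = new File(blockFile.getPath());

    // Reference equality holds only for the exact same object: prints false.
    System.out.println(blockFile == listedFile);

    // Canonical paths resolve to the same underlying file: prints true.
    System.out.println(
        blockFile.getCanonicalPath().equals(listedFile.getCanonicalPath()));
  }
}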