HDFS-9252. Change TestFileTruncate to use FsDatasetTestUtils to get block file size and genstamp. (Lei (Eddy) Xu via cmccabe)
commit dfbde3fc51
parent b82a6cee4b
CHANGES.txt
@@ -1654,6 +1654,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9369. Use ctest to run tests for hadoop-hdfs-native-client. (wheat9)
 
+    HDFS-9252. Change TestFileTruncate to use FsDatasetTestUtils to get block
+    file size and genstamp. (Lei (Eddy) Xu via cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
FsDatasetUtil.java
@@ -75,14 +75,15 @@ public boolean accept(File dir, String name) {
    * Find the meta-file for the specified block file
    * and then return the generation stamp from the name of the meta-file.
    */
-  static long getGenerationStampFromFile(File[] listdir, File blockFile) {
+  static long getGenerationStampFromFile(File[] listdir, File blockFile)
+      throws IOException {
     String blockName = blockFile.getName();
     for (int j = 0; j < listdir.length; j++) {
       String path = listdir[j].getName();
       if (!path.startsWith(blockName)) {
         continue;
       }
-      if (blockFile == listdir[j]) {
+      if (blockFile.getCanonicalPath().equals(listdir[j].getCanonicalPath())) {
         continue;
       }
       return Block.getGenerationStamp(listdir[j].getName());
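The hunk above fixes a latent bug: `listdir` comes from a fresh directory listing, so its File elements are never the same object as `blockFile`, and the old reference-equality check never actually skipped the block file itself. A minimal standalone sketch of the distinction (hypothetical path, not from the patch); note that `getCanonicalPath()` can do I/O, which is why the method now declares `throws IOException`:

    import java.io.File;
    import java.io.IOException;

    // Sketch only: why "==" and equals() are the wrong tests for
    // "same file on disk", and why canonical paths are compared instead.
    public class CanonicalPathDemo {
      public static void main(String[] args) throws IOException {
        File a = new File("/tmp/blk_1073741825");    // hypothetical block file
        File b = new File("/tmp/./blk_1073741825");  // same file, different path text

        System.out.println(a == b);      // false: two distinct File objects
        System.out.println(a.equals(b)); // false: abstract pathnames differ textually
        // Canonical paths resolve ".", "..", and symlinks, so both names
        // collapse to the same string; this resolution may throw IOException.
        System.out.println(a.getCanonicalPath().equals(b.getCanonicalPath())); // true
      }
    }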
FsDatasetTestUtils.java
@@ -232,4 +232,14 @@ Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
    * Obtain the raw capacity of underlying storage per DataNode.
    */
   long getRawCapacity() throws IOException;
+
+  /**
+   * Get the persistently stored length of the block.
+   */
+  long getStoredDataLength(ExtendedBlock block) throws IOException;
+
+  /**
+   * Get the persistently stored generation stamp.
+   */
+  long getStoredGenerationStamp(ExtendedBlock block) throws IOException;
 }
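A hedged sketch of how a test consumes the two new interface methods. The helper class below is hypothetical (not part of the patch), but `MiniDFSCluster.getFsDatasetTestUtils(DataNode)` is the accessor this patch itself uses in TestFileTruncate:

    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;

    // Hypothetical test helper: verifies a replica's persisted state through
    // the dataset-neutral interface instead of reading raw block files.
    public class ReplicaAssertions {
      static void assertStoredReplica(MiniDFSCluster cluster, DataNode dn,
          ExtendedBlock block, long expectedLength) throws Exception {
        FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
        assertEquals(expectedLength, utils.getStoredDataLength(block));
        assertEquals(block.getGenerationStamp(),
            utils.getStoredGenerationStamp(block));
      }
    }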
FsDatasetImplTestUtils.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
@@ -175,6 +176,10 @@ public FsDatasetImplTestUtils(DataNode datanode) {
     dataset = (FsDatasetImpl) datanode.getFSDataset();
   }
 
+  private File getBlockFile(ExtendedBlock eb) throws IOException {
+    return dataset.getBlockFile(eb.getBlockPoolId(), eb.getBlockId());
+  }
+
   /**
    * Return a materialized replica from the FsDatasetImpl.
    */
@@ -235,7 +240,6 @@ public Replica createReplicaInPipeline(
     return rip;
   }
 
-
   @Override
   public Replica createRBW(ExtendedBlock eb) throws IOException {
     try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
@@ -343,4 +347,20 @@ public long getRawCapacity() throws IOException {
       return df.getCapacity();
     }
   }
+
+  @Override
+  public long getStoredDataLength(ExtendedBlock block) throws IOException {
+    File f = getBlockFile(block);
+    try (RandomAccessFile raf = new RandomAccessFile(f, "r")) {
+      return raf.length();
+    }
+  }
+
+  @Override
+  public long getStoredGenerationStamp(ExtendedBlock block) throws IOException {
+    File f = getBlockFile(block);
+    File dir = f.getParentFile();
+    File[] files = FileUtil.listFiles(dir);
+    return FsDatasetUtil.getGenerationStampFromFile(files, f);
+  }
 }
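Two implementation details worth noting. `getStoredDataLength` opens the block file with RandomAccessFile rather than calling `File#length()`, so a missing replica surfaces as a FileNotFoundException instead of a silent length of 0. And `getStoredGenerationStamp` recovers the stamp from the sibling meta-file, whose name embeds it. A minimal sketch of that naming convention, assuming the standard HDFS replica layout (this mirrors what `Block.getGenerationStamp` does with the name; it is not the production parser):

    // Sketch: a block file "blk_<blockId>" sits next to a meta-file
    // named "blk_<blockId>_<genStamp>.meta".
    static long genStampFromMetaName(String metaName) {
      // e.g. "blk_1073741825_1001.meta" -> 1001
      int underscore = metaName.lastIndexOf('_');
      int dot = metaName.lastIndexOf('.');
      return Long.parseLong(metaName.substring(underscore + 1, dot));
    }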
TestFileTruncate.java
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -699,11 +700,11 @@ public void testTruncateWithDataNodesRestart() throws Exception {
     // Wait replicas come to 3
     DFSTestUtil.waitReplication(fs, p, REPLICATION);
     // Old replica is disregarded and replaced with the truncated one
-    assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn,
-        newBlock.getBlock()).getName().endsWith(
-        newBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
+        newBlock.getBlock().getGenerationStamp());
 
     // Validate the file
     FileStatus fileStatus = fs.getFileStatus(p);
@@ -753,15 +754,15 @@ public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
 
     // Wait replicas come to 3
     DFSTestUtil.waitReplication(fs, p, REPLICATION);
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
     // New block is replicated to dn1
-    assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
     // Old replica exists too since there is snapshot
-    assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(),
+    assertEquals(utils.getStoredDataLength(oldBlock.getBlock()),
         oldBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn,
-        oldBlock.getBlock()).getName().endsWith(
-        oldBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(oldBlock.getBlock()),
+        oldBlock.getBlock().getGenerationStamp());
 
     // Validate the file
     FileStatus fileStatus = fs.getFileStatus(p);
@@ -813,18 +814,18 @@ public void testTruncateWithDataNodesRestartImmediately() throws Exception {
     // Wait replicas come to 3
     DFSTestUtil.waitReplication(fs, p, REPLICATION);
     // Old replica is disregarded and replaced with the truncated one on dn0
-    assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(),
+    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn0);
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn0,
-        newBlock.getBlock()).getName().endsWith(
-        newBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
+        newBlock.getBlock().getGenerationStamp());
 
     // Old replica is disregarded and replaced with the truncated one on dn1
-    assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(),
+    utils = cluster.getFsDatasetTestUtils(dn1);
+    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
         newBlock.getBlockSize());
-    assertTrue(cluster.getBlockMetadataFile(dn1,
-        newBlock.getBlock()).getName().endsWith(
-        newBlock.getBlock().getGenerationStamp() + ".meta"));
+    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
+        newBlock.getBlock().getGenerationStamp());
 
     // Validate the file
     FileStatus fileStatus = fs.getFileStatus(p);
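Taken together, the three test hunks apply one migration pattern: every direct probe of the on-disk block file or meta-file name is replaced by a call through FsDatasetTestUtils, so the assertions can keep working for dataset implementations that do not store replicas as local files. Condensed before/after, using the variables from testTruncateWithDataNodesRestart above:

    // Before: assertions reach into FsDatasetImpl's on-disk layout.
    assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
        newBlock.getBlockSize());
    assertTrue(cluster.getBlockMetadataFile(dn, newBlock.getBlock())
        .getName().endsWith(
            newBlock.getBlock().getGenerationStamp() + ".meta"));

    // After: the same checks, expressed against the dataset-neutral interface.
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(dn);
    assertEquals(utils.getStoredDataLength(newBlock.getBlock()),
        newBlock.getBlockSize());
    assertEquals(utils.getStoredGenerationStamp(newBlock.getBlock()),
        newBlock.getBlock().getGenerationStamp());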