HDFS-9490. MiniDFSCluster should change block generation stamp via FsDatasetTestUtils. (Tony Wu via lei)

Lei Xu 2015-12-04 10:24:55 -08:00
parent 3fa33b5c2c
commit 0ac8fb4b33
5 changed files with 30 additions and 6 deletions


@@ -1710,6 +1710,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9485. Make BlockManager#removeFromExcessReplicateMap accept BlockInfo
     instead of Block. (Mingliang Liu via jing9)
 
+    HDFS-9490. MiniDFSCluster should change block generation stamp via
+    FsDatasetTestUtils. (Tony Wu via lei)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than


@@ -2141,12 +2141,10 @@ public void truncateMeta(int i, ExtendedBlock blk, int newSize)
     getMaterializedReplica(i, blk).truncateMeta(newSize);
   }
 
-  public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
+  public void changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
       long newGenStamp) throws IOException {
-    File blockFile = getBlockFile(dnIndex, blk);
-    File metaFile = FsDatasetUtil.findMetaFile(blockFile);
-    return metaFile.renameTo(new File(DatanodeUtil.getMetaName(
-        blockFile.getAbsolutePath(), newGenStamp)));
+    getFsDatasetTestUtils(dnIndex)
+        .changeStoredGenerationStamp(blk, newGenStamp);
   }
 
   /*
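
The MiniDFSCluster helper now delegates to the per-DataNode FsDatasetTestUtils instead of renaming meta files itself, and it signals failure with an IOException rather than a boolean return. A minimal sketch of the updated call-site pattern (assuming a running MiniDFSCluster named "cluster" and an ExtendedBlock "block" replicated on DataNode 0, as in the test change further down):

    // Void return: the old assertTrue(...) wrapper goes away, and a failed
    // rename now surfaces as an IOException instead of a false result.
    cluster.changeGenStampOfBlock(0, block, 900);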


@@ -242,4 +242,13 @@ Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
    * Get the persistently stored generation stamp.
    */
   long getStoredGenerationStamp(ExtendedBlock block) throws IOException;
+
+  /**
+   * Change the persistently stored generation stamp.
+   * @param block the block whose generation stamp will be changed
+   * @param newGenStamp the new generation stamp
+   * @throws IOException
+   */
+  void changeStoredGenerationStamp(ExtendedBlock block, long newGenStamp)
+      throws IOException;
 }
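
The new mutator pairs with the existing getStoredGenerationStamp() getter declared just above it. A short usage sketch, assuming a JUnit test with a running MiniDFSCluster ("cluster") that exposes getFsDatasetTestUtils(int) and an ExtendedBlock ("block") holding a finalized replica on DataNode 0:

    // Read the stamp persisted on disk, then roll it forward by one.
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
    long stored = utils.getStoredGenerationStamp(block);   // existing getter
    utils.changeStoredGenerationStamp(block, stored + 1);  // new mutator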


@@ -29,6 +29,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
@@ -47,6 +48,7 @@
 import java.io.RandomAccessFile;
 import java.nio.channels.FileChannel;
 import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
 import java.util.Random;
 
 /**
@@ -363,4 +365,16 @@ public long getStoredGenerationStamp(ExtendedBlock block) throws IOException {
     File[] files = FileUtil.listFiles(dir);
     return FsDatasetUtil.getGenerationStampFromFile(files, f);
   }
+
+  @Override
+  public void changeStoredGenerationStamp(
+      ExtendedBlock block, long newGenStamp) throws IOException {
+    File blockFile =
+        dataset.getBlockFile(block.getBlockPoolId(), block.getBlockId());
+    File metaFile = FsDatasetUtil.findMetaFile(blockFile);
+    File newMetaFile = new File(
+        DatanodeUtil.getMetaName(blockFile.getAbsolutePath(), newGenStamp));
+    Files.move(metaFile.toPath(), newMetaFile.toPath(),
+        StandardCopyOption.ATOMIC_MOVE);
+  }
 }
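
For FsDatasetImpl the stored generation stamp is encoded in the meta file name, so changing it is just an atomic rename of that file; Files.move with ATOMIC_MOVE replaces the bare File#renameTo used before and throws on failure instead of returning false. Illustrative only (the directory layout below is hypothetical), showing what the rename amounts to when rolling a stamp back to 900:

    String blockPath = "/data/dn0/current/BP-1/current/finalized/blk_1073741825";
    String newMeta = DatanodeUtil.getMetaName(blockPath, 900);
    // newMeta ends in "blk_1073741825_900.meta"; the existing meta file, e.g.
    // "blk_1073741825_1001.meta", is moved onto that name while the block data
    // file itself is left untouched.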


@@ -68,7 +68,7 @@ public void testChangedStorageId() throws IOException, URISyntaxException,
     // Change the gen stamp of the block on datanode to go back in time (gen
     // stamps start at 1000)
     ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
-    assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
+    cluster.changeGenStampOfBlock(0, block, 900);
 
     // Run directory dsscanner to update Datanode's volumeMap
     DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
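
If a test wants to confirm the rollback actually reached disk before acting on the scanner's report, the stored stamp can be read back through the same test-utils API. A hedged one-line check, not part of this patch, reusing the "cluster" and "block" variables above and a static import of org.junit.Assert.assertEquals:

    assertEquals(900,
        cluster.getFsDatasetTestUtils(0).getStoredGenerationStamp(block));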