HDFS-7632. MiniDFSCluster configures DataNode data directories incorrectly if using more than 1 DataNode and more than 2 storage locations per DataNode. Contributed by Chris Nauroth.

This commit is contained in:
cnauroth 2015-01-16 10:52:01 -08:00
parent 9c94015e36
commit ec4389cf72
14 changed files with 58 additions and 57 deletions

@@ -716,6 +716,10 @@ Release 2.7.0 - UNRELEASED
HDFS-7635. Remove TestCorruptFilesJsp from branch-2. (cnauroth)
HDFS-7632. MiniDFSCluster configures DataNode data directories incorrectly if
using more than 1 DataNode and more than 2 storage locations per DataNode.
(cnauroth)
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES

@ -1876,7 +1876,7 @@ public class MiniDFSCluster {
* @return true if a replica was corrupted, false otherwise * @return true if a replica was corrupted, false otherwise
* Types: delete, write bad data, truncate * Types: delete, write bad data, truncate
*/ */
public static boolean corruptReplica(int i, ExtendedBlock blk) public boolean corruptReplica(int i, ExtendedBlock blk)
throws IOException { throws IOException {
File blockFile = getBlockFile(i, blk); File blockFile = getBlockFile(i, blk);
return corruptBlock(blockFile); return corruptBlock(blockFile);
@ -1913,7 +1913,7 @@ public class MiniDFSCluster {
return blockFile.delete(); return blockFile.delete();
} }
public static boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk, public boolean changeGenStampOfBlock(int dnIndex, ExtendedBlock blk,
long newGenStamp) throws IOException { long newGenStamp) throws IOException {
File blockFile = getBlockFile(dnIndex, blk); File blockFile = getBlockFile(dnIndex, blk);
File metaFile = FsDatasetUtil.findMetaFile(blockFile); File metaFile = FsDatasetUtil.findMetaFile(blockFile);
@ -2429,7 +2429,7 @@ public class MiniDFSCluster {
* @param dirIndex directory index. * @param dirIndex directory index.
* @return Storage directory * @return Storage directory
*/ */
public static File getStorageDir(int dnIndex, int dirIndex) { public File getStorageDir(int dnIndex, int dirIndex) {
return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex)); return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
} }
@ -2440,8 +2440,8 @@ public class MiniDFSCluster {
* @param dirIndex directory index. * @param dirIndex directory index.
* @return storage directory path * @return storage directory path
*/ */
private static String getStorageDirPath(int dnIndex, int dirIndex) { private String getStorageDirPath(int dnIndex, int dirIndex) {
return "data/data" + (2 * dnIndex + 1 + dirIndex); return "data/data" + (storagesPerDatanode * dnIndex + 1 + dirIndex);
} }
/** /**
@ -2570,10 +2570,10 @@ public class MiniDFSCluster {
* @param dnIndex Index of the datanode to get block files for * @param dnIndex Index of the datanode to get block files for
* @param block block for which corresponding files are needed * @param block block for which corresponding files are needed
*/ */
public static File getBlockFile(int dnIndex, ExtendedBlock block) { public File getBlockFile(int dnIndex, ExtendedBlock block) {
// Check for block file in the two storage directories of the datanode // Check for block file in the two storage directories of the datanode
for (int i = 0; i <=1 ; i++) { for (int i = 0; i <=1 ; i++) {
File storageDir = MiniDFSCluster.getStorageDir(dnIndex, i); File storageDir = getStorageDir(dnIndex, i);
File blockFile = getBlockFile(storageDir, block); File blockFile = getBlockFile(storageDir, block);
if (blockFile.exists()) { if (blockFile.exists()) {
return blockFile; return blockFile;
@ -2588,10 +2588,10 @@ public class MiniDFSCluster {
* @param dnIndex Index of the datanode to get block files for * @param dnIndex Index of the datanode to get block files for
* @param block block for which corresponding files are needed * @param block block for which corresponding files are needed
*/ */
public static File getBlockMetadataFile(int dnIndex, ExtendedBlock block) { public File getBlockMetadataFile(int dnIndex, ExtendedBlock block) {
// Check for block file in the two storage directories of the datanode // Check for block file in the two storage directories of the datanode
for (int i = 0; i <=1 ; i++) { for (int i = 0; i <=1 ; i++) {
File storageDir = MiniDFSCluster.getStorageDir(dnIndex, i); File storageDir = getStorageDir(dnIndex, i);
File blockMetaFile = getBlockMetadataFile(storageDir, block); File blockMetaFile = getBlockMetadataFile(storageDir, block);
if (blockMetaFile.exists()) { if (blockMetaFile.exists()) {
return blockMetaFile; return blockMetaFile;

@ -160,8 +160,8 @@ public class TestBlockReaderLocal {
fsIn.close(); fsIn.close();
fsIn = null; fsIn = null;
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH); ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
File dataFile = MiniDFSCluster.getBlockFile(0, block); File dataFile = cluster.getBlockFile(0, block);
File metaFile = MiniDFSCluster.getBlockMetadataFile(0, block); File metaFile = cluster.getBlockMetadataFile(0, block);
ShortCircuitCache shortCircuitCache = ShortCircuitCache shortCircuitCache =
ClientContext.getFromConf(conf).getShortCircuitCache(); ClientContext.getFromConf(conf).getShortCircuitCache();

@ -179,10 +179,6 @@ public class TestDatanodeBlockScanner {
cluster.shutdown(); cluster.shutdown();
} }
public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException {
return MiniDFSCluster.corruptReplica(replica, blk);
}
@Test @Test
public void testBlockCorruptionPolicy() throws Exception { public void testBlockCorruptionPolicy() throws Exception {
Configuration conf = new HdfsConfiguration(); Configuration conf = new HdfsConfiguration();
@ -202,7 +198,7 @@ public class TestDatanodeBlockScanner {
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster, file1, 0)); assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster, file1, 0));
// Corrupt random replica of block // Corrupt random replica of block
assertTrue(MiniDFSCluster.corruptReplica(rand, block)); assertTrue(cluster.corruptReplica(rand, block));
// Restart the datanode hoping the corrupt block to be reported // Restart the datanode hoping the corrupt block to be reported
cluster.restartDataNode(rand); cluster.restartDataNode(rand);
@ -213,9 +209,9 @@ public class TestDatanodeBlockScanner {
// Corrupt all replicas. Now, block should be marked as corrupt // Corrupt all replicas. Now, block should be marked as corrupt
// and we should get all the replicas // and we should get all the replicas
assertTrue(MiniDFSCluster.corruptReplica(0, block)); assertTrue(cluster.corruptReplica(0, block));
assertTrue(MiniDFSCluster.corruptReplica(1, block)); assertTrue(cluster.corruptReplica(1, block));
assertTrue(MiniDFSCluster.corruptReplica(2, block)); assertTrue(cluster.corruptReplica(2, block));
// Trigger each of the DNs to scan this block immediately. // Trigger each of the DNs to scan this block immediately.
// The block pool scanner doesn't run frequently enough on its own // The block pool scanner doesn't run frequently enough on its own
@ -288,7 +284,7 @@ public class TestDatanodeBlockScanner {
// Corrupt numCorruptReplicas replicas of block // Corrupt numCorruptReplicas replicas of block
int[] corruptReplicasDNIDs = new int[numCorruptReplicas]; int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) { for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
if (corruptReplica(block, i)) { if (cluster.corruptReplica(i, block)) {
corruptReplicasDNIDs[j++] = i; corruptReplicasDNIDs[j++] = i;
LOG.info("successfully corrupted block " + block + " on node " LOG.info("successfully corrupted block " + block + " on node "
+ i + " " + cluster.getDataNodes().get(i).getDisplayName()); + i + " " + cluster.getDataNodes().get(i).getDisplayName());
@ -373,7 +369,7 @@ public class TestDatanodeBlockScanner {
assertTrue(waitForVerification(infoPort, fs, fileName, 1, startTime, TIMEOUT) >= startTime); assertTrue(waitForVerification(infoPort, fs, fileName, 1, startTime, TIMEOUT) >= startTime);
// Truncate replica of block // Truncate replica of block
if (!changeReplicaLength(block, 0, -1)) { if (!changeReplicaLength(cluster, block, 0, -1)) {
throw new IOException( throw new IOException(
"failed to find or change length of replica on node 0 " "failed to find or change length of replica on node 0 "
+ cluster.getDataNodes().get(0).getDisplayName()); + cluster.getDataNodes().get(0).getDisplayName());
@ -403,7 +399,7 @@ public class TestDatanodeBlockScanner {
cluster.getFileSystem(), fileName, REPLICATION_FACTOR); cluster.getFileSystem(), fileName, REPLICATION_FACTOR);
// Make sure that truncated block will be deleted // Make sure that truncated block will be deleted
waitForBlockDeleted(block, 0, TIMEOUT); waitForBlockDeleted(cluster, block, 0, TIMEOUT);
} finally { } finally {
cluster.shutdown(); cluster.shutdown();
} }
@ -412,9 +408,9 @@ public class TestDatanodeBlockScanner {
/** /**
* Change the length of a block at datanode dnIndex * Change the length of a block at datanode dnIndex
*/ */
static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex, static boolean changeReplicaLength(MiniDFSCluster cluster, ExtendedBlock blk,
int lenDelta) throws IOException { int dnIndex, int lenDelta) throws IOException {
File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); File blockFile = cluster.getBlockFile(dnIndex, blk);
if (blockFile != null && blockFile.exists()) { if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
raFile.setLength(raFile.length()+lenDelta); raFile.setLength(raFile.length()+lenDelta);
@ -425,9 +421,10 @@ public class TestDatanodeBlockScanner {
return false; return false;
} }
private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex, private static void waitForBlockDeleted(MiniDFSCluster cluster,
long timeout) throws TimeoutException, InterruptedException { ExtendedBlock blk, int dnIndex, long timeout) throws TimeoutException,
File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); InterruptedException {
File blockFile = cluster.getBlockFile(dnIndex, blk);
long failtime = Time.monotonicNow() long failtime = Time.monotonicNow()
+ ((timeout > 0) ? timeout : Long.MAX_VALUE); + ((timeout > 0) ? timeout : Long.MAX_VALUE);
while (blockFile != null && blockFile.exists()) { while (blockFile != null && blockFile.exists()) {
@ -436,7 +433,7 @@ public class TestDatanodeBlockScanner {
+ blockFile.getPath() + (blockFile.exists() ? " still exists; " : " is absent; ")); + blockFile.getPath() + (blockFile.exists() ? " still exists; " : " is absent; "));
} }
Thread.sleep(100); Thread.sleep(100);
blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk); blockFile = cluster.getBlockFile(dnIndex, blk);
} }
} }

@ -79,7 +79,7 @@ public class TestMissingBlocksAlert {
// Corrupt the block // Corrupt the block
ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile); ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0)); assertTrue(cluster.corruptReplica(0, block));
// read the file so that the corrupt block is reported to NN // read the file so that the corrupt block is reported to NN
FSDataInputStream in = dfs.open(corruptFile); FSDataInputStream in = dfs.open(corruptFile);
@ -124,8 +124,7 @@ public class TestMissingBlocksAlert {
DFSTestUtil.createFile(dfs, replOneFile, fileLen, (short)1, 0); DFSTestUtil.createFile(dfs, replOneFile, fileLen, (short)1, 0);
ExtendedBlock replOneBlock = DFSTestUtil.getFirstBlock( ExtendedBlock replOneBlock = DFSTestUtil.getFirstBlock(
dfs, replOneFile); dfs, replOneFile);
assertTrue(TestDatanodeBlockScanner.corruptReplica( assertTrue(cluster.corruptReplica(0, replOneBlock));
replOneBlock, 0));
// read the file so that the corrupt block is reported to NN // read the file so that the corrupt block is reported to NN
in = dfs.open(replOneFile); in = dfs.open(replOneFile);

@ -349,7 +349,6 @@ public class TestReplication {
0, Long.MAX_VALUE).get(0).getBlock(); 0, Long.MAX_VALUE).get(0).getBlock();
cluster.shutdown(); cluster.shutdown();
cluster = null;
for (int i=0; i<25; i++) { for (int i=0; i<25; i++) {
buffer[i] = '0'; buffer[i] = '0';
@ -358,7 +357,7 @@ public class TestReplication {
int fileCount = 0; int fileCount = 0;
// Choose 3 copies of block file - delete 1 and corrupt the remaining 2 // Choose 3 copies of block file - delete 1 and corrupt the remaining 2
for (int dnIndex=0; dnIndex<3; dnIndex++) { for (int dnIndex=0; dnIndex<3; dnIndex++) {
File blockFile = MiniDFSCluster.getBlockFile(dnIndex, block); File blockFile = cluster.getBlockFile(dnIndex, block);
LOG.info("Checking for file " + blockFile); LOG.info("Checking for file " + blockFile);
if (blockFile != null && blockFile.exists()) { if (blockFile != null && blockFile.exists()) {
@ -445,7 +444,8 @@ public class TestReplication {
// Change the length of a replica // Change the length of a replica
for (int i=0; i<cluster.getDataNodes().size(); i++) { for (int i=0; i<cluster.getDataNodes().size(); i++) {
if (TestDatanodeBlockScanner.changeReplicaLength(block, i, lenDelta)) { if (TestDatanodeBlockScanner.changeReplicaLength(cluster, block, i,
lenDelta)) {
break; break;
} }
} }

@ -209,7 +209,7 @@ public class TestBlocksWithNotEnoughRacks {
// Corrupt a replica of the block // Corrupt a replica of the block
int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b); int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt, b)); assertTrue(cluster.corruptReplica(dnToCorrupt, b));
// Restart the datanode so blocks are re-scanned, and the corrupt // Restart the datanode so blocks are re-scanned, and the corrupt
// block is detected. // block is detected.

@ -68,7 +68,7 @@ public class TestOverReplicatedBlocks {
// corrupt the block on datanode 0 // corrupt the block on datanode 0
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName); ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0)); assertTrue(cluster.corruptReplica(0, block));
DataNodeProperties dnProps = cluster.stopDataNode(0); DataNodeProperties dnProps = cluster.stopDataNode(0);
// remove block scanner log to trigger block scanning // remove block scanner log to trigger block scanning
File scanLog = new File(MiniDFSCluster.getFinalizedDir( File scanLog = new File(MiniDFSCluster.getFinalizedDir(

@ -227,7 +227,7 @@ public class TestCachingStrategy {
// verify that we dropped everything from the cache during file creation. // verify that we dropped everything from the cache during file creation.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations( ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock(); TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = MiniDFSCluster.getBlockFile(0, block).getName(); String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName); Stats stats = tracker.getStats(fadvisedFileName);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE); stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear(); stats.clear();
@ -272,7 +272,7 @@ public class TestCachingStrategy {
// verify that we dropped everything from the cache during file creation. // verify that we dropped everything from the cache during file creation.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations( ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock(); TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = MiniDFSCluster.getBlockFile(0, block).getName(); String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName); Stats stats = tracker.getStats(fadvisedFileName);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE); stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear(); stats.clear();
@ -313,7 +313,7 @@ public class TestCachingStrategy {
// specify any policy, we should have done drop-behind. // specify any policy, we should have done drop-behind.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations( ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock(); TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = MiniDFSCluster.getBlockFile(0, block).getName(); String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName); Stats stats = tracker.getStats(fadvisedFileName);
stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE); stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
stats.clear(); stats.clear();
@ -355,7 +355,7 @@ public class TestCachingStrategy {
// verify that we did not drop everything from the cache during file creation. // verify that we did not drop everything from the cache during file creation.
ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations( ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock(); TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
String fadvisedFileName = MiniDFSCluster.getBlockFile(0, block).getName(); String fadvisedFileName = cluster.getBlockFile(0, block).getName();
Stats stats = tracker.getStats(fadvisedFileName); Stats stats = tracker.getStats(fadvisedFileName);
Assert.assertNull(stats); Assert.assertNull(stats);

@ -178,7 +178,7 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
// Verify short-circuit read from RAM_DISK. // Verify short-circuit read from RAM_DISK.
ensureFileReplicasOnStorageType(path1, RAM_DISK); ensureFileReplicasOnStorageType(path1, RAM_DISK);
File metaFile = MiniDFSCluster.getBlockMetadataFile(0, File metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1)); DFSTestUtil.getFirstBlock(fs, path1));
assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize()); assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED)); assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
@ -188,7 +188,7 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
// Verify short-circuit read from RAM_DISK once again. // Verify short-circuit read from RAM_DISK once again.
ensureFileReplicasOnStorageType(path1, RAM_DISK); ensureFileReplicasOnStorageType(path1, RAM_DISK);
metaFile = MiniDFSCluster.getBlockMetadataFile(0, metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1)); DFSTestUtil.getFirstBlock(fs, path1));
assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize()); assertTrue(metaFile.length() <= BlockMetadataHeader.getHeaderSize());
assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED)); assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
@ -201,7 +201,7 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
// Verify short-circuit read still works from DEFAULT storage. This time, // Verify short-circuit read still works from DEFAULT storage. This time,
// we'll have a checksum written during lazy persistence. // we'll have a checksum written during lazy persistence.
ensureFileReplicasOnStorageType(path1, DEFAULT); ensureFileReplicasOnStorageType(path1, DEFAULT);
metaFile = MiniDFSCluster.getBlockMetadataFile(0, metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1)); DFSTestUtil.getFirstBlock(fs, path1));
assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize()); assertTrue(metaFile.length() > BlockMetadataHeader.getHeaderSize());
assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED)); assertTrue(verifyReadRandomFile(path1, BLOCK_SIZE, SEED));
@ -251,7 +251,7 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
// Corrupt the lazy-persisted block file, and verify that checksum // Corrupt the lazy-persisted block file, and verify that checksum
// verification catches it. // verification catches it.
ensureFileReplicasOnStorageType(path1, DEFAULT); ensureFileReplicasOnStorageType(path1, DEFAULT);
MiniDFSCluster.corruptReplica(0, DFSTestUtil.getFirstBlock(fs, path1)); cluster.corruptReplica(0, DFSTestUtil.getFirstBlock(fs, path1));
exception.expect(ChecksumException.class); exception.expect(ChecksumException.class);
DFSTestUtil.readFileBuffer(fs, path1); DFSTestUtil.readFileBuffer(fs, path1);
} }
@ -291,7 +291,7 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
// Corrupt the lazy-persisted checksum file, and verify that checksum // Corrupt the lazy-persisted checksum file, and verify that checksum
// verification catches it. // verification catches it.
ensureFileReplicasOnStorageType(path1, DEFAULT); ensureFileReplicasOnStorageType(path1, DEFAULT);
File metaFile = MiniDFSCluster.getBlockMetadataFile(0, File metaFile = cluster.getBlockMetadataFile(0,
DFSTestUtil.getFirstBlock(fs, path1)); DFSTestUtil.getFirstBlock(fs, path1));
MiniDFSCluster.corruptBlock(metaFile); MiniDFSCluster.corruptBlock(metaFile);
exception.expect(ChecksumException.class); exception.expect(ChecksumException.class);

@ -345,7 +345,7 @@ public class TestFsck {
totalMissingBlocks += ctFile.getTotalMissingBlocks(); totalMissingBlocks += ctFile.getTotalMissingBlocks();
} }
for (CorruptedTestFile ctFile : ctFiles) { for (CorruptedTestFile ctFile : ctFiles) {
ctFile.removeBlocks(); ctFile.removeBlocks(cluster);
} }
// Wait for fsck to discover all the missing blocks // Wait for fsck to discover all the missing blocks
while (true) { while (true) {
@ -432,14 +432,15 @@ public class TestFsck {
return content; return content;
} }
public void removeBlocks() throws AccessControlException, public void removeBlocks(MiniDFSCluster cluster)
FileNotFoundException, UnresolvedLinkException, IOException { throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
for (int corruptIdx : blocksToCorrupt) { for (int corruptIdx : blocksToCorrupt) {
// Corrupt a block by deleting it // Corrupt a block by deleting it
ExtendedBlock block = dfsClient.getNamenode().getBlockLocations( ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
name, blockSize * corruptIdx, Long.MAX_VALUE).get(0).getBlock(); name, blockSize * corruptIdx, Long.MAX_VALUE).get(0).getBlock();
for (int i = 0; i < numDataNodes; i++) { for (int i = 0; i < numDataNodes; i++) {
File blockFile = MiniDFSCluster.getBlockFile(i, block); File blockFile = cluster.getBlockFile(i, block);
if(blockFile != null && blockFile.exists()) { if(blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete()); assertTrue(blockFile.delete());
} }
@ -517,7 +518,7 @@ public class TestFsck {
ExtendedBlock block = dfsClient.getNamenode().getBlockLocations( ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock(); corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
for (int i=0; i<4; i++) { for (int i=0; i<4; i++) {
File blockFile = MiniDFSCluster.getBlockFile(i, block); File blockFile = cluster.getBlockFile(i, block);
if(blockFile != null && blockFile.exists()) { if(blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete()); assertTrue(blockFile.delete());
} }
@ -647,7 +648,7 @@ public class TestFsck {
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// corrupt replicas // corrupt replicas
File blockFile = MiniDFSCluster.getBlockFile(0, block); File blockFile = cluster.getBlockFile(0, block);
if (blockFile != null && blockFile.exists()) { if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel(); FileChannel channel = raFile.getChannel();
@ -1303,7 +1304,7 @@ public class TestFsck {
// corrupt replicas // corrupt replicas
block = DFSTestUtil.getFirstBlock(dfs, path); block = DFSTestUtil.getFirstBlock(dfs, path);
File blockFile = MiniDFSCluster.getBlockFile(0, block); File blockFile = cluster.getBlockFile(0, block);
if (blockFile != null && blockFile.exists()) { if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw"); RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel(); FileChannel channel = raFile.getChannel();

@ -267,14 +267,14 @@ public class TestProcessCorruptBlocks {
// corrupt the block on datanode dnIndex // corrupt the block on datanode dnIndex
// the indexes change once the nodes are restarted. // the indexes change once the nodes are restarted.
// But the datadirectory will not change // But the datadirectory will not change
assertTrue(MiniDFSCluster.corruptReplica(dnIndex, block)); assertTrue(cluster.corruptReplica(dnIndex, block));
DataNodeProperties dnProps = cluster.stopDataNode(0); DataNodeProperties dnProps = cluster.stopDataNode(0);
// Each datanode has multiple data dirs, check each // Each datanode has multiple data dirs, check each
for (int dirIndex = 0; dirIndex < 2; dirIndex++) { for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
final String bpid = cluster.getNamesystem().getBlockPoolId(); final String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = MiniDFSCluster.getStorageDir(dnIndex, dirIndex); File storageDir = cluster.getStorageDir(dnIndex, dirIndex);
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid); File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr"); File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
if (scanLogFile.exists()) { if (scanLogFile.exists()) {

@ -67,7 +67,7 @@ public class TestPendingCorruptDnMessages {
// Change the gen stamp of the block on datanode to go back in time (gen // Change the gen stamp of the block on datanode to go back in time (gen
// stamps start at 1000) // stamps start at 1000)
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath); ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
assertTrue(MiniDFSCluster.changeGenStampOfBlock(0, block, 900)); assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
// Stop the DN so the replica with the changed gen stamp will be reported // Stop the DN so the replica with the changed gen stamp will be reported
// when this DN starts up. // when this DN starts up.

@ -466,7 +466,7 @@ public class TestShortCircuitLocalRead {
"waitReplication: " + e); "waitReplication: " + e);
} }
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH); ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
File dataFile = MiniDFSCluster.getBlockFile(0, block); File dataFile = cluster.getBlockFile(0, block);
cluster.shutdown(); cluster.shutdown();
cluster = null; cluster = null;
RandomAccessFile raf = null; RandomAccessFile raf = null;