HDFS-13631. TestDFSAdmin#testCheckNumOfBlocksInReportCommand should use a separate MiniDFSCluster path. Contributed by Anbang Hu.

Inigo Goiri 2018-05-31 10:12:38 -07:00
parent 1361030e59
commit 1f324e9661
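For context, the patch stops this test from reusing the default MiniDFSCluster storage location: setUp() now gives the shared cluster a randomized base path, and the test runs against that cluster instead of building its own (and shutting down the wrong one in its finally block). Below is a minimal, self-contained sketch of that isolation pattern; it is not part of the patch, the class name IsolatedMiniClusterSketch is invented for illustration, and it assumes the hadoop-common and hadoop-hdfs test artifacts are on the classpath.

// Sketch only: the randomized-base-directory pattern the fix relies on.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;

public class IsolatedMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Without this, every MiniDFSCluster on the host defaults to the same
    // build/test/data directory, so concurrent tests can collide.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      System.out.println("MiniDFSCluster up at " + dfs.getUri());
    } finally {
      // Shut down the cluster this code created, not a shared one.
      cluster.shutdown();
    }
  }
}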


@@ -110,11 +110,15 @@ public class TestDFSAdmin {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
   private String tempResource = null;
+  private static final int NUM_DATANODES = 2;
 
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+        GenericTestUtils.getRandomizedTempPath());
     restartCluster();
 
     admin = new DFSAdmin(conf);
@@ -157,7 +161,8 @@ private void restartCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
     }
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_DATANODES).build();
     cluster.waitActive();
     datanode = cluster.getDataNodes().get(0);
     namenode = cluster.getNameNode();
@@ -904,40 +909,34 @@ public void testSetBalancerBandwidth() throws Exception {
   @Test(timeout = 300000L)
   public void testCheckNumOfBlocksInReportCommand() throws Exception {
-    Configuration config = new Configuration();
-    config.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
-    config.set(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, "3s");
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    Path path = new Path("/tmp.txt");
 
-    int numOfDatanodes = 1;
-    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(config)
-        .numDataNodes(numOfDatanodes).build();
-    try {
-      miniDFSCluster.waitActive();
-      DistributedFileSystem dfs = miniDFSCluster.getFileSystem();
-      Path path= new Path("/tmp.txt");
-      DatanodeInfo[] dn = dfs.getDataNodeStats();
-      assertEquals(dn.length, numOfDatanodes);
-      //Block count should be 0, as no files are created
-      assertEquals(dn[0].getNumBlocks(), 0);
-      //Create a file with 2 blocks
-      DFSTestUtil.createFile(dfs, path, 1024, (short) 1, 0);
-      int expectedBlockCount = 2;
-      //Wait for One Heartbeat
-      Thread.sleep(3 * 1000);
-      dn = dfs.getDataNodeStats();
-      assertEquals(dn.length, numOfDatanodes);
-      //Block count should be 2, as file is created with block count 2
-      assertEquals(dn[0].getNumBlocks(), expectedBlockCount);
-    } finally {
-      cluster.shutdown();
-    }
+    DatanodeInfo[] dn = dfs.getDataNodeStats();
+    assertEquals(dn.length, NUM_DATANODES);
+    // Block count should be 0, as no files are created
+    int actualBlockCount = 0;
+    for (DatanodeInfo d : dn) {
+      actualBlockCount += d.getNumBlocks();
+    }
+    assertEquals(0, actualBlockCount);
+    // Create a file with 2 blocks
+    DFSTestUtil.createFile(dfs, path, 1024, (short) 1, 0);
+    int expectedBlockCount = 2;
+    // Wait for One Heartbeat
+    Thread.sleep(3 * 1000);
+    dn = dfs.getDataNodeStats();
+    assertEquals(dn.length, NUM_DATANODES);
+    // Block count should be 2, as file is created with block count 2
+    actualBlockCount = 0;
+    for (DatanodeInfo d : dn) {
+      actualBlockCount += d.getNumBlocks();
+    }
+    assertEquals(expectedBlockCount, actualBlockCount);
   }
 
   @Test