HDFS-16269. [Fix] Improve NNThroughputBenchmark#blockReport operation. (#3544)
Reviewed-by: Fei Hui <feihui.ustc@gmail.com>
Reviewed-by: Wei-Chiu Chuang <weichiu@apache.org>
Signed-off-by: Akira Ajisaka <aajisaka@apache.org>
commit 618fea27d2
parent bccf2f3ef4
@@ -1219,10 +1219,28 @@ void generateInputs(int[] ignore) throws IOException {
     private ExtendedBlock addBlocks(String fileName, String clientName)
         throws IOException {
+      DatanodeInfo[] excludeNodes = null;
+      DatanodeInfo[] dnInfos = clientProto.getDatanodeReport(
+          HdfsConstants.DatanodeReportType.LIVE);
+      if (dnInfos != null && dnInfos.length > 0) {
+        List<DatanodeInfo> tmpNodes = new ArrayList<>();
+        String localHost = DNS.getDefaultHost("default", "default");
+        for (DatanodeInfo dnInfo : dnInfos) {
+          if (!localHost.equals(dnInfo.getHostName()) ||
+              (dnInfo.getXferPort() > datanodes.length)) {
+            tmpNodes.add(dnInfo);
+          }
+        }
+
+        if (tmpNodes.size() > 0) {
+          excludeNodes = tmpNodes.toArray(new DatanodeInfo[tmpNodes.size()]);
+        }
+      }
+
       ExtendedBlock prevBlock = null;
       for(int jdx = 0; jdx < blocksPerFile; jdx++) {
         LocatedBlock loc = addBlock(fileName, clientName,
-            prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
+            prevBlock, excludeNodes, HdfsConstants.GRANDFATHER_INODE_ID, null);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = dnInfo.getXferPort() - 1;
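Note on the exclusion logic added above (an interpretation, not part of the patch): the benchmark's simulated DataNodes report the local hostname and use transfer ports 1 through the number of simulated datanodes, so any live DataNode on a different host or with a higher xfer port is treated as a real DataNode and passed to addBlock() as an excluded target. A minimal standalone sketch of that filter follows; the class and method names (ExcludeFilterSketch, buildExcludeList) are illustrative only.

// Illustrative sketch only; restates the filter added in addBlocks() above.
// ExcludeFilterSketch and buildExcludeList are hypothetical names, not from the patch.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

class ExcludeFilterSketch {
  /**
   * Returns the DataNodes to exclude from block placement, or null if none
   * match. Assumes the simulated datanodes report localHost and use xfer
   * ports 1..numSimulated.
   */
  static DatanodeInfo[] buildExcludeList(DatanodeInfo[] liveReport,
      String localHost, int numSimulated) {
    List<DatanodeInfo> excluded = new ArrayList<>();
    for (DatanodeInfo dn : liveReport) {
      // Not one of the benchmark's simulated datanodes -> exclude it.
      if (!localHost.equals(dn.getHostName())
          || dn.getXferPort() > numSimulated) {
        excluded.add(dn);
      }
    }
    return excluded.isEmpty() ? null
        : excluded.toArray(new DatanodeInfo[excluded.size()]);
  }
}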
@@ -166,4 +166,25 @@ public void testNNThroughputForAppendOp() throws Exception {
       }
     }
   }
+
+  /**
+   * This test runs {@link NNThroughputBenchmark} against a mini DFS cluster
+   * for block report operation.
+   */
+  @Test(timeout = 120000)
+  public void testNNThroughputForBlockReportOp() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(3).build()) {
+      cluster.waitActive();
+      final Configuration benchConf = new HdfsConfiguration();
+      benchConf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
+      benchConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
+      NNThroughputBenchmark.runBenchmark(benchConf,
+          new String[]{"-fs", cluster.getURI().toString(), "-op",
+              "blockReport", "-datanodes", "3", "-reports", "2"});
+    }
+  }
 }
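For reference, a minimal sketch of driving the same blockReport operation outside the JUnit test, using the runBenchmark entry point and the flags exercised by the new test above. The NameNode URI below is a placeholder and the driver class name is made up for illustration.

// Sketch only: mirrors the arguments used by testNNThroughputForBlockReportOp.
// "hdfs://localhost:8020" is a placeholder URI; BlockReportBenchmarkDriver is
// a hypothetical class name, not part of this change.
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class BlockReportBenchmarkDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Tiny block size so many small blocks can be created cheaply,
    // matching the configuration used by the test above.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 16);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 16);
    NNThroughputBenchmark.runBenchmark(conf, new String[] {
        "-fs", "hdfs://localhost:8020",   // placeholder NameNode URI
        "-op", "blockReport",
        "-datanodes", "3",
        "-reports", "2"});
  }
}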