HDFS-15904 : De-flake TestBalancer#testBalancerWithSortTopNodes() (#2785)
Contributed by Viraj Jasani.

Signed-off-by: Mingliang Liu <liuml07@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
commit 261191cbc0
parent cd44e917d0
@@ -634,10 +634,10 @@ void resetData(Configuration conf) {
   }
 
   static class Result {
-    final ExitStatus exitStatus;
-    final long bytesLeftToMove;
-    final long bytesBeingMoved;
-    final long bytesAlreadyMoved;
+    private final ExitStatus exitStatus;
+    private final long bytesLeftToMove;
+    private final long bytesBeingMoved;
+    private final long bytesAlreadyMoved;
 
     Result(ExitStatus exitStatus, long bytesLeftToMove, long bytesBeingMoved,
         long bytesAlreadyMoved) {
@@ -647,6 +647,22 @@ static class Result {
       this.bytesAlreadyMoved = bytesAlreadyMoved;
     }
 
+    public ExitStatus getExitStatus() {
+      return exitStatus;
+    }
+
+    public long getBytesLeftToMove() {
+      return bytesLeftToMove;
+    }
+
+    public long getBytesBeingMoved() {
+      return bytesBeingMoved;
+    }
+
+    public long getBytesAlreadyMoved() {
+      return bytesAlreadyMoved;
+    }
+
     void print(int iteration, NameNodeConnector nnc, PrintStream out) {
       out.printf("%-24s %10d %19s %18s %17s %s%n",
           DateFormat.getDateTimeInstance().format(new Date()), iteration,
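For illustration only, not part of this commit: since Result stays package-private while its fields become private, callers inside the org.apache.hadoop.hdfs.server.balancer package (such as TestBalancer in the hunks below) now read the outcome through the new getters. A minimal sketch, with a hypothetical ResultUsageSketch helper:

package org.apache.hadoop.hdfs.server.balancer;

// Hypothetical helper, not part of this change: summarizes one balancer
// iteration's Result using the accessors introduced above.
final class ResultUsageSketch {
  static String summarize(Balancer.Result r) {
    if (r.getExitStatus() != ExitStatus.SUCCESS) {
      return "iteration ended with " + r.getExitStatus();
    }
    return r.getBytesAlreadyMoved() + " bytes moved, "
        + r.getBytesLeftToMove() + " bytes left to move, "
        + r.getBytesBeingMoved() + " bytes being moved";
  }
}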
@@ -1158,12 +1158,7 @@ public void executePendingMove(final PendingMove p) {
       p.proxySource.removePendingBlock(p);
       return;
     }
-    moveExecutor.execute(new Runnable() {
-      @Override
-      public void run() {
-        p.dispatch();
-      }
-    });
+    moveExecutor.execute(p::dispatch);
   }
 
   public boolean dispatchAndCheckContinue() throws InterruptedException {
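The change above is purely syntactic: the method reference submits the same no-argument call as the removed anonymous Runnable. A standalone sketch of the equivalence, where PendingMove is a stand-in class rather than the real Dispatcher.PendingMove:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class MethodRefSketch {
  // Stand-in for Dispatcher.PendingMove, with dispatch() as its no-arg method.
  static final class PendingMove {
    void dispatch() {
      System.out.println("dispatching block move");
    }
  }

  public static void main(String[] args) {
    ExecutorService moveExecutor = Executors.newSingleThreadExecutor();
    PendingMove p = new PendingMove();

    // Old form: anonymous Runnable wrapping p.dispatch().
    moveExecutor.execute(new Runnable() {
      @Override
      public void run() {
        p.dispatch();
      }
    });

    // New form: method reference, same behavior with less boilerplate.
    moveExecutor.execute(p::dispatch);

    moveExecutor.shutdown();
  }
}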
@@ -1024,14 +1024,14 @@ private static int runBalancer(Collection<URI> namenodes,
 
       // clean all lists
       b.resetData(conf);
-      if (r.exitStatus == ExitStatus.IN_PROGRESS) {
+      if (r.getExitStatus() == ExitStatus.IN_PROGRESS) {
         done = false;
-      } else if (r.exitStatus != ExitStatus.SUCCESS) {
+      } else if (r.getExitStatus() != ExitStatus.SUCCESS) {
         //must be an error statue, return.
-        return r.exitStatus.getExitCode();
+        return r.getExitStatus().getExitCode();
       } else {
         if (iteration > 0) {
           assertTrue(r.getBytesAlreadyMoved() > 0);
         }
       }
     }
@@ -1657,7 +1657,7 @@ public void testMaxIterationTime() throws Exception {
         // When a block move is not canceled in 2 seconds properly and then
         // a block is moved unexpectedly, IN_PROGRESS will be reported.
         assertEquals("We expect ExitStatus.NO_MOVE_PROGRESS to be reported.",
-            ExitStatus.NO_MOVE_PROGRESS, r.exitStatus);
+            ExitStatus.NO_MOVE_PROGRESS, r.getExitStatus());
       }
     } finally {
       for (NameNodeConnector nnc : connectors) {
@@ -2297,7 +2297,20 @@ public void testBalancerWithSortTopNodes() throws Exception {
       maxUsage = Math.max(maxUsage, datanodeReport[i].getDfsUsed());
     }
 
-    assertEquals(200, balancerResult.bytesAlreadyMoved);
+    // The 95% usage DN will have 9 blocks of 100B and 1 block of 50B - all for the same file.
+    // The HDFS balancer will choose a block to move from this node randomly. More likely it will
+    // be 100B block. Since 100B is greater than DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY which is 99L,
+    // it will stop here. Total bytes moved from this 95% DN will be 1 block of size 100B.
+    // However, chances are the first block selected to be moved from this 95% DN is the 50B block.
+    // After this block is moved, the total moved size so far would be 50B which is smaller than
+    // DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY (99L), hence it will try to move another block.
+    // The second block will always be of size 100B. So total bytes moved from this 95% DN will be
+    // 2 blocks of size (100B + 50B) 150B.
+    // Hence, overall total blocks moved by HDFS balancer would be either of these 2 options:
+    // a) 2 blocks of total size (100B + 100B)
+    // b) 3 blocks of total size (50B + 100B + 100B)
+    assertTrue(balancerResult.getBytesAlreadyMoved() == 200
+        || balancerResult.getBytesAlreadyMoved() == 250);
     // 100% and 95% used nodes will be balanced, so top used will be 900
     assertEquals(900, maxUsage);
   }
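For illustration only, not part of the commit: a worked sketch of the arithmetic behind the relaxed assertion above. The 95% DN contributes either 100B or 150B depending on whether the 100B or the 50B block is picked first against the 99L max-size-to-move limit, and the comments imply one further 100B block moves off the 100%-used DN, giving totals of 200 or 250 bytes. The class and method names here are hypothetical.

import java.util.Arrays;
import java.util.List;

final class SortTopNodesOutcomes {
  // Keep picking blocks from the 95%-used DN until the bytes scheduled reach
  // the 99L max-size-to-move threshold, then add the 100B block assumed to
  // move off the 100%-used DN.
  static long totalMoved(List<Long> pickOrderOn95PercentDn) {
    final long maxSizeToMove = 99L;
    long movedFrom95 = 0;
    for (long blockSize : pickOrderOn95PercentDn) {
      movedFrom95 += blockSize;
      if (movedFrom95 >= maxSizeToMove) {
        break;                       // scheduling stops once 99B is reached
      }
    }
    return movedFrom95 + 100L;       // plus the block from the 100% DN
  }

  public static void main(String[] args) {
    // 100B block picked first: 100 + 100 = 200 bytes in total.
    System.out.println(totalMoved(Arrays.asList(100L, 50L)));
    // 50B block picked first: 50 + 100 + 100 = 250 bytes in total.
    System.out.println(totalMoved(Arrays.asList(50L, 100L)));
  }
}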