HDFS-12894. [READ] Skip setting block count of ProvidedDatanodeStorageInfo on DN registration update
parent 9c35be86e1
commit fb996a32a9
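In short, the registration-update path now resets block report counts only for non-PROVIDED storages, so the shared ProvidedDatanodeStorageInfo keeps its count when an individual datanode re-registers, and BlockManager exposes getProvidedStorageMap() for the tests below. As a rough illustration of that rule only, here is a minimal, self-contained Java sketch; StorageType and DatanodeStorageInfo are simplified stand-ins for the real HDFS classes, not the actual implementation.

import java.util.List;

// Simplified stand-ins for the HDFS types touched here (the real ones live in
// org.apache.hadoop.hdfs.server.blockmanagement); this sketch only mirrors the
// rule added to the registration-update loop, not the actual implementation.
enum StorageType { DISK, PROVIDED }

class DatanodeStorageInfo {
  private final StorageType type;
  private int blockReportCount;

  DatanodeStorageInfo(StorageType type, int blockReportCount) {
    this.type = type;
    this.blockReportCount = blockReportCount;
  }

  StorageType getStorageType() { return type; }
  int getBlockReportCount() { return blockReportCount; }
  void setBlockReportCount(int count) { blockReportCount = count; }
}

public class UpdateRegInfoSketch {
  // On re-registration, reset counts so incremental block reports are
  // re-processed, but leave the shared PROVIDED storage untouched.
  static void resetOnReRegistration(List<DatanodeStorageInfo> storages) {
    for (DatanodeStorageInfo storage : storages) {
      if (storage.getStorageType() != StorageType.PROVIDED) {
        storage.setBlockReportCount(0);
      }
    }
  }

  public static void main(String[] args) {
    List<DatanodeStorageInfo> storages = List.of(
        new DatanodeStorageInfo(StorageType.DISK, 3),
        new DatanodeStorageInfo(StorageType.PROVIDED, 1));
    resetOnReRegistration(storages);
    for (DatanodeStorageInfo s : storages) {
      // Prints: DISK -> 0 and PROVIDED -> 1
      System.out.println(s.getStorageType() + " -> " + s.getBlockReportCount());
    }
  }
}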
@@ -4943,4 +4943,9 @@ private static long getBlockRecoveryTimeout(long heartbeatIntervalSecs) {
   public void setBlockRecoveryTimeout(long blockRecoveryTimeout) {
     pendingRecoveryBlocks.setRecoveryTimeoutInterval(blockRecoveryTimeout);
   }
+
+  @VisibleForTesting
+  public ProvidedStorageMap getProvidedStorageMap() {
+    return providedStorageMap;
+  }
 }
@@ -919,7 +919,9 @@ public void updateRegInfo(DatanodeID nodeReg) {
 
     // must re-process IBR after re-registration
     for(DatanodeStorageInfo storage : getStorageInfos()) {
-      storage.setBlockReportCount(0);
+      if (storage.getStorageType() != StorageType.PROVIDED) {
+        storage.setBlockReportCount(0);
+      }
     }
     heartbeatedSinceRegistration = false;
     forceRegistration = false;
@@ -559,7 +559,9 @@ public void testProvidedDatanodeFailures() throws Exception {
       DataNode providedDatanode2 = cluster.getDataNodes().get(1);
 
       DFSClient client = new DFSClient(new InetSocketAddress("localhost",
-          cluster.getNameNodePort()), cluster.getConfiguration(0));
+          cluster.getNameNodePort()), cluster.getConfiguration(0));
+
+      DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
 
       if (numFiles >= 1) {
         String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
@@ -596,10 +598,15 @@ public void testProvidedDatanodeFailures() throws Exception {
           providedDatanode2.getDatanodeId().getXferAddr());
       getAndCheckBlockLocations(client, filename, baseFileLen, 1, 0);
 
+      // BR count for the provided ProvidedDatanodeStorageInfo should reset to
+      // 0, when all DNs with PROVIDED storage fail.
+      assertEquals(0, providedDNInfo.getBlockReportCount());
       //restart the provided datanode
       cluster.restartDataNode(providedDNProperties1, true);
       cluster.waitActive();
 
+      assertEquals(1, providedDNInfo.getBlockReportCount());
+
       //should find the block on the 1st provided datanode now
       dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
       //not comparing UUIDs as the datanode can now have a different one.
@@ -621,6 +628,8 @@ public void testTransientDeadDatanodes() throws Exception {
         false);
 
     DataNode providedDatanode = cluster.getDataNodes().get(0);
+    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
+    int initialBRCount = providedDNInfo.getBlockReportCount();
     for (int i= 0; i < numFiles; i++) {
       // expect to have 2 locations as we have 2 provided Datanodes.
       verifyFileLocation(i, 2);
@@ -631,10 +640,19 @@ public void testTransientDeadDatanodes() throws Exception {
       cluster.waitActive();
       cluster.triggerHeartbeats();
       Thread.sleep(1000);
+      // the report count should just continue to increase.
+      assertEquals(initialBRCount + i + 1,
+          providedDNInfo.getBlockReportCount());
       verifyFileLocation(i, 2);
     }
   }
 
+  private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {
+    ProvidedStorageMap providedStorageMap =
+        cluster.getNamesystem().getBlockManager().getProvidedStorageMap();
+    return providedStorageMap.getProvidedStorageInfo();
+  }
+
   @Test(timeout=30000)
   public void testNamenodeRestart() throws Exception {
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,