HDFS-12894. [READ] Skip setting block count of ProvidedDatanodeStorageInfo on DN registration update

Virajith Jalaparti 2017-12-05 17:55:32 -08:00 committed by Chris Douglas
parent 9c35be86e1
commit fb996a32a9
3 changed files with 27 additions and 2 deletions
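
The patch addresses a bookkeeping problem: on DataNode re-registration, the block report count of every storage attached to that DataNode was reset to 0, including the PROVIDED storage, whose DatanodeStorageInfo is shared across the DataNodes that expose PROVIDED volumes, so a single re-registration could wipe state kept on behalf of the whole cluster. The guard added below skips PROVIDED storages. What follows is a minimal, self-contained sketch of the idea only; the types are simplified stand-ins, not the real HDFS DatanodeStorageInfo, DatanodeDescriptor, or StorageType classes.

import java.util.Arrays;
import java.util.List;

// Simplified stand-ins for the HDFS types touched by this patch; they only
// model the block-report-count bookkeeping that is relevant here.
enum SketchStorageType { DISK, PROVIDED }

class SketchStorage {
  final SketchStorageType type;
  int blockReportCount;

  SketchStorage(SketchStorageType type, int blockReportCount) {
    this.type = type;
    this.blockReportCount = blockReportCount;
  }
}

public class UpdateRegInfoSketch {
  // Mirrors the guarded loop in the updateRegInfo hunk below: re-registration
  // forces block reports to be re-processed for the node's own storages, but
  // the shared PROVIDED storage keeps its count.
  static void resetBlockReportCounts(List<SketchStorage> storages) {
    for (SketchStorage storage : storages) {
      if (storage.type != SketchStorageType.PROVIDED) {
        storage.blockReportCount = 0;
      }
    }
  }

  public static void main(String[] args) {
    SketchStorage disk = new SketchStorage(SketchStorageType.DISK, 3);
    SketchStorage provided = new SketchStorage(SketchStorageType.PROVIDED, 1);
    resetBlockReportCounts(Arrays.asList(disk, provided));
    System.out.println(disk.blockReportCount);     // 0 -- reset on re-registration
    System.out.println(provided.blockReportCount); // 1 -- shared state preserved
  }
}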

Changed file 1 of 3:

@@ -4943,4 +4943,9 @@ private static long getBlockRecoveryTimeout(long heartbeatIntervalSecs) {
  public void setBlockRecoveryTimeout(long blockRecoveryTimeout) {
    pendingRecoveryBlocks.setRecoveryTimeoutInterval(blockRecoveryTimeout);
  }

  @VisibleForTesting
  public ProvidedStorageMap getProvidedStorageMap() {
    return providedStorageMap;
  }
}
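
The @VisibleForTesting accessor above gives tests a path from the BlockManager to the shared PROVIDED storage record. A short usage sketch, not taken from the patch itself, assuming a running MiniDFSCluster field named cluster; the same call chain appears in the getProvidedDatanodeStorageInfo() helper added to the test file further down.

// Fragment only: `cluster` is assumed to be a MiniDFSCluster, as in the tests.
DatanodeStorageInfo providedStorage = cluster.getNamesystem().getBlockManager()
    .getProvidedStorageMap().getProvidedStorageInfo();
// Number of block reports currently credited to the shared PROVIDED storage.
int providedBlockReports = providedStorage.getBlockReportCount();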

Changed file 2 of 3:

@@ -919,8 +919,10 @@ public void updateRegInfo(DatanodeID nodeReg) {
    // must re-process IBR after re-registration
    for(DatanodeStorageInfo storage : getStorageInfos()) {
      if (storage.getStorageType() != StorageType.PROVIDED) {
        storage.setBlockReportCount(0);
      }
    }

    heartbeatedSinceRegistration = false;
    forceRegistration = false;
  }

Changed file 3 of 3:

@@ -561,6 +561,8 @@ public void testProvidedDatanodeFailures() throws Exception {
    DFSClient client = new DFSClient(new InetSocketAddress("localhost",
        cluster.getNameNodePort()), cluster.getConfiguration(0));

    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();

    if (numFiles >= 1) {
      String filename = "/" + filePrefix + (numFiles - 1) + fileSuffix;
      // 2 locations returned as there are 2 PROVIDED datanodes
@@ -596,10 +598,15 @@ public void testProvidedDatanodeFailures() throws Exception {
          providedDatanode2.getDatanodeId().getXferAddr());
      getAndCheckBlockLocations(client, filename, baseFileLen, 1, 0);

      // BR count for the provided ProvidedDatanodeStorageInfo should reset to
      // 0, when all DNs with PROVIDED storage fail.
      assertEquals(0, providedDNInfo.getBlockReportCount());

      //restart the provided datanode
      cluster.restartDataNode(providedDNProperties1, true);
      cluster.waitActive();

      assertEquals(1, providedDNInfo.getBlockReportCount());
      //should find the block on the 1st provided datanode now
      dnInfos = getAndCheckBlockLocations(client, filename, baseFileLen, 1, 1);
      //not comparing UUIDs as the datanode can now have a different one.
@@ -621,6 +628,8 @@ public void testTransientDeadDatanodes() throws Exception {
        false);
    DataNode providedDatanode = cluster.getDataNodes().get(0);
    DatanodeStorageInfo providedDNInfo = getProvidedDatanodeStorageInfo();
    int initialBRCount = providedDNInfo.getBlockReportCount();

    for (int i= 0; i < numFiles; i++) {
      // expect to have 2 locations as we have 2 provided Datanodes.
      verifyFileLocation(i, 2);
@@ -631,10 +640,19 @@ public void testTransientDeadDatanodes() throws Exception {
      cluster.waitActive();
      cluster.triggerHeartbeats();
      Thread.sleep(1000);
      // the report count should just continue to increase.
      assertEquals(initialBRCount + i + 1,
          providedDNInfo.getBlockReportCount());
      verifyFileLocation(i, 2);
    }
  }

  private DatanodeStorageInfo getProvidedDatanodeStorageInfo() {
    ProvidedStorageMap providedStorageMap =
        cluster.getNamesystem().getBlockManager().getProvidedStorageMap();
    return providedStorageMap.getProvidedStorageInfo();
  }

  @Test(timeout=30000)
  public void testNamenodeRestart() throws Exception {
    createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,