HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel.

(cherry picked from commit 01bad0e92a)
Takanobu Asanuma 2021-04-26 13:29:28 +09:00
parent 99dfd3b2d0
commit a5f038b3f6
2 changed files with 24 additions and 44 deletions
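Note: judging from the method names in the hunks, the two changed files appear to be the DataNode class and the FsDatasetImpl class. The edits mostly follow one pattern: string-concatenated log messages become SLF4J parameterized messages, and explicit isDebugEnabled() guards around simple debug calls are dropped. A minimal sketch of that pattern follows; the class name is illustrative and not part of the patch.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative class; only the logging pattern mirrors the patch.
    public class ParameterizedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ParameterizedLoggingSketch.class);

      void before(String volume) {
        // Old style: the message string is built even when WARN is disabled.
        LOG.warn("Volume " + volume + " failed");
      }

      void after(String volume) {
        // New style: {} is filled in only when WARN is enabled, so the
        // concatenation cost disappears from the common path.
        LOG.warn("Volume {} failed", volume);
      }
    }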


@@ -1113,7 +1113,7 @@ private synchronized void initDirectoryScanner(Configuration conf) {
       directoryScanner = new DirectoryScanner(data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan " +
+      LOG.warn("Periodic Directory Tree Verification scan " +
               "is disabled because {}",
           reason);
     }
@@ -1315,21 +1315,6 @@ public void reportCorruptedBlocks(
     }
   }

-  /**
-   * Try to send an error report to the NNs associated with the given
-   * block pool.
-   * @param bpid the block pool ID
-   * @param errCode error code to send
-   * @param errMsg textual message to send
-   */
-  void trySendErrorReport(String bpid, int errCode, String errMsg) {
-    BPOfferService bpos = blockPoolManager.get(bpid);
-    if (bpos == null) {
-      throw new IllegalArgumentException("Bad block pool: " + bpid);
-    }
-    bpos.trySendErrorReport(errCode, errMsg);
-  }
-
   /**
    * Return the BPOfferService instance corresponding to the given block.
    * @return the BPOS
@@ -2017,7 +2002,7 @@ private void checkBlockToken(ExtendedBlock block,
       ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
       DataInputStream in = new DataInputStream(buf);
       id.readFields(in);
-      LOG.debug("Got: {}", id);
+      LOG.debug("BlockTokenIdentifier id: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
           null, null);
     }
@@ -2240,8 +2225,8 @@ private void handleDiskError(String failedVolumes, int failedNumber) {
       return; // do not shutdown
     }

-    LOG.warn("DataNode is shutting down due to failed volumes: ["
-        + failedVolumes + "]");
+    LOG.warn("DataNode is shutting down due to failed volumes: [{}]",
+        failedVolumes);
     shouldRun = false;
   }
@@ -2283,7 +2268,7 @@ void incrDatanodeNetworkErrors(String host) {
         curCount.put("networkErrors", curCount.get("networkErrors") + 1L);
         datanodeNetworkCounts.put(host, curCount);
       } catch (ExecutionException e) {
-        LOG.warn("failed to increment network error counts for " + host);
+        LOG.warn("failed to increment network error counts for host: {}", host);
       }
     }
   }
@@ -2333,7 +2318,7 @@ private void reportBadBlock(final BPOfferService bpos,
       final ExtendedBlock block, final String msg) {
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     bpos.reportBadBlocks(
@@ -2414,7 +2399,7 @@ void transferBlocks(String poolId, Block blocks[],
         transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
             xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
       } catch (IOException ie) {
-        LOG.warn("Failed to transfer block " + blocks[i], ie);
+        LOG.warn("Failed to transfer block {}", blocks[i], ie);
       }
     }
   }
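Note on the hunk above: SLF4J treats a Throwable passed as the last argument as the exception to log even when the message uses placeholders, so the rewritten call still prints the stack trace of ie. A small sketch of that behavior; the class and variable names are illustrative, not from the patch.

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only: shows how SLF4J pairs placeholders with a trailing Throwable.
    public class ThrowableLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(ThrowableLoggingSketch.class);

      void demo(String blockName) {
        IOException ie = new IOException("connection reset by peer");
        // One placeholder, two trailing arguments: blockName fills {}, and ie
        // is recognized as the Throwable, so its stack trace is logged too.
        LOG.warn("Failed to transfer block {}", blockName, ie);
      }
    }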
@@ -2533,7 +2518,6 @@ private class DataTransfer implements Runnable {
     DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
-      if (DataTransferProtocol.LOG.isDebugEnabled()) {
       DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
           "clientname={}, targets={}, target storage types={}, " +
           "target storage IDs={}", getClass().getSimpleName(), b,
@@ -2541,7 +2525,6 @@ private class DataTransfer implements Runnable {
           targetStorageTypes == null ? "[]" :
               Arrays.asList(targetStorageTypes),
           targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
-      }
       this.targets = targets;
       this.targetStorageTypes = targetStorageTypes;
       this.targetStorageIds = targetStorageIds;
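Note on the two hunks above: with parameterized logging the isDebugEnabled() guard is redundant when the arguments are already at hand, because the message is only formatted once DEBUG is confirmed enabled; a guard still pays off when computing an argument is itself expensive. A sketch of both cases, with illustrative names only.

    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only.
    public class DebugGuardSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(DebugGuardSketch.class);

      void cheapArguments(long numBytes) {
        // No guard needed: numBytes already exists, and formatting only
        // happens when DEBUG is enabled.
        LOG.debug("Sending block (numBytes={})", numBytes);
      }

      void expensiveArguments(List<String> targets) {
        // Keep the guard when building the argument is costly on its own.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Targets: {}", String.join(",", targets));
        }
      }
    }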
@@ -2645,7 +2628,7 @@ public void run() {
         LOG.warn("{}:Failed to transfer {} to {} got",
             bpReg, b, targets[0], ie);
       } catch (Throwable t) {
-        LOG.error("Failed to transfer block " + b, t);
+        LOG.error("Failed to transfer block {}", b, t);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -3037,7 +3020,7 @@ private void checkReadAccess(final ExtendedBlock block) throws IOException {
       }
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-        LOG.debug("Got: {}", id);
+        LOG.debug("BlockTokenIdentifier: {}", id);
         blockPoolTokenSecretManager.checkAccess(id, null, block,
             BlockTokenIdentifier.AccessMode.READ, null, null);
       }
@@ -3077,8 +3060,10 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
       b.setGenerationStamp(storedGS);
       if (data.isValidRbw(b)) {
         stage = BlockConstructionStage.TRANSFER_RBW;
+        LOG.debug("Replica is being written!");
       } else if (data.isValidBlock(b)) {
         stage = BlockConstructionStage.TRANSFER_FINALIZED;
+        LOG.debug("Replica is finalized!");
       } else {
         final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
         throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);


@@ -2068,9 +2068,7 @@ ReplicaInfo validateBlockFile(String bpid, long blockId) {
         datanode.checkDiskErrorAsync(r.getVolume());
       }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("blockId=" + blockId + ", replica=" + r);
-      }
+      LOG.debug("blockId={}, replica={}", blockId, r);
       return null;
     }
@@ -2140,15 +2138,12 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async)
             continue;
           }
         } catch(IllegalArgumentException e) {
-          LOG.warn("Parent directory check failed; replica " + info
-              + " is not backed by a local file");
+          LOG.warn("Parent directory check failed; replica {} is " +
+              "not backed by a local file", info);
         }
         removing = volumeMap.remove(bpid, invalidBlks[i]);
         addDeletingBlock(bpid, removing.getBlockId());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Block file " + removing.getBlockURI()
-              + " is to be deleted");
-        }
+        LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
         if (removing instanceof ReplicaInPipeline) {
           ((ReplicaInPipeline) removing).releaseAllBytesReserved();
         }
@@ -2189,8 +2184,8 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async)
               dataStorage.getTrashDirectoryForReplica(bpid, removing));
         }
       } catch (ClosedChannelException e) {
-        LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
-            "block " + invalidBlks[i]);
+        LOG.warn("Volume {} is closed, ignore the deletion task for " +
+            "block: {}", v, invalidBlks[i]);
       }
     }
     if (!errors.isEmpty()) {
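Note on the hunk above: when a message contains several placeholders, SLF4J fills them strictly in argument order, so the rewritten call must pass v before invalidBlks[i]. A tiny sketch with illustrative names only.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative only: placeholder arguments are consumed left to right.
    public class MultiPlaceholderSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(MultiPlaceholderSketch.class);

      void demo(String volume, long blockId) {
        // The first {} gets volume, the second {} gets blockId.
        LOG.warn("Volume {} is closed, ignore the deletion task for block: {}",
            volume, blockId);
      }
    }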