diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index cc476d8368..079dda43b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1113,7 +1113,7 @@ private synchronized void initDirectoryScanner(Configuration conf) {
       directoryScanner = new DirectoryScanner(data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan " +
+      LOG.warn("Periodic Directory Tree Verification scan " +
           "is disabled because {}", reason);
     }
   }
@@ -1315,21 +1315,6 @@ public void reportCorruptedBlocks(
     }
   }
-  /**
-   * Try to send an error report to the NNs associated with the given
-   * block pool.
-   * @param bpid the block pool ID
-   * @param errCode error code to send
-   * @param errMsg textual message to send
-   */
-  void trySendErrorReport(String bpid, int errCode, String errMsg) {
-    BPOfferService bpos = blockPoolManager.get(bpid);
-    if (bpos == null) {
-      throw new IllegalArgumentException("Bad block pool: " + bpid);
-    }
-    bpos.trySendErrorReport(errCode, errMsg);
-  }
-
   /**
    * Return the BPOfferService instance corresponding to the given block.
    * @return the BPOS
    */
@@ -2017,7 +2002,7 @@ private void checkBlockToken(ExtendedBlock block,
       ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
       DataInputStream in = new DataInputStream(buf);
       id.readFields(in);
-      LOG.debug("Got: {}", id);
+      LOG.debug("BlockTokenIdentifier id: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode, null, null);
     }
   }
@@ -2240,8 +2225,8 @@ private void handleDiskError(String failedVolumes, int failedNumber) {
       return; // do not shutdown
     }
 
-    LOG.warn("DataNode is shutting down due to failed volumes: ["
-        + failedVolumes + "]");
+    LOG.warn("DataNode is shutting down due to failed volumes: [{}]",
+        failedVolumes);
     shouldRun = false;
   }
 
@@ -2283,7 +2268,7 @@ void incrDatanodeNetworkErrors(String host) {
         curCount.put("networkErrors", curCount.get("networkErrors") + 1L);
         datanodeNetworkCounts.put(host, curCount);
       } catch (ExecutionException e) {
-        LOG.warn("failed to increment network error counts for " + host);
+        LOG.warn("failed to increment network error counts for host: {}", host);
       }
     }
   }
@@ -2333,7 +2318,7 @@ private void reportBadBlock(final BPOfferService bpos,
       final ExtendedBlock block, final String msg) {
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     bpos.reportBadBlocks(
@@ -2414,7 +2399,7 @@ void transferBlocks(String poolId, Block blocks[],
         transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
             xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
       } catch (IOException ie) {
-        LOG.warn("Failed to transfer block " + blocks[i], ie);
+        LOG.warn("Failed to transfer block {}", blocks[i], ie);
       }
     }
   }
@@ -2533,15 +2518,13 @@ private class DataTransfer implements Runnable {
     DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
-      if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
-            "clientname={}, targets={}, target storage types={}, " +
-            "target storage IDs={}", getClass().getSimpleName(), b,
-            b.getNumBytes(), stage, clientname, Arrays.asList(targets),
-            targetStorageTypes == null ? "[]" :
-                Arrays.asList(targetStorageTypes),
-            targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
-      }
+      DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
+          "clientname={}, targets={}, target storage types={}, " +
+          "target storage IDs={}", getClass().getSimpleName(), b,
+          b.getNumBytes(), stage, clientname, Arrays.asList(targets),
+          targetStorageTypes == null ? "[]" :
+          Arrays.asList(targetStorageTypes),
+          targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
       this.targets = targets;
       this.targetStorageTypes = targetStorageTypes;
       this.targetStorageIds = targetStorageIds;
@@ -2645,7 +2628,7 @@ public void run() {
         LOG.warn("{}:Failed to transfer {} to {} got", bpReg, b,
             targets[0], ie);
       } catch (Throwable t) {
-        LOG.error("Failed to transfer block " + b, t);
+        LOG.error("Failed to transfer block {}", b, t);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -3037,7 +3020,7 @@ private void checkReadAccess(final ExtendedBlock block) throws IOException {
     }
     for (TokenIdentifier tokenId : tokenIds) {
       BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-      LOG.debug("Got: {}", id);
+      LOG.debug("BlockTokenIdentifier: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block,
           BlockTokenIdentifier.AccessMode.READ, null, null);
     }
@@ -3077,8 +3060,10 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
       b.setGenerationStamp(storedGS);
       if (data.isValidRbw(b)) {
         stage = BlockConstructionStage.TRANSFER_RBW;
+        LOG.debug("Replica is being written!");
       } else if (data.isValidBlock(b)) {
         stage = BlockConstructionStage.TRANSFER_FINALIZED;
+        LOG.debug("Replica is finalized!");
       } else {
         final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
         throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 7067b9b857..92b1dac2f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2068,9 +2068,7 @@ ReplicaInfo validateBlockFile(String bpid, long blockId) {
       datanode.checkDiskErrorAsync(r.getVolume());
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("blockId=" + blockId + ", replica=" + r);
-    }
+    LOG.debug("blockId={}, replica={}", blockId, r);
     return null;
   }
 
@@ -2140,15 +2138,12 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async)
           continue;
         }
       } catch(IllegalArgumentException e) {
-        LOG.warn("Parent directory check failed; replica " + info
-            + " is not backed by a local file");
+        LOG.warn("Parent directory check failed; replica {} is " +
+            "not backed by a local file", info);
       }
       removing = volumeMap.remove(bpid, invalidBlks[i]);
       addDeletingBlock(bpid, removing.getBlockId());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Block file " + removing.getBlockURI()
-            + " is to be deleted");
-      }
+      LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
       if (removing instanceof ReplicaInPipeline) {
         ((ReplicaInPipeline) removing).releaseAllBytesReserved();
       }
@@ -2189,8 +2184,8 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async)
               dataStorage.getTrashDirectoryForReplica(bpid, removing));
         }
       } catch (ClosedChannelException e) {
-        LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
-            "block " + invalidBlks[i]);
+        LOG.warn("Volume {} is closed, ignore the deletion task for " +
+            "block: {}", v, invalidBlks[i]);
      }
    }
    if (!errors.isEmpty()) {
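Note on the logging pattern applied throughout this patch: SLF4J parameterized logging ("{}" placeholders) defers message formatting until after the level check, which is why the explicit isDebugEnabled() guards can be removed, and a Throwable passed after the placeholder arguments is logged with its full stack trace. A minimal standalone sketch of both idioms follows (the class name LoggingPatternDemo and the sample values are illustrative, not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingPatternDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingPatternDemo.class);

  public static void main(String[] args) {
    long blockId = 1073741825L; // sample value for illustration

    // Parameterized form: the message string is only assembled when DEBUG
    // is enabled, so no explicit isDebugEnabled() guard is needed.
    LOG.debug("blockId={}, replica={}", blockId, "FinalizedReplica");

    // When the last argument is a Throwable with no matching placeholder,
    // SLF4J logs it as the exception, stack trace included -- the pattern
    // behind LOG.warn("Failed to transfer block {}", blocks[i], ie) above.
    try {
      throw new java.io.IOException("simulated transfer failure");
    } catch (java.io.IOException ie) {
      LOG.warn("Failed to transfer block {}", blockId, ie);
    }
  }
}

One caveat: dropping a guard does not defer evaluation of the arguments themselves. In the DataTransfer constructor above, Arrays.asList(targets) and the two ternaries now run on every call even when DEBUG is off; that is cheap here, but isDebugEnabled() remains the right tool when computing an argument is expensive.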