HDFS-9941. Do not log StandbyException on NN, other minor logging fixes. Contributed by Arpit Agarwal.
parent f291d82cd4
commit 5644137ada
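Four small logging cleanups, read top to bottom: BlockUnderConstructionFeature replaces its nullable replicas array with a shared empty-array sentinel so the null checks disappear; DecommissionManager guards an expensive INFO message behind isInfoEnabled() and routes it to the block-state-change log; FSDirWriteFileOp moves block-allocation logging into a helper that formats only when INFO is enabled and prints a concise under-construction summary; and NameNodeRpcServer registers StandbyException as a suppressed logging exception so routine standby probes stop filling the NameNode log.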
BlockUnderConstructionFeature.java

@@ -33,11 +33,13 @@
  */
 public class BlockUnderConstructionFeature {
   private BlockUCState blockUCState;
+  private static final ReplicaUnderConstruction[] NO_REPLICAS =
+      new ReplicaUnderConstruction[0];

   /**
    * Block replicas as assigned when the block was allocated.
    */
-  private ReplicaUnderConstruction[] replicas;
+  private ReplicaUnderConstruction[] replicas = NO_REPLICAS;

   /**
    * Index of the primary data node doing the recovery. Useful for log
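The key move above is the shared zero-length sentinel: replicas now always points at a real array, so every null check downstream either becomes a length check or vanishes. A minimal standalone sketch of the same pattern (names here are illustrative, not from the patch):

    // Shared empty-array sentinel: the field is never null, so callers
    // can read .length and iterate without a null guard.
    class ReplicaHolder {
      private static final String[] NO_REPLICAS = new String[0];

      private String[] replicas = NO_REPLICAS;

      int getNumExpectedLocations() {
        return replicas.length;  // was: replicas == null ? 0 : replicas.length
      }

      boolean hasReplicas() {
        return replicas.length > 0;  // was: replicas != null
      }
    }

Because the sentinel is a single static instance, the default state costs no allocation per block.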
@@ -120,7 +122,7 @@ public byte[] getBlockIndices() {
   }

   public int getNumExpectedLocations() {
-    return replicas == null ? 0 : replicas.length;
+    return replicas.length;
   }

   /**
@@ -130,7 +132,7 @@ public int getNumExpectedLocations() {
    */
   void updateStorageScheduledSize(BlockInfoStriped storedBlock) {
     assert storedBlock.getUnderConstructionFeature() == this;
-    if (replicas == null) {
+    if (replicas.length == 0) {
       return;
     }
     final int dataBlockNum = storedBlock.getDataBlockNum();
@@ -182,12 +184,10 @@ void commit() {

   List<ReplicaUnderConstruction> getStaleReplicas(long genStamp) {
     List<ReplicaUnderConstruction> staleReplicas = new ArrayList<>();
-    if (replicas != null) {
-      // Remove replicas with wrong gen stamp. The replica list is unchanged.
-      for (ReplicaUnderConstruction r : replicas) {
-        if (genStamp != r.getGenerationStamp()) {
-          staleReplicas.add(r);
-        }
+    // Remove replicas with wrong gen stamp. The replica list is unchanged.
+    for (ReplicaUnderConstruction r : replicas) {
+      if (genStamp != r.getGenerationStamp()) {
+        staleReplicas.add(r);
       }
     }
     return staleReplicas;
@@ -201,7 +201,7 @@ List<ReplicaUnderConstruction> getStaleReplicas(long genStamp) {
   public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId) {
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
-    if (replicas == null || replicas.length == 0) {
+    if (replicas.length == 0) {
       NameNode.blockStateChangeLog.warn("BLOCK*" +
           " BlockUnderConstructionFeature.initializeBlockRecovery:" +
           " No blocks found, lease removed.");
@@ -252,7 +252,7 @@ public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId) {
   /** Add the reported replica if it is not already in the replica list. */
   void addReplicaIfNotPresent(DatanodeStorageInfo storage,
       Block reportedBlock, ReplicaState rState) {
-    if (replicas == null) {
+    if (replicas.length == 0) {
       replicas = new ReplicaUnderConstruction[1];
       replicas[0] = new ReplicaUnderConstruction(reportedBlock, storage,
           rState);
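Hunks two through six are the mechanical payoff: replicas == null and replicas == null || replicas.length == 0 both collapse to replicas.length == 0, and getStaleReplicas drops its wrapping null check outright, since a for-each over a zero-length array simply runs zero times.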
@@ -295,15 +295,24 @@ private void appendUCParts(StringBuilder sb) {
         .append(", truncateBlock=").append(truncateBlock)
         .append(", primaryNodeIndex=").append(primaryNodeIndex)
         .append(", replicas=[");
-    if (replicas != null) {
-      int i = 0;
-      for (ReplicaUnderConstruction r : replicas) {
-        r.appendStringTo(sb);
-        if (++i < replicas.length) {
-          sb.append(", ");
-        }
+    int i = 0;
+    for (ReplicaUnderConstruction r : replicas) {
+      r.appendStringTo(sb);
+      if (++i < replicas.length) {
+        sb.append(", ");
       }
     }
     sb.append("]}");
   }
+
+  public void appendUCPartsConcise(StringBuilder sb) {
+    sb.append("replicas=");
+    int i = 0;
+    for (ReplicaUnderConstruction r : replicas) {
+      sb.append(r.getExpectedStorageLocation().getDatanodeDescriptor());
+      if (++i < replicas.length) {
+        sb.append(", ");
+      }
+    }
+  }
 }
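The new appendUCPartsConcise prints only the expected datanode of each replica, comma-separated. It exists for the allocation log line added in FSDirWriteFileOp below, where the verbose appendUCParts output (UC state, truncate block, primary node index) would be noise.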
DecommissionManager.java

@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -280,6 +281,10 @@ private void logBlockReplicationInfo(BlockInfo block,
       BlockCollection bc,
       DatanodeDescriptor srcNode, NumberReplicas num,
       Iterable<DatanodeStorageInfo> storages) {
+    if (!NameNode.blockStateChangeLog.isInfoEnabled()) {
+      return;
+    }
+
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = blockManager.getExpectedReplicaNum(block);
     StringBuilder nodeList = new StringBuilder();
@@ -288,7 +293,8 @@ private void logBlockReplicationInfo(BlockInfo block,
       nodeList.append(node);
       nodeList.append(" ");
     }
-    LOG.info("Block: " + block + ", Expected Replicas: "
+    NameNode.blockStateChangeLog.info(
+        "Block: " + block + ", Expected Replicas: "
         + curExpectedReplicas + ", live replicas: " + curReplicas
         + ", corrupt replicas: " + num.corruptReplicas()
         + ", decommissioned replicas: " + num.decommissioned()
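Both DecommissionManager changes follow one recipe: bail out before any string building when INFO is disabled, and emit on the dedicated block-state logger rather than the class logger. A small sketch of that guard pattern, using a generic slf4j logger as a stand-in for NameNode.blockStateChangeLog:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class GuardedLoggingSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedLoggingSketch.class);

      void logReplicationInfo(Object block, int live, int expected) {
        if (!LOG.isInfoEnabled()) {
          return;  // skip all concatenation work when INFO is off
        }
        LOG.info("Block: " + block + ", Expected Replicas: " + expected
            + ", live replicas: " + live);
      }
    }

The explicit guard matters because the message is assembled with + concatenation (and, in the real method, a StringBuilder loop over storages); without it that work happens even when the result is thrown away.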
FSDirWriteFileOp.java

@@ -848,10 +848,26 @@ private static void saveAllocatedBlock(FSNamesystem fsn, String src,
     assert fsn.hasWriteLock();
     BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock, targets,
         isStriped);
-    NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
+    logAllocatedBlock(src, b);
     DatanodeStorageInfo.incrementBlocksScheduled(targets);
   }

+  private static void logAllocatedBlock(String src, BlockInfo b) {
+    if (!NameNode.stateChangeLog.isInfoEnabled()) {
+      return;
+    }
+    StringBuilder sb = new StringBuilder(150);
+    sb.append("BLOCK* allocate ");
+    b.appendStringTo(sb);
+    sb.append(", ");
+    BlockUnderConstructionFeature uc = b.getUnderConstructionFeature();
+    if (uc != null) {
+      uc.appendUCPartsConcise(sb);
+    }
+    sb.append(" for " + src);
+    NameNode.stateChangeLog.info(sb.toString());
+  }
+
   private static void setNewINodeStoragePolicy(BlockManager bm, INodeFile
       inode, INodesInPath iip, boolean isLazyPersist)
       throws IOException {
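The extracted logAllocatedBlock applies the same guard, presizes its StringBuilder to 150 characters to avoid intermediate growth, and appends the concise under-construction summary added above, so each allocation line carries the block plus its expected datanodes instead of the block's full toString().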
NameNodeRpcServer.java

@@ -163,6 +163,7 @@
 import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.ipc.WritableRpcEngine;
 import org.apache.hadoop.ipc.RefreshRegistry;
 import org.apache.hadoop.ipc.RefreshResponse;
@@ -494,6 +495,9 @@ public NameNodeRpcServer(Configuration conf, NameNode nn)
         FSLimitException.PathComponentTooLongException.class,
         FSLimitException.MaxDirectoryItemsExceededException.class,
         UnresolvedPathException.class);
+
+    clientRpcServer.addSuppressedLoggingExceptions(StandbyException.class);
+
     clientRpcServer.setTracer(nn.tracer);
     if (serviceRpcServer != null) {
       serviceRpcServer.setTracer(nn.tracer);
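This addSuppressedLoggingExceptions call is the fix named in the commit title: a standby NameNode throws StandbyException for client operations by design, and clients simply fail over and retry, so logging every occurrence is noise. A rough illustration of the suppression idea; this is a simplified sketch, not Hadoop's actual ipc.Server internals:

    import java.util.Set;
    import java.util.concurrent.CopyOnWriteArraySet;

    // Hypothetical mini RPC server that keeps a set of exception types
    // it will not log when they escape a handler.
    class SuppressionSketch {
      private final Set<String> suppressed = new CopyOnWriteArraySet<>();

      void addSuppressedLoggingExceptions(Class<?>... classes) {
        for (Class<?> c : classes) {
          suppressed.add(c.getName());
        }
      }

      void onHandlerException(Throwable t) {
        if (suppressed.contains(t.getClass().getName())) {
          return;  // expected in normal operation; stay quiet
        }
        System.err.println("RPC handler error: " + t);  // stand-in for LOG
      }
    }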