HDFS-6812. Remove addBlock and replaceBlock from DatanodeDescriptor.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1616426 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2014-08-07 07:30:53 +00:00
parent 83b9933db3
commit 0ed8732fee
7 changed files with 35 additions and 68 deletions

View File

@ -364,6 +364,9 @@ Release 2.6.0 - UNRELEASED
standalone classes and separates KeyManager from NameNodeConnector.
(szetszwo)
HDFS-6812. Remove addBlock and replaceBlock from DatanodeDescriptor.
(szetszwo)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)

View File

@ -21,7 +21,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.util.LightWeightGSet;
@ -254,18 +253,18 @@ int findDatanode(DatanodeDescriptor dn) {
}
/**
* Find specified DatanodeStorageInfo.
* @return index or -1 if not found.
* @return DatanodeStorageInfo or null if not found.
*/
int findStorageInfo(DatanodeInfo dn) {
DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) {
int len = getCapacity();
for(int idx = 0; idx < len; idx++) {
DatanodeStorageInfo cur = getStorageInfo(idx);
if(cur == null)
break;
if(cur.getDatanodeDescriptor() == dn)
return idx;
return cur;
}
return -1;
return null;
}
/**

View File

@ -23,8 +23,8 @@
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.LightWeightGSet.SetIterator;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;
@ -217,9 +217,14 @@ BlockInfo replaceBlock(BlockInfo newBlock) {
BlockInfo currentBlock = blocks.get(newBlock);
assert currentBlock != null : "the block if not in blocksMap";
// replace block in data-node lists
for(int idx = currentBlock.numNodes()-1; idx >= 0; idx--) {
DatanodeDescriptor dn = currentBlock.getDatanode(idx);
dn.replaceBlock(currentBlock, newBlock);
for (int i = currentBlock.numNodes() - 1; i >= 0; i--) {
final DatanodeDescriptor dn = currentBlock.getDatanode(i);
final DatanodeStorageInfo storage = currentBlock.findStorageInfo(dn);
final boolean removed = storage.removeBlock(currentBlock);
Preconditions.checkState(removed, "currentBlock not found.");
final boolean added = storage.addBlock(newBlock);
Preconditions.checkState(added, "newBlock already exists.");
}
// replace block in the map itself
blocks.put(newBlock);

View File

@ -48,18 +48,6 @@ public static enum Reason {
private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
/**
 * Mark the block belonging to datanode as corrupt. Convenience overload that
 * delegates to the four-argument form, recording {@link Reason#NONE} as the
 * machine-readable reason code.
 *
 * @param blk Block to be added to CorruptReplicasMap
 * @param dn DatanodeDescriptor which holds the corrupt replica
 * @param reason a textual reason (for logging purposes)
 */
public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
String reason) {
addToCorruptReplicasMap(blk, dn, reason, Reason.NONE);
}
/**
* Mark the block belonging to datanode as corrupt.
@ -69,7 +57,7 @@ public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
* @param reason a textual reason (for logging purposes)
* @param reasonCode the enum representation of the reason
*/
public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
String reason, Reason reasonCode) {
Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
if (nodes == null) {
@ -127,7 +115,6 @@ boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
Reason reason) {
Map <DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
boolean removed = false;
if (datanodes==null)
return false;
@ -174,12 +161,12 @@ boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) {
return ((nodes != null) && (nodes.contains(node)));
}
public int numCorruptReplicas(Block blk) {
int numCorruptReplicas(Block blk) {
Collection<DatanodeDescriptor> nodes = getNodes(blk);
return (nodes == null) ? 0 : nodes.size();
}
public int size() {
int size() {
return corruptReplicasMap.size();
}

View File

@ -234,18 +234,6 @@ public DatanodeDescriptor(DatanodeID nodeID,
updateHeartbeat(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
}
/**
 * Add the block to the storage identified by {@code storageID} on this
 * data-node; the storage places the block at the head of its block list.
 *
 * @param storageID id of the target storage on this data-node
 * @param b the block to add
 * @return true if the block was added to the storage; false when no storage
 *         with the given id exists, or the storage already held the block
 */
public boolean addBlock(String storageID, BlockInfo b) {
final DatanodeStorageInfo storage = getStorageInfo(storageID);
// Unknown storage id: nothing to add to, report failure.
return storage != null && storage.addBlock(b);
}
@VisibleForTesting
public DatanodeStorageInfo getStorageInfo(String storageID) {
synchronized (storageMap) {
@ -284,13 +272,10 @@ boolean hasStaleStorages() {
* data-node from the block.
*/
boolean removeBlock(BlockInfo b) {
int index = b.findStorageInfo(this);
final DatanodeStorageInfo s = b.findStorageInfo(this);
// if block exists on this datanode
if (index >= 0) {
DatanodeStorageInfo s = b.getStorageInfo(index);
if (s != null) {
return s.removeBlock(b);
}
if (s != null) {
return s.removeBlock(b);
}
return false;
}
@ -307,24 +292,6 @@ boolean removeBlock(String storageID, BlockInfo b) {
return false;
}
/**
 * Replace specified old block with a new one in the DataNodeDescriptor.
 * The old block must currently be stored on this data-node; it is removed
 * from its storage and the new block is added in its place.
 *
 * @param oldBlock - block to be replaced
 * @param newBlock - a replacement block
 * @return the new block
 */
public BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
// Locate the storage on this data-node that currently holds the old block.
final DatanodeStorageInfo storage =
oldBlock.getStorageInfo(oldBlock.findStorageInfo(this));
boolean ok = storage.removeBlock(oldBlock);
assert ok : "Old block should belong to the data-node when replacing";
ok = storage.addBlock(newBlock);
assert ok : "New block should not belong to the data-node when replacing";
return newBlock;
}
public void resetBlocks() {
setCapacity(0);
setRemaining(0);

View File

@ -23,9 +23,9 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
/**
* In the Standby Node, we can receive messages about blocks
@ -123,7 +123,7 @@ private Queue<ReportedBlockInfo> getBlockQueue(Block block) {
return queue;
}
public int count() {
int count() {
return count ;
}
@ -140,7 +140,7 @@ public String toString() {
return sb.toString();
}
public Iterable<ReportedBlockInfo> takeAll() {
Iterable<ReportedBlockInfo> takeAll() {
List<ReportedBlockInfo> rbis = Lists.newArrayListWithCapacity(
count);
for (Queue<ReportedBlockInfo> q : queueByBlockId.values()) {

View File

@ -33,6 +33,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
import org.junit.Test;
@ -89,14 +90,14 @@ public void testCorruptReplicaInfo() throws IOException,
DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
addToCorruptReplicasMap(crm, getBlock(0), dn1);
assertEquals("Number of corrupt blocks not returning correctly",
1, crm.size());
crm.addToCorruptReplicasMap(getBlock(1), dn1, "TEST");
addToCorruptReplicasMap(crm, getBlock(1), dn1);
assertEquals("Number of corrupt blocks not returning correctly",
2, crm.size());
crm.addToCorruptReplicasMap(getBlock(1), dn2, "TEST");
addToCorruptReplicasMap(crm, getBlock(1), dn2);
assertEquals("Number of corrupt blocks not returning correctly",
2, crm.size());
@ -109,7 +110,7 @@ public void testCorruptReplicaInfo() throws IOException,
0, crm.size());
for (Long block_id: block_ids) {
crm.addToCorruptReplicasMap(getBlock(block_id), dn1, "TEST");
addToCorruptReplicasMap(crm, getBlock(block_id), dn1);
}
assertEquals("Number of corrupt blocks not returning correctly",
@ -127,4 +128,9 @@ public void testCorruptReplicaInfo() throws IOException,
crm.getCorruptReplicaBlockIds(10, 7L)));
}
/**
 * Test helper: marks {@code blk} on {@code dn} as corrupt via the
 * package-private four-argument CorruptReplicasMap API, using a fixed
 * "TEST" textual reason and {@link Reason#NONE} as the reason code.
 */
private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
Block blk, DatanodeDescriptor dn) {
crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
}
}