diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a84c320239..ea49eaabe6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -364,6 +364,9 @@ Release 2.6.0 - UNRELEASED
     standalone classes and separates KeyManager from NameNodeConnector.
     (szetszwo)
 
+    HDFS-6812. Remove addBlock and replaceBlock from DatanodeDescriptor.
+    (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index c6650bf66d..272cb1d48a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -21,7 +21,6 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.util.LightWeightGSet;
 
@@ -254,18 +253,18 @@ int findDatanode(DatanodeDescriptor dn) {
   }
   /**
    * Find specified DatanodeStorageInfo.
-   * @return index or -1 if not found.
+   * @return DatanodeStorageInfo or null if not found.
    */
-  int findStorageInfo(DatanodeInfo dn) {
+  DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) {
     int len = getCapacity();
     for(int idx = 0; idx < len; idx++) {
       DatanodeStorageInfo cur = getStorageInfo(idx);
       if(cur == null)
         break;
       if(cur.getDatanodeDescriptor() == dn)
-        return idx;
+        return cur;
     }
-    return -1;
+    return null;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index b658f4f395..a675635d5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -23,8 +23,8 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
-import org.apache.hadoop.util.LightWeightGSet.SetIterator;
 
+import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
 
@@ -217,9 +217,14 @@ BlockInfo replaceBlock(BlockInfo newBlock) {
     BlockInfo currentBlock = blocks.get(newBlock);
     assert currentBlock != null : "the block if not in blocksMap";
     // replace block in data-node lists
-    for(int idx = currentBlock.numNodes()-1; idx >= 0; idx--) {
-      DatanodeDescriptor dn = currentBlock.getDatanode(idx);
-      dn.replaceBlock(currentBlock, newBlock);
+    for (int i = currentBlock.numNodes() - 1; i >= 0; i--) {
+      final DatanodeDescriptor dn = currentBlock.getDatanode(i);
+      final DatanodeStorageInfo storage = currentBlock.findStorageInfo(dn);
+      final boolean removed = storage.removeBlock(currentBlock);
+      Preconditions.checkState(removed, "currentBlock not found.");
+
+      final boolean added = storage.addBlock(newBlock);
+      Preconditions.checkState(added, "newBlock already exists.");
     }
     // replace block in the map itself
     blocks.put(newBlock);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index c7b148b0ab..9b7515dabe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -48,18 +48,6 @@ public static enum Reason {
 
   private final SortedMap<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap =
     new TreeMap<Block, Map<DatanodeDescriptor, Reason>>();
-
-  /**
-   * Mark the block belonging to datanode as corrupt.
-   *
-   * @param blk Block to be added to CorruptReplicasMap
-   * @param dn DatanodeDescriptor which holds the corrupt replica
-   * @param reason a textual reason (for logging purposes)
-   */
-  public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
-      String reason) {
-    addToCorruptReplicasMap(blk, dn, reason, Reason.NONE);
-  }
 
   /**
    * Mark the block belonging to datanode as corrupt.
@@ -69,7 +57,7 @@ public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
    * @param reason a textual reason (for logging purposes)
    * @param reasonCode the enum representation of the reason
    */
-  public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
+  void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
       String reason, Reason reasonCode) {
     Map<DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk);
     if (nodes == null) {
@@ -127,7 +115,6 @@ boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) {
   boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode,
       Reason reason) {
     Map<DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk);
-    boolean removed = false;
     if (datanodes==null)
       return false;
 
@@ -174,12 +161,12 @@ boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) {
     return ((nodes != null) && (nodes.contains(node)));
   }
 
-  public int numCorruptReplicas(Block blk) {
+  int numCorruptReplicas(Block blk) {
     Collection<DatanodeDescriptor> nodes = getNodes(blk);
     return (nodes == null) ? 0 : nodes.size();
   }
 
-  public int size() {
+  int size() {
     return corruptReplicasMap.size();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index d622905f0b..34be727ff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -234,18 +234,6 @@ public DatanodeDescriptor(DatanodeID nodeID,
     updateHeartbeat(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
   }
 
-  /**
-   * Add data-node to the block. Add block to the head of the list of blocks
-   * belonging to the data-node.
-   */
-  public boolean addBlock(String storageID, BlockInfo b) {
-    DatanodeStorageInfo s = getStorageInfo(storageID);
-    if (s != null) {
-      return s.addBlock(b);
-    }
-    return false;
-  }
-
   @VisibleForTesting
   public DatanodeStorageInfo getStorageInfo(String storageID) {
     synchronized (storageMap) {
@@ -284,13 +272,10 @@ boolean hasStaleStorages() {
    * data-node from the block.
    */
   boolean removeBlock(BlockInfo b) {
-    int index = b.findStorageInfo(this);
+    final DatanodeStorageInfo s = b.findStorageInfo(this);
     // if block exists on this datanode
-    if (index >= 0) {
-      DatanodeStorageInfo s = b.getStorageInfo(index);
-      if (s != null) {
-        return s.removeBlock(b);
-      }
+    if (s != null) {
+      return s.removeBlock(b);
     }
     return false;
   }
@@ -307,24 +292,6 @@ boolean removeBlock(String storageID, BlockInfo b) {
     return false;
   }
 
-  /**
-   * Replace specified old block with a new one in the DataNodeDescriptor.
-   *
-   * @param oldBlock - block to be replaced
-   * @param newBlock - a replacement block
-   * @return the new block
-   */
-  public BlockInfo replaceBlock(BlockInfo oldBlock, BlockInfo newBlock) {
-    int index = oldBlock.findStorageInfo(this);
-    DatanodeStorageInfo s = oldBlock.getStorageInfo(index);
-    boolean done = s.removeBlock(oldBlock);
-    assert done : "Old block should belong to the data-node when replacing";
-
-    done = s.addBlock(newBlock);
-    assert done : "New block should not belong to the data-node when replacing";
-    return newBlock;
-  }
-
   public void resetBlocks() {
     setCapacity(0);
     setRemaining(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
index 5f59f0267a..133a28826b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
@@ -23,9 +23,9 @@
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 
 /**
  * In the Standby Node, we can receive messages about blocks
@@ -123,7 +123,7 @@ private Queue<ReportedBlockInfo> getBlockQueue(Block block) {
     return queue;
   }
 
-  public int count() {
+  int count() {
     return count ;
   }
 
@@ -140,7 +140,7 @@ public String toString() {
     return sb.toString();
   }
 
-  public Iterable<ReportedBlockInfo> takeAll() {
+  Iterable<ReportedBlockInfo> takeAll() {
     List<ReportedBlockInfo> rbis = Lists.newArrayListWithCapacity(
         count);
     for (Queue<ReportedBlockInfo> q : queueByBlockId.values()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
index f646839dca..21fb54e289 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
@@ -33,6 +33,7 @@
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
 import org.junit.Test;
 
@@ -89,14 +90,14 @@ public void testCorruptReplicaInfo() throws IOException,
       DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
       DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
 
-      crm.addToCorruptReplicasMap(getBlock(0), dn1, "TEST");
+      addToCorruptReplicasMap(crm, getBlock(0), dn1);
       assertEquals("Number of corrupt blocks not returning correctly",
                    1, crm.size());
-      crm.addToCorruptReplicasMap(getBlock(1), dn1, "TEST");
+      addToCorruptReplicasMap(crm, getBlock(1), dn1);
       assertEquals("Number of corrupt blocks not returning correctly",
                    2, crm.size());
 
-      crm.addToCorruptReplicasMap(getBlock(1), dn2, "TEST");
+      addToCorruptReplicasMap(crm, getBlock(1), dn2);
       assertEquals("Number of corrupt blocks not returning correctly",
                    2, crm.size());
 
@@ -109,7 +110,7 @@ public void testCorruptReplicaInfo() throws IOException,
                    0, crm.size());
 
       for (Long block_id: block_ids) {
-        crm.addToCorruptReplicasMap(getBlock(block_id), dn1, "TEST");
+        addToCorruptReplicasMap(crm, getBlock(block_id), dn1);
       }
 
       assertEquals("Number of corrupt blocks not returning correctly",
@@ -127,4 +128,9 @@ public void testCorruptReplicaInfo() throws IOException,
                                 crm.getCorruptReplicaBlockIds(10, 7L)));
 
   }
+
+  private static void addToCorruptReplicasMap(CorruptReplicasMap crm,
+      Block blk, DatanodeDescriptor dn) {
+    crm.addToCorruptReplicasMap(blk, dn, "TEST", Reason.NONE);
+  }
 }
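
Reviewer's note (not part of the patch): the BlocksMap.replaceBlock hunk inlines what DatanodeDescriptor.replaceBlock used to do, but expresses it against DatanodeStorageInfo directly: remove the old BlockInfo from each storage that held it, then add the new one, and fail fast with Preconditions.checkState if either step is a no-op (unlike the old asserts, these checks run regardless of the -ea flag). The self-contained Java sketch below models that remove-then-add invariant; Storage and ReplaceBlockPattern are simplified stand-ins invented for illustration, not the real HDFS classes touched by this patch.

import java.util.ArrayList;
import java.util.List;

// Storage is a simplified stand-in for DatanodeStorageInfo; it models only
// the two calls the new replace logic relies on.
class Storage {
  private final List<String> blocks = new ArrayList<>();

  // Mirrors DatanodeStorageInfo.addBlock: returns false if already present.
  boolean addBlock(String b) {
    if (blocks.contains(b)) {
      return false;
    }
    return blocks.add(b);
  }

  // Mirrors DatanodeStorageInfo.removeBlock: returns false if absent.
  boolean removeBlock(String b) {
    return blocks.remove(b);
  }
}

public class ReplaceBlockPattern {
  // The remove-then-add sequence BlocksMap.replaceBlock now performs per
  // storage, with checkState-style failures replacing the old asserts.
  static void replace(Storage storage, String oldBlock, String newBlock) {
    if (!storage.removeBlock(oldBlock)) {
      throw new IllegalStateException("currentBlock not found.");
    }
    if (!storage.addBlock(newBlock)) {
      throw new IllegalStateException("newBlock already exists.");
    }
  }

  public static void main(String[] args) {
    Storage s = new Storage();
    s.addBlock("blk_1001");
    replace(s, "blk_1001", "blk_1002"); // ok: old block present, new one absent
    System.out.println("replace succeeded");
  }
}

The same idea motivates the BlockInfo.findStorageInfo signature change: returning the DatanodeStorageInfo itself (or null when absent) lets callers such as BlocksMap.replaceBlock and DatanodeDescriptor.removeBlock act on the storage directly, instead of receiving an index and immediately dereferencing it with getStorageInfo(index).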