HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in BlockManager#excessReplicateMap. (yliu)
commit 73b86a5046
parent 0ff1216100
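The change is a mechanical type swap: the per-DataNode "excess replica" sets in BlockManager (and their one reader in fsck) are only ever used through add, remove, and contains; nothing iterates them in insertion order. LightWeightLinkedSet threads a doubly linked list through every element on top of LightWeightHashSet's hash table, so for this map the ordering bookkeeping is pure memory overhead. Below is a minimal, self-contained sketch of the map-of-sets pattern the patch touches; java.util.HashSet stands in for Hadoop's LightWeightHashSet (the operations used here behave the same), and the names ExcessMapSketch, addToExcess, and the use of Long block IDs are illustrative, not from the patch.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

// Sketch of the excessReplicateMap pattern: one set of "excess" block
// IDs per DataNode UUID. Only add/remove/contains are ever needed, so
// a plain hash set suffices; insertion order (what LightWeightLinkedSet
// adds on top of LightWeightHashSet) is never observed.
public class ExcessMapSketch {
  private final Map<String, HashSet<Long>> excessReplicateMap = new HashMap<>();

  // Mirrors addToExcessReplicate: get-or-create the per-node set.
  void addToExcess(String datanodeUuid, long blockId) {
    HashSet<Long> excess = excessReplicateMap.get(datanodeUuid);
    if (excess == null) {
      // The patch: new LightWeightHashSet<>() instead of new LightWeightLinkedSet<>().
      excess = new HashSet<>();
      excessReplicateMap.put(datanodeUuid, excess);
    }
    excess.add(blockId);
  }

  // Mirrors the cleanup in removeStoredBlock/removeFromExcessReplicateMap.
  boolean removeFromExcess(String datanodeUuid, long blockId) {
    HashSet<Long> excess = excessReplicateMap.get(datanodeUuid);
    return excess != null && excess.remove(blockId);
  }

  // Mirrors the membership test in countNodes/processOverReplicatedBlock.
  boolean isExcess(String datanodeUuid, long blockId) {
    HashSet<Long> excess = excessReplicateMap.get(datanodeUuid);
    return excess != null && excess.contains(blockId);
  }

  public static void main(String[] args) {
    ExcessMapSketch sketch = new ExcessMapSketch();
    sketch.addToExcess("dn-uuid-1", 42L);
    System.out.println(sketch.isExcess("dn-uuid-1", 42L));         // true
    System.out.println(sketch.removeFromExcess("dn-uuid-1", 42L)); // true
    System.out.println(sketch.isExcess("dn-uuid-1", 42L));         // false
  }
}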
@@ -1512,6 +1512,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
     values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+    HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
+    BlockManager#excessReplicateMap. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -89,7 +89,6 @@
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -219,7 +218,7 @@ public int getPendingDataNodeMessageCount() {
    * Maps a StorageID to the set of blocks that are "extra" for this
    * DataNode. We'll eventually remove these extras.
    */
-  public final Map<String, LightWeightLinkedSet<BlockInfo>> excessReplicateMap =
+  public final Map<String, LightWeightHashSet<BlockInfo>> excessReplicateMap =
     new HashMap<>();
 
   /**
@@ -1421,11 +1420,6 @@ int computeBlockRecoveryWork(int blocksToProcess) {
    */
   @VisibleForTesting
   int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
-    int requiredReplication, numEffectiveReplicas;
-    List<DatanodeDescriptor> containingNodes;
-    BlockCollection bc;
-    int additionalReplRequired;
-
     int scheduledWork = 0;
     List<BlockRecoveryWork> recovWork = new LinkedList<>();
 
@@ -1786,7 +1780,7 @@ DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
     for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      LightWeightLinkedSet<BlockInfo> excessBlocks =
+      LightWeightHashSet<BlockInfo> excessBlocks =
         excessReplicateMap.get(node.getDatanodeUuid());
       int countableReplica = storage.getState() == State.NORMAL ? 1 : 0;
       if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
@@ -3090,7 +3084,7 @@ private void processOverReplicatedBlock(final BlockInfo block,
         postponeBlock(block);
         return;
       }
-      LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+      LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
           cur.getDatanodeUuid());
       if (excessBlocks == null || !excessBlocks.contains(block)) {
         if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -3297,10 +3291,10 @@ static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint,
 
   private void addToExcessReplicate(DatanodeInfo dn, BlockInfo storedBlock) {
     assert namesystem.hasWriteLock();
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
         dn.getDatanodeUuid());
     if (excessBlocks == null) {
-      excessBlocks = new LightWeightLinkedSet<>();
+      excessBlocks = new LightWeightHashSet<>();
       excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
     }
     if (excessBlocks.add(storedBlock)) {
@@ -3364,7 +3358,7 @@ public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     // We've removed a block from a node, so it's definitely no longer
     // in "excess" there.
     //
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
         node.getDatanodeUuid());
     if (excessBlocks != null) {
       if (excessBlocks.remove(storedBlock)) {
@@ -3581,7 +3575,7 @@ public NumberReplicas countNodes(BlockInfo b) {
       } else if (node.isDecommissioned()) {
         decommissioned++;
       } else {
-        LightWeightLinkedSet<BlockInfo> blocksExcess = excessReplicateMap.get(
+        LightWeightHashSet<BlockInfo> blocksExcess = excessReplicateMap.get(
             node.getDatanodeUuid());
         if (blocksExcess != null && blocksExcess.contains(b)) {
           excess++;
@@ -3988,7 +3982,8 @@ public void removeBlockFromMap(Block block) {
   private void removeFromExcessReplicateMap(Block block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
-      LightWeightLinkedSet<BlockInfo> excessReplicas = excessReplicateMap.get(uuid);
+      LightWeightHashSet<BlockInfo> excessReplicas =
+          excessReplicateMap.get(uuid);
       if (excessReplicas != null) {
         if (excessReplicas.remove(block)) {
           excessBlocksCount.decrementAndGet();
@@ -74,7 +74,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
@@ -687,7 +687,7 @@ private void collectBlocksSummary(String parent, HdfsFileStatus file, Result res
               .getStorageType()));
         }
         if (showReplicaDetails) {
-          LightWeightLinkedSet<BlockInfo> blocksExcess =
+          LightWeightHashSet<BlockInfo> blocksExcess =
             bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
           Collection<DatanodeDescriptor> corruptReplicas =
             bm.getCorruptReplicas(block.getLocalBlock());