HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in BlockManager#excessReplicateMap. (yliu)
commit 73b86a5046 (parent 0ff1216100)
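The change is a straight container swap: excessReplicateMap is only ever consulted with add, contains, and remove (as the hunks below show), so the insertion-order bookkeeping of a linked set buys nothing here. The following is a minimal sketch of that access pattern, using plain java.util collections and simplified stand-in types (String datanode UUIDs, long block IDs) instead of Hadoop's LightWeightHashSet, BlockInfo, and DatanodeDescriptor; it illustrates why an unordered hash set suffices and is not the BlockManager code itself.

// Sketch only: simplified stand-in types, not the real BlockManager fields.
// The excess-replica map needs only set insertion, membership, and removal.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ExcessReplicaMapSketch {
  // Stand-ins: String for a datanode UUID, Long for a block ID.
  private final Map<String, Set<Long>> excessReplicateMap = new HashMap<>();

  /** Record a block as "excess" on a datanode; true if it was newly added. */
  boolean addToExcess(String datanodeUuid, long blockId) {
    Set<Long> excess = excessReplicateMap.get(datanodeUuid);
    if (excess == null) {
      excess = new HashSet<>();   // iteration order is never relied upon
      excessReplicateMap.put(datanodeUuid, excess);
    }
    return excess.add(blockId);
  }

  /** Membership test: the only kind of read the map serves. */
  boolean isExcess(String datanodeUuid, long blockId) {
    Set<Long> excess = excessReplicateMap.get(datanodeUuid);
    return excess != null && excess.contains(blockId);
  }

  /** Drop a block once its replica is no longer excess on the datanode. */
  boolean removeFromExcess(String datanodeUuid, long blockId) {
    Set<Long> excess = excessReplicateMap.get(datanodeUuid);
    return excess != null && excess.remove(blockId);
  }

  public static void main(String[] args) {
    ExcessReplicaMapSketch sketch = new ExcessReplicaMapSketch();
    sketch.addToExcess("dn-uuid-1", 42L);
    System.out.println(sketch.isExcess("dn-uuid-1", 42L));        // true
    System.out.println(sketch.removeFromExcess("dn-uuid-1", 42L)); // true
  }
}

Under these operations a hash set and a linked set behave identically; the linked variant only maintains extra per-element links to preserve insertion order, which is the overhead this patch drops.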
@@ -1512,6 +1512,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
     values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+    HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
+    BlockManager#excessReplicateMap. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -89,7 +89,6 @@
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -219,7 +218,7 @@ public int getPendingDataNodeMessageCount() {
    * Maps a StorageID to the set of blocks that are "extra" for this
    * DataNode. We'll eventually remove these extras.
    */
-  public final Map<String, LightWeightLinkedSet<BlockInfo>> excessReplicateMap =
+  public final Map<String, LightWeightHashSet<BlockInfo>> excessReplicateMap =
     new HashMap<>();
 
   /**
@@ -1421,11 +1420,6 @@ int computeBlockRecoveryWork(int blocksToProcess) {
    */
   @VisibleForTesting
   int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
-    int requiredReplication, numEffectiveReplicas;
-    List<DatanodeDescriptor> containingNodes;
-    BlockCollection bc;
-    int additionalReplRequired;
-
     int scheduledWork = 0;
     List<BlockRecoveryWork> recovWork = new LinkedList<>();
 
@@ -1786,7 +1780,7 @@ DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
     for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      LightWeightLinkedSet<BlockInfo> excessBlocks =
+      LightWeightHashSet<BlockInfo> excessBlocks =
         excessReplicateMap.get(node.getDatanodeUuid());
       int countableReplica = storage.getState() == State.NORMAL ? 1 : 0;
       if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
@@ -3090,7 +3084,7 @@ private void processOverReplicatedBlock(final BlockInfo block,
         postponeBlock(block);
         return;
       }
-      LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+      LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
           cur.getDatanodeUuid());
       if (excessBlocks == null || !excessBlocks.contains(block)) {
         if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -3297,10 +3291,10 @@ static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint,
 
   private void addToExcessReplicate(DatanodeInfo dn, BlockInfo storedBlock) {
     assert namesystem.hasWriteLock();
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
         dn.getDatanodeUuid());
     if (excessBlocks == null) {
-      excessBlocks = new LightWeightLinkedSet<>();
+      excessBlocks = new LightWeightHashSet<>();
       excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
     }
     if (excessBlocks.add(storedBlock)) {
@@ -3364,7 +3358,7 @@ public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
       // We've removed a block from a node, so it's definitely no longer
       // in "excess" there.
       //
-      LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+      LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
           node.getDatanodeUuid());
       if (excessBlocks != null) {
         if (excessBlocks.remove(storedBlock)) {
@@ -3581,7 +3575,7 @@ public NumberReplicas countNodes(BlockInfo b) {
       } else if (node.isDecommissioned()) {
         decommissioned++;
       } else {
-        LightWeightLinkedSet<BlockInfo> blocksExcess = excessReplicateMap.get(
+        LightWeightHashSet<BlockInfo> blocksExcess = excessReplicateMap.get(
             node.getDatanodeUuid());
         if (blocksExcess != null && blocksExcess.contains(b)) {
           excess++;
@@ -3988,7 +3982,8 @@ public void removeBlockFromMap(Block block) {
   private void removeFromExcessReplicateMap(Block block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
-      LightWeightLinkedSet<BlockInfo> excessReplicas = excessReplicateMap.get(uuid);
+      LightWeightHashSet<BlockInfo> excessReplicas =
+          excessReplicateMap.get(uuid);
       if (excessReplicas != null) {
         if (excessReplicas.remove(block)) {
           excessBlocksCount.decrementAndGet();
@@ -74,7 +74,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
@@ -687,7 +687,7 @@ private void collectBlocksSummary(String parent, HdfsFileStatus file, Result res
               .getStorageType()));
         }
         if (showReplicaDetails) {
-          LightWeightLinkedSet<BlockInfo> blocksExcess =
+          LightWeightHashSet<BlockInfo> blocksExcess =
               bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
           Collection<DatanodeDescriptor> corruptReplicas =
               bm.getCorruptReplicas(block.getLocalBlock());