HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final DatanodeStorage.State state)' method (Contributed by Daryn Sharp)

Vinayakumar B 2016-02-01 13:24:05 +05:30
parent 2673cbaf55
commit e418bd1fb0
3 changed files with 15 additions and 24 deletions
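Why the removed method was expensive: each call wrapped the storage list in a Guava Iterables.filter view with a fresh anonymous Predicate, so hot NameNode paths (block invalidation, over-replication processing, live-node counting) paid an extra allocation and an indirection per iteration just to skip non-NORMAL storages. A minimal, self-contained sketch of the before/after pattern; FilterSketch, Storage, StorageState, and storagesInState below are illustrative stand-ins, not the real HDFS classes:

import java.util.Arrays;
import java.util.List;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;

public class FilterSketch {
  enum StorageState { NORMAL, FAILED }

  // Simplified stand-in for DatanodeStorageInfo: only the state matters here.
  static class Storage {
    final String id;
    final StorageState state;
    Storage(String id, StorageState state) { this.id = id; this.state = state; }
  }

  // Pattern removed by this commit: every call allocates an anonymous
  // Predicate plus a lazy filtering Iterable wrapper.
  static Iterable<Storage> storagesInState(List<Storage> all, final StorageState state) {
    return Iterables.filter(all, new Predicate<Storage>() {
      @Override
      public boolean apply(Storage s) {
        return s.state == state;
      }
    });
  }

  public static void main(String[] args) {
    List<Storage> all = Arrays.asList(
        new Storage("s1", StorageState.NORMAL),
        new Storage("s2", StorageState.FAILED));

    // Old, allocation-heavy pattern.
    for (Storage s : storagesInState(all, StorageState.NORMAL)) {
      System.out.println("guava-filtered: " + s.id);
    }

    // Pattern the commit switches to: iterate all storages and skip
    // non-NORMAL ones inline, with no per-call allocation.
    for (Storage s : all) {
      if (s.state != StorageState.NORMAL) {
        continue;
      }
      System.out.println("inline-filtered: " + s.id);
    }
  }
}

The patch applies the inline check at each of the three call sites in BlockManager shown below.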

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -2655,6 +2655,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9682. Fix a typo "aplication" in HttpFS document.
     (Weiwei Yang via aajisaka)
 
+    HDFS-9566. Remove expensive 'BlocksMap#getStorages(Block b, final
+    DatanodeStorage.State state)' method (Daryn Sharp via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1212,8 +1212,10 @@ private void addToInvalidates(BlockInfo storedBlock) {
       return;
     }
     StringBuilder datanodes = new StringBuilder();
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock,
-        State.NORMAL)) {
+    for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
+      if (storage.getState() != State.NORMAL) {
+        continue;
+      }
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
       final Block b = getBlockOnStorage(storedBlock, storage);
       if (b != null) {
@@ -3164,7 +3166,10 @@ private void processOverReplicatedBlock(final BlockInfo block,
     Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();
     Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
         .getNodes(block);
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
+    for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
+      if (storage.getState() != State.NORMAL) {
+        continue;
+      }
       final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
       if (storage.areBlockContentsStale()) {
         LOG.trace("BLOCK* processOverReplicatedBlock: Postponing {}"
@@ -3665,7 +3670,10 @@ int countLiveNodes(BlockInfo b) {
     // else proceed with fast case
     int live = 0;
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
-    for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
+    for (DatanodeStorageInfo storage : blocksMap.getStorages(b)) {
+      if (storage.getState() != State.NORMAL) {
+        continue;
+      }
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
       if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node)))
         live++;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -21,13 +21,9 @@
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.GSet;
 import org.apache.hadoop.util.LightWeightGSet;
 
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-
 /**
  * This class maintains the map from a block to its metadata.
  * block's metadata currently includes blockCollection it belongs to and
@@ -149,22 +145,6 @@ Iterable<DatanodeStorageInfo> getStorages(Block b) {
     return getStorages(blocks.get(b));
   }
 
-  /**
-   * Searches for the block in the BlocksMap and
-   * returns {@link Iterable} of the storages the block belongs to
-   * <i>that are of the given {@link DatanodeStorage.State state}</i>.
-   *
-   * @param state DatanodeStorage state by which to filter the returned Iterable
-   */
-  Iterable<DatanodeStorageInfo> getStorages(Block b, final DatanodeStorage.State state) {
-    return Iterables.filter(getStorages(blocks.get(b)), new Predicate<DatanodeStorageInfo>() {
-      @Override
-      public boolean apply(DatanodeStorageInfo storage) {
-        return storage.getState() == state;
-      }
-    });
-  }
-
   /**
    * For a block that has already been retrieved from the BlocksMap
    * returns {@link Iterable} of the storages the block belongs to.
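As an aside (not part of the commit): a Java 8 stream rewrite of the removed helper would be shorter but would still allocate a pipeline and a lambda per call, which is presumably why the patch inlines the state check at each call site rather than substituting another lazy wrapper (branch-2 also still targeted Java 7 at the time). A hypothetical sketch, reusing the Storage and StorageState stand-ins from the earlier example:

import java.util.List;
import java.util.stream.Collectors;

// Hypothetical stream-based equivalent of the removed helper (not in the
// patch): concise, but still allocates a Stream pipeline per call.
static List<Storage> storagesInState(List<Storage> all, StorageState state) {
  return all.stream()
      .filter(s -> s.state == state)
      .collect(Collectors.toList());
}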