HDFS-11681. DatanodeStorageInfo#getBlockIterator() should return an iterator to an unmodifiable set. Contributed by Virajith Jalaparti.

This commit is contained in:
Chris Douglas 2017-05-10 22:20:27 -07:00
parent eed731496f
commit 51b671ef18
3 changed files with 26 additions and 11 deletions

View File

@@ -1409,12 +1409,15 @@ public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
   void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
     for (DatanodeStorageInfo storage : node.getStorageInfos()) {
       final Iterator<BlockInfo> it = storage.getBlockIterator();
+      //add the BlockInfos to a new collection as the
+      //returned iterator is not modifiable.
+      Collection<BlockInfo> toRemove = new ArrayList<>();
       while (it.hasNext()) {
-        BlockInfo block = it.next();
-        // DatanodeStorageInfo must be removed using the iterator to avoid
-        // ConcurrentModificationException in the underlying storage
-        it.remove();
-        removeStoredBlock(block, node);
+        toRemove.add(it.next());
+      }
+      for (BlockInfo b : toRemove) {
+        removeStoredBlock(b, node);
       }
     }
     // Remove all pending DN messages referencing this DN.
@@ -1429,11 +1432,11 @@ void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) {
     assert namesystem.hasWriteLock();
     final Iterator<BlockInfo> it = storageInfo.getBlockIterator();
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
-    while(it.hasNext()) {
-      BlockInfo block = it.next();
-      // DatanodeStorageInfo must be removed using the iterator to avoid
-      // ConcurrentModificationException in the underlying storage
-      it.remove();
+    Collection<BlockInfo> toRemove = new ArrayList<>();
+    while (it.hasNext()) {
+      toRemove.add(it.next());
+    }
+    for (BlockInfo block : toRemove) {
       removeStoredBlock(block, node);
       final Block b = getBlockOnStorage(block, storageInfo);
       if (b != null) {

View File

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
@@ -270,8 +271,12 @@ int numBlocks() {
     return blocks.size();
   }
 
+  /**
+   * @return iterator to an unmodifiable set of blocks
+   * related to this {@link DatanodeStorageInfo}
+   */
   Iterator<BlockInfo> getBlockIterator() {
-    return blocks.iterator();
+    return Collections.unmodifiableSet(blocks).iterator();
   }
 
   void updateState(StorageReport r) {

View File

@@ -307,6 +307,13 @@ void testBlockIterator(MiniDFSCluster cluster) {
           BlockManagerTestUtil.getBlockIterator(s);
       while(storageBlockIt.hasNext()) {
         allBlocks[idx++] = storageBlockIt.next();
+        try {
+          storageBlockIt.remove();
+          assertTrue(
+              "BlockInfo iterator should have been unmodifiable", false);
+        } catch (UnsupportedOperationException e) {
+          //expected exception
+        }
       }
     }