HDFS-6830. BlockInfo.addStorage fails when DN changes the storage for a block replica. (Arpit Agarwal)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1617598 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2014-08-12 21:42:21 +00:00
parent 7ad7ab1723
commit 662c5bb3af
5 changed files with 46 additions and 29 deletions

CHANGES.txt

@@ -491,6 +491,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6582. Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler)
     (Abhiraj Butala via brandonli)
 
+    HDFS-6830. BlockInfo.addStorage fails when DN changes the storage for a
+    block replica (Arpit Agarwal)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

BlockInfo.java

@@ -194,24 +194,12 @@ public int numNodes() {
    * Add a {@link DatanodeStorageInfo} location for a block
    */
   boolean addStorage(DatanodeStorageInfo storage) {
-    boolean added = true;
-    int idx = findDatanode(storage.getDatanodeDescriptor());
-    if(idx >= 0) {
-      if (getStorageInfo(idx) == storage) { // the storage is already there
-        return false;
-      } else {
-        // The block is on the DN but belongs to a different storage.
-        // Update our state.
-        removeStorage(getStorageInfo(idx));
-        added = false;      // Just updating storage. Return false.
-      }
-    }
     // find the last null node
     int lastNode = ensureCapacity(1);
     setStorageInfo(lastNode, storage);
     setNext(lastNode, null);
     setPrevious(lastNode, null);
-    return added;
+    return true;
   }
 
   /**
@@ -240,16 +228,18 @@ assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
    * Find specified DatanodeDescriptor.
    * @return index or -1 if not found.
    */
-  int findDatanode(DatanodeDescriptor dn) {
+  boolean findDatanode(DatanodeDescriptor dn) {
     int len = getCapacity();
     for(int idx = 0; idx < len; idx++) {
       DatanodeDescriptor cur = getDatanode(idx);
-      if(cur == dn)
-        return idx;
-      if(cur == null)
+      if(cur == dn) {
+        return true;
+      }
+      if(cur == null) {
         break;
+      }
     }
-    return -1;
+    return false;
   }
   /**
    * Find specified DatanodeStorageInfo.
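Why the replacement case leaves BlockInfo.addStorage: BlockInfo.removeStorage expects the block to have already been unlinked from that storage's block list (see the assert shown in the hunk context above), and BlockInfo cannot do that unlinking itself because the list head is owned by DatanodeStorageInfo. The patch therefore handles a storage change one level up, in DatanodeStorageInfo.addBlock, where the old storage can unlink the block first. A simplified sketch of the new ordering, using only the helpers visible in the hunks (the real code is in the DatanodeStorageInfo.java hunk below):

  // Inside DatanodeStorageInfo.addBlock(BlockInfo b) -- sketch only
  DatanodeStorageInfo other = b.findStorageInfo(getDatanodeDescriptor());
  if (other != null && other != this) {
    // removeBlock() takes b out of the other storage's block list before
    // dropping the storage entry, so removeStorage's assert is satisfied.
    other.removeBlock(b);
  }
  b.addStorage(this);   // addStorage is now a plain append into the triplets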

BlockManager.java

@@ -2065,7 +2065,7 @@ private BlockInfo processReportedBlock(
     // Add replica if appropriate. If the replica was previously corrupt
     // but now okay, it might need to be updated.
     if (reportedState == ReplicaState.FINALIZED
-        && (storedBlock.findDatanode(dn) < 0
+        && (!storedBlock.findDatanode(dn)
             || corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
       toAdd.add(storedBlock);
     }
@@ -2246,7 +2246,7 @@ void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
         storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
     if (ucBlock.reportedState == ReplicaState.FINALIZED &&
-        block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) {
+        !block.findDatanode(storageInfo.getDatanodeDescriptor())) {
       addStoredBlock(block, storageInfo, null, true);
     }
   }

DatanodeStorageInfo.java

@@ -208,12 +208,28 @@ long getBlockPoolUsed() {
   }
 
   public boolean addBlock(BlockInfo b) {
-    if(!b.addStorage(this))
-      return false;
+    // First check whether the block belongs to a different storage
+    // on the same DN.
+    boolean replaced = false;
+    DatanodeStorageInfo otherStorage =
+        b.findStorageInfo(getDatanodeDescriptor());
+
+    if (otherStorage != null) {
+      if (otherStorage != this) {
+        // The block belongs to a different storage. Remove it first.
+        otherStorage.removeBlock(b);
+        replaced = true;
+      } else {
+        // The block is already associated with this storage.
+        return false;
+      }
+    }
+
     // add to the head of the data-node list
+    b.addStorage(this);
     blockList = b.listInsert(blockList, this);
     numBlocks++;
-    return true;
+    return !replaced;
   }
 
   boolean removeBlock(BlockInfo b) {
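For reference, a minimal JUnit-style sketch of the contract the new DatanodeStorageInfo.addBlock provides. This is not part of the patch; it reuses the test helpers that appear in the TestBlockInfo change below, and the class and method names here are illustrative only.

package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Assert;
import org.junit.Test;

public class AddBlockContractSketch {
  @Test
  public void moveReplicaBetweenStoragesOfSameDataNode() throws Exception {
    // Two storages that belong to the same DataNode.
    final DatanodeStorageInfo storage1 =
        DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
        storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
    final BlockInfo block = new BlockInfo(3);

    Assert.assertTrue(storage1.addBlock(block));   // new replica: added, returns true
    Assert.assertFalse(storage1.addBlock(block));  // same storage again: no-op, returns false
    Assert.assertFalse(storage2.addBlock(block));  // storage changed: replica moved, returns false
    Assert.assertEquals(storage2, block.getStorageInfo(0));
  }
}

Returning false on a move matches the intent of the old code ("Just updating storage. Return false."), so callers that count newly added replicas behave the same as before.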

TestBlockInfo.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -59,17 +60,24 @@ public void testAddStorage() throws Exception {
   @Test
-  public void testReplaceStorageIfDifferetnOneAlreadyExistedFromSameDataNode() throws Exception {
-    BlockInfo blockInfo = new BlockInfo(3);
+  public void testReplaceStorage() throws Exception {
 
+    // Create two dummy storages.
     final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
     final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
+    final int NUM_BLOCKS = 10;
+    BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];
 
-    blockInfo.addStorage(storage1);
-    boolean added = blockInfo.addStorage(storage2);
+    // Create a few dummy blocks and add them to the first storage.
+    for (int i = 0; i < NUM_BLOCKS; ++i) {
+      blockInfos[i] = new BlockInfo(3);
+      storage1.addBlock(blockInfos[i]);
+    }
 
-    Assert.assertFalse(added);
-    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
+    // Try to move one of the blocks to a different storage.
+    boolean added = storage2.addBlock(blockInfos[NUM_BLOCKS/2]);
+    Assert.assertThat(added, is(false));
+    Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
   }
 
   @Test