diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index ffeb3155e0..968b33ab8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -70,3 +70,6 @@ IMPROVEMENTS:
 
     HDFS-5455. NN should update storageMap on first heartbeat. (Arpit Agarwal)
 
+    HDFS-5457. Fix TestDatanodeRegistration, TestFsck and TestAddBlockRetry.
+    (Contributed by szetszwo)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index ad499912c2..3fe6af0127 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -385,7 +385,7 @@ private BlockIterator(final DatanodeStorageInfo... storages) {
     @Override
     public boolean hasNext() {
       update();
-      return iterators.get(index).hasNext();
+      return !iterators.isEmpty() && iterators.get(index).hasNext();
     }
 
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index df059c4d16..81991b47b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5774,7 +5774,8 @@ void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
       DatanodeInfo[] nodes = blocks[i].getLocations();
       String[] storageIDs = blocks[i].getStorageIDs();
       for (int j = 0; j < nodes.length; j++) {
-        blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j], storageIDs[j],
+        blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
+            storageIDs == null ? null: storageIDs[j],
             "client machine reported it");
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 3114c824ce..08c44c23ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.Node;
@@ -99,13 +100,13 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
     bmField.setAccessible(true);
     bmField.set(ns, spyBM);
 
-    doAnswer(new Answer<DatanodeDescriptor[]>() {
+    doAnswer(new Answer<DatanodeStorageInfo[]>() {
       @Override
-      public DatanodeDescriptor[] answer(InvocationOnMock invocation)
+      public DatanodeStorageInfo[] answer(InvocationOnMock invocation)
           throws Throwable {
         LOG.info("chooseTarget for " + src);
-        DatanodeDescriptor[] ret =
-            (DatanodeDescriptor[]) invocation.callRealMethod();
+        DatanodeStorageInfo[] ret =
+            (DatanodeStorageInfo[]) invocation.callRealMethod();
         count++;
         if(count == 1) { // run second addBlock()
           LOG.info("Starting second addBlock for " + src);