diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index f5b4c9a496..84d573115f 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -1078,7 +1078,7 @@ Release 0.23.5 - UNRELEASED IMPROVEMENTS - HADOOP-8923. JNI-based user-group mapping modules can be too chatty on + HADOOP-8932. JNI-based user-group mapping modules can be too chatty on lookup failures. (Kihwal Lee via suresh) HADOOP-8930. Cumulative code coverage calculation (Andrey Klochkov via diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index d8495bae60..0d19eebdf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -480,6 +480,8 @@ Release 2.0.3-alpha - Unreleased HDFS-4072. On file deletion remove corresponding blocks pending replications. (Jing Zhao via suresh) + HDFS-4022. Replication not happening for appended block. (Vinay via umamahesh) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 57529a74bb..81ec0b5e2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -624,7 +624,10 @@ public LocatedBlock convertLastBlockToUnderConstruction( blocksMap.replaceBlock(ucBlock); // Remove block from replication queue. 
- updateNeededReplications(oldBlock, 0, 0); + NumberReplicas replicas = countNodes(ucBlock); + neededReplications.remove(ucBlock, replicas.liveReplicas(), + replicas.decommissionedReplicas(), getReplication(ucBlock)); + pendingReplications.remove(ucBlock); // remove this block from the list of pending blocks to be deleted. for (DatanodeDescriptor dd : targets) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java index e0a5b42597..45a8309ffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java @@ -291,4 +291,39 @@ public void run() { cluster.shutdown(); } } + + /** + * Test the update of neededReplications for the appended block + */ + @Test(timeout = 60000) + public void testUpdateNeededReplicationsForAppendedFile() throws Exception { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) + .build(); + DistributedFileSystem fileSystem = null; + try { + // create a file. + fileSystem = cluster.getFileSystem(); + Path f = new Path("/testAppend"); + FSDataOutputStream create = fileSystem.create(f, (short) 2); + create.write("/testAppend".getBytes()); + create.close(); + + // Append to the file. + FSDataOutputStream append = fileSystem.append(f); + append.write("/testAppend".getBytes()); + append.close(); + + // Start a new datanode + cluster.startDataNodes(conf, 1, true, null, null); + + // Check for replications + DFSTestUtil.waitReplication(fileSystem, f, (short) 2); + } finally { + if (null != fileSystem) { + fileSystem.close(); + } + cluster.shutdown(); + } + } }