Merging trunk changes
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1400740 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
commit
f29724956a
@ -1078,7 +1078,7 @@ Release 0.23.5 - UNRELEASED
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
HADOOP-8923. JNI-based user-group mapping modules can be too chatty on
|
||||
HADOOP-8932. JNI-based user-group mapping modules can be too chatty on
|
||||
lookup failures. (Kihwal Lee via suresh)
|
||||
|
||||
HADOOP-8930. Cumulative code coverage calculation (Andrey Klochkov via
|
||||
|
@ -480,6 +480,8 @@ Release 2.0.3-alpha - Unreleased
|
||||
HDFS-4072. On file deletion remove corresponding blocks pending
|
||||
replications. (Jing Zhao via suresh)
|
||||
|
||||
HDFS-4022. Replication not happening for appended block. (Vinay via umamahesh)
|
||||
|
||||
Release 2.0.2-alpha - 2012-09-07
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -624,7 +624,10 @@ public LocatedBlock convertLastBlockToUnderConstruction(
|
||||
blocksMap.replaceBlock(ucBlock);
|
||||
|
||||
// Remove block from replication queue.
|
||||
updateNeededReplications(oldBlock, 0, 0);
|
||||
NumberReplicas replicas = countNodes(ucBlock);
|
||||
neededReplications.remove(ucBlock, replicas.liveReplicas(),
|
||||
replicas.decommissionedReplicas(), getReplication(ucBlock));
|
||||
pendingReplications.remove(ucBlock);
|
||||
|
||||
// remove this block from the list of pending blocks to be deleted.
|
||||
for (DatanodeDescriptor dd : targets) {
|
||||
|
@ -291,4 +291,39 @@ public void run() {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Regression test for HDFS-4022 ("Replication not happening for appended
 * block"): verify that when a block is appended to while the file is
 * under-replicated, the block is put back on the replication queue and
 * eventually reaches its target replication once capacity is available.
 */
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
  Configuration conf = new Configuration();
  // Start with a single datanode so a replication-factor-2 file is
  // deliberately under-replicated from the moment it is created.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // Create a file with replication factor 2 (only 1 DN is running).
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Append to the file; this converts the last block and is the path
    // that previously dropped the block from the replication queue.
    FSDataOutputStream append = fileSystem.append(f);
    append.write("/testAppend".getBytes());
    append.close();

    // Bring up a second datanode so replication to 2 becomes possible.
    cluster.startDataNodes(conf, 1, true, null, null);

    // The appended block must now be replicated to 2 nodes; this blocks
    // (bounded by the 60s test timeout) until replication completes.
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user