HDFS-3436. In DataNode.transferReplicaForPipelineRecovery(..), it should use the stored generation stamp to check if the block is valid. Contributed by Vinay
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1341961 13f79535-47bb-0310-9956-ffa450edef68
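In plain terms: before this patch, transferReplicaForPipelineRecovery(..) ran the RBW/Finalized validity checks against the generation stamp the caller sent, and only afterwards compared it with the stored one, so a replica whose on-disk genstamp had already been bumped by an earlier recovery could fail validation. The patch looks up the stored replica first, rejects genuinely stale requests, and adopts the stored genstamp before the validity checks run. A minimal, self-contained sketch of that ordering, with hypothetical names (the real change to DataNode.java is in the diff below):

```java
import java.io.IOException;

/** Sketch only -- not the real DataNode types. */
class GenStampCheckSketch {

  /** Hypothetical stand-in for the replica record the datanode keeps on disk. */
  static final class StoredReplica {
    final long generationStamp;
    StoredReplica(long generationStamp) { this.generationStamp = generationStamp; }
  }

  /**
   * Mirrors the checks this patch adds at the top of the synchronized block:
   * fail fast if the replica is missing or newer than what we hold, otherwise
   * return the stored genstamp so the later RBW/Finalized lookups see the
   * recovered replica rather than the caller's stale view.
   */
  static long resolveGenerationStamp(StoredReplica stored, long requestedGS)
      throws IOException {
    if (stored == null) {
      throw new IOException("block not found in datanode.");
    }
    final long storedGS = stored.generationStamp;
    if (storedGS < requestedGS) {
      // Our copy is older than the client's view of the block: refuse.
      throw new IOException(storedGS + " = storedGS < requestedGS = " + requestedGS);
    }
    // storedGS may be newer (a recovery already bumped it); use it from here on.
    return storedGS;
  }
}
```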
commit ade42bce10 (parent 445018cea8)
@@ -241,6 +241,13 @@ Release 2.0.1-alpha - UNRELEASED
 
     HDFS-3444. hdfs groups command doesn't work with security enabled. (atm)
 
+    HDFS-3415. Make sure all layout versions are the same for all storage
+    directories in the Namenode. (Brandon Li via szetszwo)
+
+    HDFS-3436. In DataNode.transferReplicaForPipelineRecovery(..), it should
+    use the stored generation stamp to check if the block is valid. (Vinay
+    via szetszwo)
+
 Release 2.0.0-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -735,9 +742,6 @@ Release 2.0.0-alpha - UNRELEASED
     HDFS-860. fuse-dfs truncate behavior causes issues with scp.
     (Brian Bockelman via eli)
 
-    HDFS-3415. Make sure all layout versions are the same for all storage
-    directories in the Namenode. (Brandon Li via szetszwo)
-
 BREAKDOWN OF HDFS-1623 SUBTASKS
 
   HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
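Note that the second CHANGES.txt hunk is the other half of the first: the HDFS-3415 entry is not being dropped, it is being moved from the Release 2.0.0-alpha section up to Release 2.0.1-alpha, alongside the new HDFS-3436 entry.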
@@ -2028,6 +2028,18 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
 
     //get replica information
     synchronized(data) {
+      Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
+          b.getBlockId());
+      if (null == storedBlock) {
+        throw new IOException(b + " not found in datanode.");
+      }
+      storedGS = storedBlock.getGenerationStamp();
+      if (storedGS < b.getGenerationStamp()) {
+        throw new IOException(storedGS
+            + " = storedGS < b.getGenerationStamp(), b=" + b);
+      }
+      // Update the genstamp with storedGS
+      b.setGenerationStamp(storedGS);
       if (data.isValidRbw(b)) {
         stage = BlockConstructionStage.TRANSFER_RBW;
       } else if (data.isValidBlock(b)) {
@@ -2036,18 +2048,9 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
         final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
         throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
       }
-
-      storedGS = data.getStoredBlock(b.getBlockPoolId(),
-          b.getBlockId()).getGenerationStamp();
-      if (storedGS < b.getGenerationStamp()) {
-        throw new IOException(
-            storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);
-      }
       visible = data.getReplicaVisibleLength(b);
     }
 
-    //set storedGS and visible length
-    b.setGenerationStamp(storedGS);
+    //set visible length
     b.setNumBytes(visible);
 
     if (targets.length > 0) {
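Taken together, the two DataNode.java hunks move the stored-genstamp lookup and comparison from after the RBW/Finalized validity checks to before them, and update the block's generation stamp inside the synchronized block rather than after it. The new explicit null check also gives a clearer "not found in datanode" error when the replica is missing entirely.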
@@ -176,4 +176,32 @@ public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test appending to a file when one of the datanodes in the existing
+   * pipeline is down.
+   * @throws Exception
+   */
+  @Test
+  public void testAppendWithPipelineRecovery() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
+          .manageNameDfsDirs(true).numDataNodes(4)
+          .racks(new String[] { "/rack1", "/rack1", "/rack1", "/rack2" })
+          .build();
+      cluster.waitActive();
+
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path path = new Path("/test1");
+      DFSTestUtil.createFile(fs, path, 1024, (short) 3, 1L);
+
+      cluster.stopDataNode(3);
+      DFSTestUtil.appendFile(fs, path, "hello");
+    } finally {
+      if (null != cluster) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
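The new test targets exactly the path the fix touches: with four datanodes split across two racks, it writes /test1 at replication 3, stops one datanode, then appends. The append has to rebuild the write pipeline around the dead node, and pipeline recovery bumps the block's generation stamp, so the subsequent replica transfer goes through transferReplicaForPipelineRecovery(..) with a stored genstamp that no longer matches the caller's original one.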