HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad VERSION file. Contributed by Vinay and Andrew Wang.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1430037 13f79535-47bb-0310-9956-ffa450edef68
Parent: e76b9c2e26
Commit: d3949058b8

CHANGES.txt
@@ -657,6 +657,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4302. Fix fatal exception when starting NameNode with DEBUG logs
     (Eugene Koontz via todd)
 
+    HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad
+    VERSION file. (Vinay and Andrew Wang via atm)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.

BlockPoolSliceStorage.java
@@ -78,6 +78,10 @@ public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) {
     this.clusterID = clusterId;
   }
 
+  private BlockPoolSliceStorage() {
+    super(NodeType.DATA_NODE);
+  }
+
   /**
    * Analyze storage directories. Recover from previous transitions if required.
    *
@@ -378,7 +382,7 @@ void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo)
     if (!prevDir.exists())
       return;
     // read attributes out of the VERSION file of previous directory
-    DataStorage prevInfo = new DataStorage();
+    BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
    prevInfo.readPreviousVersionProperties(bpSd);
 
    // We allow rollback to a state, which is either consistent with
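
The one-line change in doRollback swaps the object used to read the previous directory's VERSION file from DataStorage to BlockPoolSliceStorage, enabled by the new private constructor above. The general idea, that each storage subclass expects its own set of VERSION properties and reading a block pool's file through the datanode-level class applies the wrong field set, can be shown with a small self-contained sketch. Everything below (class names, property keys, values) is hypothetical and illustrative, not the actual HDFS code:

import java.io.StringReader;
import java.util.Properties;

// Illustrative only: mimics the idea that each storage type expects a
// different set of keys in its VERSION file.
public class VersionFileSketch {

  /** Base class: fields shared by every VERSION file in this sketch. */
  static abstract class AbstractStorage {
    int layoutVersion;
    long cTime;

    void readVersion(Properties props) {
      layoutVersion = Integer.parseInt(require(props, "layoutVersion"));
      cTime = Long.parseLong(require(props, "cTime"));
      readTypeSpecificFields(props); // subclass decides what else is required
    }

    abstract void readTypeSpecificFields(Properties props);

    static String require(Properties props, String key) {
      String value = props.getProperty(key);
      if (value == null) {
        throw new IllegalStateException("VERSION file is missing key: " + key);
      }
      return value;
    }
  }

  /** Stands in for datanode-level storage: expects a storage ID. */
  static class DatanodeStorageSketch extends AbstractStorage {
    String storageID;
    @Override
    void readTypeSpecificFields(Properties props) {
      storageID = require(props, "storageID");
    }
  }

  /** Stands in for per-block-pool storage: expects a block pool ID. */
  static class BlockPoolStorageSketch extends AbstractStorage {
    String blockPoolID;
    @Override
    void readTypeSpecificFields(Properties props) {
      blockPoolID = require(props, "blockPoolID");
    }
  }

  public static void main(String[] args) throws Exception {
    // A block-pool style VERSION file (keys and values are made up).
    Properties bpVersion = new Properties();
    bpVersion.load(new StringReader(
        "layoutVersion=-40\ncTime=0\nblockPoolID=BP-1234-127.0.0.1-1\n"));

    // Matching subclass: reads the block pool ID it expects.
    BlockPoolStorageSketch bp = new BlockPoolStorageSketch();
    bp.readVersion(bpVersion);
    System.out.println("block pool storage read OK: " + bp.blockPoolID);

    // Mismatched subclass: demands a key the block-pool file does not carry,
    // loosely analogous to using DataStorage for a block pool VERSION file.
    try {
      new DatanodeStorageSketch().readVersion(bpVersion);
    } catch (IllegalStateException e) {
      System.out.println("datanode storage rejects it: " + e.getMessage());
    }
  }
}

In the sketch the mismatch surfaces as a missing key; whatever the exact symptom in the real code paths, constructing the subclass that matches the directory being read keeps the property sets aligned, which is the point of the patch.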

TestDFSRollback.java
@@ -19,6 +19,7 @@
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
@@ -31,6 +32,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -176,6 +178,44 @@ public void testRollback() throws Exception {
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
+      log("Normal BlockPool rollback", numDirs);
+      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
+      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+          .format(false)
+          .manageDataDfsDirs(false)
+          .manageNameDfsDirs(false)
+          .startupOption(StartupOption.ROLLBACK)
+          .build();
+      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
+      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      // Create a previous snapshot for the blockpool
+      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
+          UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      // Older LayoutVersion to make it rollback
+      storageInfo = new StorageInfo(
+          UpgradeUtilities.getCurrentLayoutVersion()+1,
+          UpgradeUtilities.getCurrentNamespaceID(cluster),
+          UpgradeUtilities.getCurrentClusterID(cluster),
+          UpgradeUtilities.getCurrentFsscTime(cluster));
+      // Create old VERSION file for each data dir
+      for (int i=0; i<dataNodeDirs.length; i++) {
+        Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
+            + UpgradeUtilities.getCurrentBlockPoolID(cluster));
+        UpgradeUtilities.createBlockPoolVersionFile(
+            new File(bpPrevPath.toString()),
+            storageInfo,
+            UpgradeUtilities.getCurrentBlockPoolID(cluster));
+      }
+
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
+      assertTrue(cluster.isDataNodeUp());
+
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
       log("NameNode rollback without existing previous dir", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
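
One detail of the new test worth spelling out is the line commented "Older LayoutVersion to make it rollback": HDFS layout versions are negative and step downward as the on-disk format evolves, so getCurrentLayoutVersion()+1 denotes an older layout in the block pool's previous VERSION file, which is what lets the rollback proceed. A tiny stand-alone illustration (the numeric value is made up):

public class LayoutVersionNote {
  public static void main(String[] args) {
    // HDFS layout versions are negative and grow more negative as the
    // on-disk format evolves, so "current + 1" is an OLDER layout.
    int currentLayoutVersion = -40;                     // made-up value
    int olderLayoutVersion = currentLayoutVersion + 1;  // -39: an earlier format
    System.out.println("older layout " + olderLayoutVersion
        + " is numerically greater than current " + currentLayoutVersion + ": "
        + (olderLayoutVersion > currentLayoutVersion)); // prints true
  }
}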