HDFS-6023. Test whether the standby NN continues to checkpoint after the prepare stage. Contributed by Haohui Mai.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1572337 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
9cc0d5d497
commit
ccf0744243
@@ -108,3 +108,6 @@ HDFS-5535 subtasks:

    HDFS-6019. Standby NN might not checkpoint when processing the rolling
    upgrade marker. (Haohui Mai via jing9)

    HDFS-6023. Test whether the standby NN continues to checkpoint after the
    prepare stage. (Haohui Mai via jing9)
@ -676,7 +676,6 @@ public static String getRollbackImageFileName(long txid) {
|
||||
return getNameNodeFileName(NameNodeFile.IMAGE_ROLLBACK, txid);
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
private static String getNameNodeFileName(NameNodeFile nnf, long txid) {
|
||||
return String.format("%s_%019d", nnf.getName(), txid);
|
||||
}
|
||||
|
@ -34,6 +34,7 @@
|
||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSImage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
|
||||
import org.apache.hadoop.hdfs.tools.DFSAdmin;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
@ -467,17 +468,66 @@ public void testQuery() throws Exception {
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300000)
|
||||
public void testCheckpoint() throws IOException, InterruptedException {
|
||||
final Configuration conf = new Configuration();
|
||||
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
|
||||
|
||||
MiniQJMHACluster cluster = null;
|
||||
final Path foo = new Path("/foo");
|
||||
|
||||
try {
|
||||
cluster = new MiniQJMHACluster.Builder(conf).build();
|
||||
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
|
||||
dfsCluster.waitActive();
|
||||
|
||||
dfsCluster.transitionToActive(0);
|
||||
DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
|
||||
|
||||
// start rolling upgrade
|
||||
RollingUpgradeInfo info = dfs
|
||||
.rollingUpgrade(RollingUpgradeAction.PREPARE);
|
||||
Assert.assertTrue(info.isStarted());
|
||||
|
||||
queryForPreparation(dfs);
|
||||
|
||||
dfs.mkdirs(foo);
|
||||
long txid = dfs.rollEdits();
|
||||
Assert.assertTrue(txid > 0);
|
||||
|
||||
int retries = 0;
|
||||
while (++retries < 5) {
|
||||
NNStorage storage = dfsCluster.getNamesystem(1).getFSImage()
|
||||
.getStorage();
|
||||
if (storage.getFsImageName(txid - 1) != null) {
|
||||
return;
|
||||
}
|
||||
Thread.sleep(1000);
|
||||
}
|
||||
Assert.fail("new checkpoint does not exist");
|
||||
|
||||
} finally {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void queryForPreparation(DistributedFileSystem dfs) throws IOException,
|
||||
InterruptedException {
|
||||
RollingUpgradeInfo info;
|
||||
int retries = 0;
|
||||
while (retries < 10) {
|
||||
while (++retries < 10) {
|
||||
info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
|
||||
if (info.createdRollbackImages()) {
|
||||
break;
|
||||
}
|
||||
Thread.sleep(1000);
|
||||
++retries;
|
||||
}
|
||||
|
||||
if (retries >= 10) {
|
||||
Assert.fail("Query return false");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user