HDFS-6597. Add a new option to NN upgrade to terminate the process after upgrade on NN is completed. Contributed by Danilo Vunjak.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1611723 13f79535-47bb-0310-9956-ffa450edef68
Parent: 9f75b97a09
Commit: 5f9e52f745
@@ -298,6 +298,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-4120. Add a new "-skipSharedEditsCheck" option for BootstrapStandby
     (Liang Xie and Rakesh R via vinayakumarb)
 
+    HDFS-6597. Add a new option to NN upgrade to terminate the process after
+    upgrade on NN is completed. (Danilo Vunjak via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -93,7 +93,8 @@ static public enum StartupOption{
     FORCE("-force"),
     NONINTERACTIVE("-nonInteractive"),
     RENAMERESERVED("-renameReserved"),
-    METADATAVERSION("-metadataVersion");
+    METADATAVERSION("-metadataVersion"),
+    UPGRADEONLY("-upgradeOnly");
 
     private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
         "(\\w+)\\((\\w+)\\)");
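Each StartupOption constant carries the literal command-line flag it maps to; UPGRADEONLY("-upgradeOnly") follows that pattern. A minimal self-contained sketch of the same enum-with-flag idiom (the Flag type and forName helper below are illustrative names, not Hadoop code):

// Illustrative enum-with-flag-string pattern, mirroring StartupOption.
enum Flag {
  UPGRADE("-upgrade"),
  UPGRADEONLY("-upgradeOnly");

  private final String name;

  Flag(String name) { this.name = name; }

  String getName() { return name; }

  // Case-insensitive lookup, similar in spirit to how parseArguments matches flags.
  static Flag forName(String cmd) {
    for (Flag f : Flag.values()) {
      if (f.getName().equalsIgnoreCase(cmd)) {
        return f;
      }
    }
    return null;
  }
}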
@@ -225,6 +225,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
       NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
     }
     if (startOpt != StartupOption.UPGRADE
+        && startOpt != StartupOption.UPGRADEONLY
         && !RollingUpgradeStartupOption.STARTED.matches(startOpt)
         && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
         && layoutVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) {
@@ -263,6 +264,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
     // 3. Do transitions
     switch(startOpt) {
     case UPGRADE:
+    case UPGRADEONLY:
       doUpgrade(target);
       return false; // upgrade saved image already
     case IMPORT:
@@ -748,11 +750,13 @@ public void initEditLog(StartupOption startOpt) throws IOException {
       editLog.recoverUnclosedStreams();
     } else if (HAUtil.isHAEnabled(conf, nameserviceId)
         && (startOpt == StartupOption.UPGRADE
+            || startOpt == StartupOption.UPGRADEONLY
             || RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
       // This NN is HA, but we're doing an upgrade or a rollback of rolling
       // upgrade so init the edit log for write.
       editLog.initJournalsForWrite();
-      if (startOpt == StartupOption.UPGRADE) {
+      if (startOpt == StartupOption.UPGRADE
+          || startOpt == StartupOption.UPGRADEONLY) {
         long sharedLogCTime = editLog.getSharedLogCTime();
         if (this.storage.getCTime() < sharedLogCTime) {
           throw new IOException("It looks like the shared log is already " +
@@ -979,7 +979,8 @@ private void loadFSImage(StartupOption startOpt) throws IOException {
       }
       // This will start a new log segment and write to the seen_txid file, so
       // we shouldn't do it when coming up in standby state
-      if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)) {
+      if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)
+          || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) {
         fsImage.openEditLogForWrite();
       }
       success = true;
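The patched condition keeps the existing expanded (haEnabled && ...) style even though, once !haEnabled has been checked, the haEnabled conjuncts are redundant; it is logically equivalent to !haEnabled || startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY. A throwaway check of that equivalence, purely illustrative and not part of the patch:

// Exhaustively verifies that the expanded form used in the patch matches the
// simpler form for every combination of the three booleans.
public class ConditionEquivalenceCheck {
  public static void main(String[] args) {
    for (boolean ha : new boolean[] { false, true }) {
      for (boolean upgrade : new boolean[] { false, true }) {
        for (boolean upgradeOnly : new boolean[] { false, true }) {
          boolean patched = !ha || (ha && upgrade) || (ha && upgradeOnly);
          boolean simple = !ha || upgrade || upgradeOnly;
          if (patched != simple) {
            throw new AssertionError("forms disagree");
          }
        }
      }
    }
    System.out.println("expanded and simplified conditions agree in all 8 cases");
  }
}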
@@ -836,7 +836,7 @@ private void reportErrorsOnDirectory(StorageDirectory sd) {
    */
   void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
       throws IOException {
-    if (startOpt == StartupOption.UPGRADE) {
+    if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) {
       // If upgrade from a release that does not support federation,
       // if clusterId is provided in the startupOptions use it.
       // Else generate a new cluster ID
@@ -210,6 +210,9 @@ public static enum OperationCategory {
       + StartupOption.UPGRADE.getName() +
         " [" + StartupOption.CLUSTERID.getName() + " cid]" +
         " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+      + StartupOption.UPGRADEONLY.getName() +
+        " [" + StartupOption.CLUSTERID.getName() + " cid]" +
+        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
       + StartupOption.ROLLBACK.getName() + "] | \n\t["
       + StartupOption.ROLLINGUPGRADE.getName() + " <"
       + RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|"
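Since the CLUSTERID flag is "-clusterid" and the RENAMERESERVED flag is "-renameReserved", the new branch of the usage text should render roughly as below, mirroring the existing -upgrade line (spacing approximated, not the exact output):

	[-upgradeOnly [-clusterid cid] [-renameReserved<k-v pairs>] ] |

In other words, -upgradeOnly accepts the same optional -clusterid and -renameReserved arguments that -upgrade already accepts.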
@@ -713,6 +716,7 @@ private void stopHttpServer() {
  *   <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
  *   <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
  *   <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
+ *   <li>{@link StartupOption#UPGRADEONLY UPGRADEONLY} - upgrade the cluster
  * upgrade and create a snapshot of the current file system state</li>
  *   <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
  * metadata</li>
@@ -767,7 +771,8 @@ protected NameNode(Configuration conf, NamenodeRole role)
   }
 
   protected HAState createHAState(StartupOption startOpt) {
-    if (!haEnabled || startOpt == StartupOption.UPGRADE) {
+    if (!haEnabled || startOpt == StartupOption.UPGRADE
+        || startOpt == StartupOption.UPGRADEONLY) {
       return ACTIVE_STATE;
     } else {
       return STANDBY_STATE;
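The effect here is that an HA-enabled NameNode started with -upgrade or -upgradeOnly comes up in the active state, since the upgrade needs to write to the edit log (see the initEditLog change above), while every other HA startup still begins in standby. A standalone sketch of that decision with hypothetical names, not the NameNode API:

// Models the state choice made by createHAState after this patch; illustrative only.
public class HAStateChoiceSketch {
  enum State { ACTIVE, STANDBY }
  enum Opt { REGULAR, UPGRADE, UPGRADEONLY }

  static State initialState(boolean haEnabled, Opt opt) {
    if (!haEnabled || opt == Opt.UPGRADE || opt == Opt.UPGRADEONLY) {
      return State.ACTIVE;
    }
    return State.STANDBY;
  }

  public static void main(String[] args) {
    System.out.println(initialState(true, Opt.UPGRADEONLY)); // ACTIVE
    System.out.println(initialState(true, Opt.REGULAR));     // STANDBY
    System.out.println(initialState(false, Opt.REGULAR));    // ACTIVE (non-HA)
  }
}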
@@ -1198,8 +1203,10 @@ static StartupOption parseArguments(String args[]) {
       startOpt = StartupOption.BACKUP;
     } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
       startOpt = StartupOption.CHECKPOINT;
-    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
-      startOpt = StartupOption.UPGRADE;
+    } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)
+        || StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) {
+      startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ?
+          StartupOption.UPGRADE : StartupOption.UPGRADEONLY;
       /* Can be followed by CLUSTERID with a required parameter or
        * RENAMERESERVED with an optional parameter
        */
@@ -1407,6 +1414,12 @@ public static NameNode createNameNode(String argv[], Configuration conf)
         terminate(0);
         return null; // avoid javac warning
       }
+      case UPGRADEONLY: {
+        DefaultMetricsSystem.initialize("NameNode");
+        new NameNode(conf);
+        terminate(0);
+        return null;
+      }
       default: {
         DefaultMetricsSystem.initialize("NameNode");
         return new NameNode(conf);
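This is the heart of HDFS-6597: the UPGRADEONLY case performs the same initialization as a normal start (the NameNode constructor carries out the metadata upgrade because the earlier hunks treat UPGRADEONLY like UPGRADE), but then calls terminate(0), so the process exits once the upgrade has completed instead of continuing to serve. Operationally, something like hdfs namenode -upgradeOnly should upgrade the on-disk metadata and exit with status 0, after which the NameNode can be started normally. A standalone sketch of that dispatch, with hypothetical names rather than the actual NameNode code:

// Illustrates the added control flow: both upgrade modes do the upgrade work,
// but only -upgradeOnly terminates the process afterwards.
public class UpgradeDispatchSketch {
  enum Opt { UPGRADE, UPGRADEONLY }

  public static void main(String[] args) {
    Opt opt = (args.length > 0 && args[0].equalsIgnoreCase("-upgradeOnly"))
        ? Opt.UPGRADEONLY : Opt.UPGRADE;
    startAndUpgrade(opt);        // stands in for new NameNode(conf)
    if (opt == Opt.UPGRADEONLY) {
      System.exit(0);            // stands in for terminate(0): stop after the upgrade
    }
    // With -upgrade the daemon would keep running from here on.
  }

  private static void startAndUpgrade(Opt opt) {
    System.out.println("metadata upgrade performed for " + opt);
  }
}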
@@ -21,6 +21,8 @@
 import static org.junit.Assert.assertTrue;
 
 import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 
 import org.apache.hadoop.conf.Configuration;
@@ -30,11 +32,15 @@
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  * This class tests various upgrade cases from earlier versions to current
  * version with and without clusterid.
  */
+@RunWith(value = Parameterized.class)
 public class TestStartupOptionUpgrade {
 
   private Configuration conf;
@@ -42,10 +48,21 @@ public class TestStartupOptionUpgrade {
   private int layoutVersion;
   NNStorage storage;
 
+  @Parameters
+  public static Collection<Object[]> startOption() {
+    Object[][] params = new Object[][] { { StartupOption.UPGRADE },
+        { StartupOption.UPGRADEONLY } };
+    return Arrays.asList(params);
+  }
+
+  public TestStartupOptionUpgrade(StartupOption startOption) {
+    super();
+    this.startOpt = startOption;
+  }
+
   @Before
   public void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    startOpt = StartupOption.UPGRADE;
     startOpt.setClusterId(null);
     storage = new NNStorage(conf,
         Collections.<URI>emptyList(),
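Converting the test to JUnit 4's Parameterized runner means the @Parameters factory supplies one StartupOption per run and the constructor stores it, so every existing test method now executes twice, once with UPGRADE and once with UPGRADEONLY; the hard-coded startOpt = StartupOption.UPGRADE line in setUp() is therefore removed. A self-contained illustration of the same mechanism with generic names (it does not touch any HDFS classes):

import static org.junit.Assert.assertTrue;

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

// Each Object[] returned by the @Parameters factory produces a fresh instance of
// the class, so flagStartsWithDash() runs once per flag value.
@RunWith(Parameterized.class)
public class FlagParameterizedExample {

  private final String flag;

  @Parameters
  public static Collection<Object[]> flags() {
    return Arrays.asList(new Object[][] { { "-upgrade" }, { "-upgradeOnly" } });
  }

  public FlagParameterizedExample(String flag) {
    this.flag = flag;
  }

  @Test
  public void flagStartsWithDash() {
    assertTrue(flag.startsWith("-"));
  }
}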