it = storage.dirIterator(); it.hasNext();) {
@@ -409,7 +395,6 @@ private void doUpgrade(FSNamesystem target) throws IOException {
+ storage.getRemovedStorageDirs().size()
+ " storage directory(ies), previously logged.");
}
- storage.initializeDistributedUpgrade();
}
private void doRollback() throws IOException {
@@ -472,8 +457,6 @@ private void doRollback() throws IOException {
LOG.info("Rollback of " + sd.getRoot()+ " is complete.");
}
isUpgradeFinalized = true;
- // check whether name-node can start in regular mode
- storage.verifyDistributedUpgradeProgress(StartupOption.REGULAR);
}
private void doFinalize(StorageDirectory sd) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ba5ec3db19..3a88e26a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -108,7 +108,6 @@
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -136,7 +135,6 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -160,7 +158,6 @@
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -179,7 +176,6 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
@@ -942,8 +938,7 @@ NamespaceInfo getNamespaceInfo() {
NamespaceInfo unprotectedGetNamespaceInfo() {
return new NamespaceInfo(dir.fsImage.getStorage().getNamespaceID(),
getClusterId(), getBlockPoolId(),
- dir.fsImage.getStorage().getCTime(),
- upgradeManager.getUpgradeVersion());
+ dir.fsImage.getStorage().getCTime());
}
/**
@@ -3387,13 +3382,6 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
xceiverCount, maxTransfer, failedVolumes);
- if (cmds == null || cmds.length == 0) {
- DatanodeCommand cmd = upgradeManager.getBroadcastCommand();
- if (cmd != null) {
- cmds = new DatanodeCommand[] {cmd};
- }
- }
-
return new HeartbeatResponse(cmds, createHaStatusHeartbeat());
} finally {
readUnlock();
@@ -3834,24 +3822,9 @@ private void enter() {
/**
* Leave safe mode.
*
- * Switch to manual safe mode if distributed upgrade is required.
* Check for invalid, under- & over-replicated blocks in the end of startup.
*/
- private synchronized void leave(boolean checkForUpgrades) {
- if(checkForUpgrades) {
- // verify whether a distributed upgrade needs to be started
- boolean needUpgrade = false;
- try {
- needUpgrade = upgradeManager.startUpgrade();
- } catch(IOException e) {
- FSNamesystem.LOG.error("IOException in startDistributedUpgradeIfNeeded", e);
- }
- if(needUpgrade) {
- // switch to manual safe mode
- safeMode = new SafeModeInfo(false);
- return;
- }
- }
+ private synchronized void leave() {
// if not done yet, initialize replication queues.
// In the standby, do not populate repl queues
if (!isPopulatingReplQueues() && !isInStandbyState()) {
@@ -3945,7 +3918,7 @@ private void checkMode() {
// the threshold is reached
if (!isOn() || // safe mode is off
extension <= 0 || threshold <= 0) { // don't need to wait
- this.leave(true); // leave safe mode
+ this.leave(); // leave safe mode
return;
}
if (reached > 0) { // threshold has already been reached before
@@ -4049,10 +4022,6 @@ String getTurnOffTip() {
leaveMsg = "Safe mode will be turned off automatically";
}
if(isManual()) {
- if(upgradeManager.getUpgradeState())
- return leaveMsg + " upon completion of " +
- "the distributed upgrade: upgrade progress = " +
- upgradeManager.getUpgradeStatus() + "%";
leaveMsg = "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off";
}
@@ -4187,13 +4156,7 @@ public void run() {
LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread. ");
} else {
// leave safe mode and stop the monitor
- try {
- leaveSafeMode(true);
- } catch(SafeModeException es) { // should never happen
- String msg = "SafeModeMonitor may not run during distributed upgrade.";
- assert false : msg;
- throw new RuntimeException(msg, es);
- }
+ leaveSafeMode();
}
smmthread = null;
}
@@ -4204,7 +4167,7 @@ boolean setSafeMode(SafeModeAction action) throws IOException {
checkSuperuserPrivilege();
switch(action) {
case SAFEMODE_LEAVE: // leave safe mode
- leaveSafeMode(false);
+ leaveSafeMode();
break;
case SAFEMODE_ENTER: // enter safe mode
enterSafeMode(false);
@@ -4389,17 +4352,14 @@ void enterSafeMode(boolean resourcesLow) throws IOException {
* Leave safe mode.
* @throws IOException
*/
- void leaveSafeMode(boolean checkForUpgrades) throws SafeModeException {
+ void leaveSafeMode() {
writeLock();
try {
if (!isInSafeMode()) {
NameNode.stateChangeLog.info("STATE* Safe mode is already OFF.");
return;
}
- if(upgradeManager.getUpgradeState())
- throw new SafeModeException("Distributed upgrade is in progress",
- safeMode);
- safeMode.leave(checkForUpgrades);
+ safeMode.leave();
} finally {
writeUnlock();
}
@@ -4474,18 +4434,6 @@ private boolean isValidBlock(Block b) {
return (blockManager.getBlockCollection(b) != null);
}
- // Distributed upgrade manager
- final UpgradeManagerNamenode upgradeManager = new UpgradeManagerNamenode(this);
-
- UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
- ) throws IOException {
- return upgradeManager.distributedUpgradeProgress(action);
- }
-
- UpgradeCommand processDistributedUpgradeCommand(UpgradeCommand comm) throws IOException {
- return upgradeManager.processUpgradeCommand(comm);
- }
-
PermissionStatus createFsOwnerPermissions(FsPermission permission) {
return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission);
}
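A note on the safe-mode changes above: `leave(boolean)` could previously re-enter manual safe mode to start a distributed upgrade, and `leaveSafeMode(boolean)` could throw `SafeModeException` while one was in flight. With both paths gone, exiting safe mode is unconditional. A minimal caller-side sketch of the simplification (hypothetical call site; the scaffolding around the two calls is illustrative):

    // Before: callers had to handle a checked exception that could only
    // arise while a distributed upgrade was running.
    try {
      namesystem.leaveSafeMode(false);
    } catch (SafeModeException e) {
      // "Distributed upgrade is in progress" -- no longer reachable
    }

    // After: the call is unconditional and declares no checked exception.
    namesystem.leaveSafeMode();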
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 2a4998735d..abc871fa9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -32,8 +32,6 @@
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -45,7 +43,6 @@
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
-import org.apache.hadoop.hdfs.server.common.UpgradeManager;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.PersistentLongFile;
@@ -65,8 +62,6 @@
@InterfaceAudience.Private
public class NNStorage extends Storage implements Closeable,
StorageErrorReporter {
- private static final Log LOG = LogFactory.getLog(NNStorage.class.getName());
-
static final String DEPRECATED_MESSAGE_DIGEST_PROPERTY = "imageMD5Digest";
static final String LOCAL_URI_SCHEME = "file";
@@ -112,7 +107,6 @@ public boolean isOfType(StorageDirType type) {
}
}
- private UpgradeManager upgradeManager = null;
protected String blockpoolID = ""; // id of the block pool
/**
@@ -551,11 +545,8 @@ public void format(NamespaceInfo nsInfo) throws IOException {
public static NamespaceInfo newNamespaceInfo()
throws UnknownHostException {
- return new NamespaceInfo(
- newNamespaceID(),
- newClusterID(),
- newBlockPoolID(),
- 0L, 0);
+ return new NamespaceInfo(newNamespaceID(), newClusterID(),
+ newBlockPoolID(), 0L);
}
public void format() throws IOException {
@@ -600,13 +591,6 @@ protected void setFieldsFromProperties(
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
-
- String sDUS, sDUV;
- sDUS = props.getProperty("distributedUpgradeState");
- sDUV = props.getProperty("distributedUpgradeVersion");
- setDistributedUpgradeState(
- sDUS == null? false : Boolean.parseBoolean(sDUS),
- sDUV == null? getLayoutVersion() : Integer.parseInt(sDUV));
setDeprecatedPropertiesForUpgrade(props);
}
@@ -653,13 +637,6 @@ protected void setPropertiesFromFields(Properties props,
if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
props.setProperty("blockpoolID", blockpoolID);
}
- boolean uState = getDistributedUpgradeState();
- int uVersion = getDistributedUpgradeVersion();
- if(uState && uVersion != getLayoutVersion()) {
- props.setProperty("distributedUpgradeState", Boolean.toString(uState));
- props.setProperty("distributedUpgradeVersion",
- Integer.toString(uVersion));
- }
}
static File getStorageFile(StorageDirectory sd, NameNodeFile type, long imageTxId) {
@@ -732,7 +709,7 @@ File findFinalizedEditsFile(long startTxId, long endTxId)
* Return the first readable image file for the given txid, or null
* if no such image can be found
*/
- File findImageFile(long txid) throws IOException {
+ File findImageFile(long txid) {
return findFile(NameNodeDirType.IMAGE,
getImageFileName(txid));
}
@@ -753,76 +730,6 @@ private File findFile(NameNodeDirType dirType, String name) {
return null;
}
- /**
- * Set the upgrade manager for use in a distributed upgrade.
- * @param um The upgrade manager
- */
- void setUpgradeManager(UpgradeManager um) {
- upgradeManager = um;
- }
-
- /**
- * @return The current distribued upgrade state.
- */
- boolean getDistributedUpgradeState() {
- return upgradeManager == null ? false : upgradeManager.getUpgradeState();
- }
-
- /**
- * @return The current upgrade version.
- */
- int getDistributedUpgradeVersion() {
- return upgradeManager == null ? 0 : upgradeManager.getUpgradeVersion();
- }
-
- /**
- * Set the upgrade state and version.
- * @param uState the new state.
- * @param uVersion the new version.
- */
- private void setDistributedUpgradeState(boolean uState, int uVersion) {
- if (upgradeManager != null) {
- upgradeManager.setUpgradeState(uState, uVersion);
- }
- }
-
- /**
- * Verify that the distributed upgrade state is valid.
- * @param startOpt the option the namenode was started with.
- */
- void verifyDistributedUpgradeProgress(StartupOption startOpt
- ) throws IOException {
- if(startOpt == StartupOption.ROLLBACK || startOpt == StartupOption.IMPORT)
- return;
-
- assert upgradeManager != null : "FSNameSystem.upgradeManager is null.";
- if(startOpt != StartupOption.UPGRADE) {
- if(upgradeManager.getUpgradeState())
- throw new IOException(
- "\n Previous distributed upgrade was not completed. "
- + "\n Please restart NameNode with -upgrade option.");
- if(upgradeManager.getDistributedUpgrades() != null)
- throw new IOException("\n Distributed upgrade for NameNode version "
- + upgradeManager.getUpgradeVersion()
- + " to current LV " + HdfsConstants.LAYOUT_VERSION
- + " is required.\n Please restart NameNode"
- + " with -upgrade option.");
- }
- }
-
- /**
- * Initialize a distributed upgrade.
- */
- void initializeDistributedUpgrade() throws IOException {
- if(! upgradeManager.initializeUpgrade())
- return;
- // write new upgrade state into disk
- writeAll();
- LOG.info("\n Distributed upgrade for NameNode version "
- + upgradeManager.getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is initialized.");
- }
-
/**
* Disable the check for pre-upgradable layouts. Needed for BackupImage.
* @param val Whether to disable the preupgradeable layout check.
@@ -1099,7 +1006,6 @@ public NamespaceInfo getNamespaceInfo() {
getNamespaceID(),
getClusterID(),
getBlockPoolID(),
- getCTime(),
- getDistributedUpgradeVersion());
+ getCTime());
}
}
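A compatibility note on the NNStorage changes: `setFieldsFromProperties` no longer looks for the `distributedUpgradeState` and `distributedUpgradeVersion` keys, yet a VERSION file that still carries them remains readable, because `Properties`-based parsing never consults keys it is not asked for. A self-contained sketch of that behavior (key values are illustrative):

    import java.io.StringReader;
    import java.util.Properties;

    public class StaleVersionKeys {
      public static void main(String[] args) throws Exception {
        // Excerpt of a pre-change VERSION file, including the retired keys.
        Properties props = new Properties();
        props.load(new StringReader(
            "namespaceID=12345\n"                  // illustrative value
            + "blockpoolID=BP-1-127.0.0.1-1\n"     // illustrative value
            + "distributedUpgradeState=false\n"    // retired key, now ignored
            + "distributedUpgradeVersion=-40\n")); // retired key, now ignored
        // The reader asks only for the keys it knows; stale ones are skipped.
        System.out.println(props.getProperty("blockpoolID"));
      }
    }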
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 8a9ca07e44..7f9dcd29a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -742,8 +742,8 @@ public void finalizeUpgrade() throws IOException {
@Override // ClientProtocol
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException {
- namesystem.checkOperation(OperationCategory.READ);
- return namesystem.distributedUpgradeProgress(action);
+ throw new UnsupportedActionException(
+ "Deprecated method. No longer supported");
}
@Override // ClientProtocol
@@ -917,8 +917,10 @@ public NamespaceInfo versionRequest() throws IOException {
}
@Override // DatanodeProtocol
- public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException {
- return namesystem.processDistributedUpgradeCommand(comm);
+ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
+ throws IOException {
+ throw new UnsupportedActionException(
+ "Deprecated method, no longer supported");
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index a21bf29fab..2c1981cb62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -120,19 +120,6 @@ static String getInodeLimitText(FSNamesystem fsn) {
return str;
}
- static String getUpgradeStatusText(FSNamesystem fsn) {
- String statusText = "";
- try {
- UpgradeStatusReport status = fsn
- .distributedUpgradeProgress(UpgradeAction.GET_STATUS);
- statusText = (status == null ? "There are no upgrades in progress."
- : status.getStatusText(false));
- } catch (IOException e) {
- statusText = "Upgrade status unknown.";
- }
- return statusText;
- }
-
/** Return a table containing version information. */
static String getVersionTable(FSNamesystem fsn) {
return "
"
@@ -141,8 +128,6 @@ static String getVersionTable(FSNamesystem fsn) {
+ VersionInfo.getVersion() + ", " + VersionInfo.getRevision()
+ "</td></tr>\n" + "\n  <tr><td class='col1'>Compiled:</td><td>" + VersionInfo.getDate()
+ " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch()
- + "</td></tr>\n  <tr><td class='col1'>Upgrades:</td><td>"
- + getUpgradeStatusText(fsn)
+ "</td></tr>\n  <tr><td class='col1'>Cluster ID:</td><td>" + fsn.getClusterId()
+ "</td></tr>\n  <tr><td class='col1'>Block Pool ID:</td><td>" + fsn.getBlockPoolId()
+ "</td></tr>\n</table></div>";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
deleted file mode 100644
index 8e6eddf11b..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.UpgradeManager;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-
-/**
- * Upgrade manager for name-nodes.
- *
- * Distributed upgrades for a name-node starts when the safe mode conditions
- * are met and the name-node is about to exit it.
- * At this point the name-node enters manual safe mode which will remain
- * on until the upgrade is completed.
- * After that the name-nodes processes upgrade commands from data-nodes
- * and updates its status.
- */
-class UpgradeManagerNamenode extends UpgradeManager {
- @Override
- public HdfsServerConstants.NodeType getType() {
- return HdfsServerConstants.NodeType.NAME_NODE;
- }
-
- private final FSNamesystem namesystem;
-
- UpgradeManagerNamenode(FSNamesystem namesystem) {
- this.namesystem = namesystem;
- }
-
- /**
- * Start distributed upgrade.
- * Instantiates distributed upgrade objects.
- *
- * @return true if distributed upgrade is required or false otherwise
- * @throws IOException
- */
- @Override
- public synchronized boolean startUpgrade() throws IOException {
- if(!upgradeState) {
- initializeUpgrade();
- if(!upgradeState) return false;
- // write new upgrade state into disk
- namesystem.getFSImage().getStorage().writeAll();
- }
- assert currentUpgrades != null : "currentUpgrades is null";
- this.broadcastCommand = currentUpgrades.first().startUpgrade();
- NameNode.LOG.info("\n Distributed upgrade for NameNode version "
- + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is started.");
- return true;
- }
-
- synchronized UpgradeCommand processUpgradeCommand(UpgradeCommand command
- ) throws IOException {
- if(NameNode.LOG.isDebugEnabled()) {
- NameNode.LOG.debug("\n Distributed upgrade for NameNode version "
- + getUpgradeVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is processing upgrade command: "
- + command.getAction() + " status = " + getUpgradeStatus() + "%");
- }
- if(currentUpgrades == null) {
- NameNode.LOG.info("Ignoring upgrade command: "
- + command.getAction() + " version " + command.getVersion()
- + ". No distributed upgrades are currently running on the NameNode");
- return null;
- }
- UpgradeObjectNamenode curUO = (UpgradeObjectNamenode)currentUpgrades.first();
- if(command.getVersion() != curUO.getVersion())
- throw new IncorrectVersionException(command.getVersion(),
- "UpgradeCommand", curUO.getVersion());
- UpgradeCommand reply = curUO.processUpgradeCommand(command);
- if(curUO.getUpgradeStatus() < 100) {
- return reply;
- }
- // current upgrade is done
- curUO.completeUpgrade();
- NameNode.LOG.info("\n Distributed upgrade for NameNode version "
- + curUO.getVersion() + " to current LV "
- + HdfsConstants.LAYOUT_VERSION + " is complete.");
- // proceede with the next one
- currentUpgrades.remove(curUO);
- if(currentUpgrades.isEmpty()) { // all upgrades are done
- completeUpgrade();
- } else { // start next upgrade
- curUO = (UpgradeObjectNamenode)currentUpgrades.first();
- this.broadcastCommand = curUO.startUpgrade();
- }
- return reply;
- }
-
- @Override
- public synchronized void completeUpgrade() throws IOException {
- // set and write new upgrade state into disk
- setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);
- namesystem.getFSImage().getStorage().writeAll();
- currentUpgrades = null;
- broadcastCommand = null;
- namesystem.leaveSafeMode(false);
- }
-
- synchronized UpgradeStatusReport distributedUpgradeProgress
- (UpgradeAction action) throws IOException {
- boolean isFinalized = false;
- if(currentUpgrades == null) { // no upgrades are in progress
- FSImage fsimage = namesystem.getFSImage();
- isFinalized = fsimage.isUpgradeFinalized();
- if(isFinalized) // upgrade is finalized
- return null; // nothing to report
- return new UpgradeStatusReport(fsimage.getStorage().getLayoutVersion(),
- (short)101, isFinalized);
- }
- UpgradeObjectNamenode curUO = (UpgradeObjectNamenode)currentUpgrades.first();
- boolean details = false;
- switch(action) {
- case GET_STATUS:
- break;
- case DETAILED_STATUS:
- details = true;
- break;
- case FORCE_PROCEED:
- curUO.forceProceed();
- }
- return curUO.getUpgradeStatusReport(details);
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
deleted file mode 100644
index 52939c6959..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.UpgradeObject;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-
-/**
- * Base class for name-node upgrade objects.
- * Data-node upgrades are run in separate threads.
- */
-@InterfaceAudience.Private
-public abstract class UpgradeObjectNamenode extends UpgradeObject {
-
- /**
- * Process an upgrade command.
- * RPC has only one very generic command for all upgrade related inter
- * component communications.
- * The actual command recognition and execution should be handled here.
- * The reply is sent back also as an UpgradeCommand.
- *
- * @param command
- * @return the reply command which is analyzed on the client side.
- */
- public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
- ) throws IOException;
-
- @Override
- public HdfsServerConstants.NodeType getType() {
- return HdfsServerConstants.NodeType.NAME_NODE;
- }
-
- /**
- */
- @Override
- public UpgradeCommand startUpgrade() throws IOException {
- // broadcast that data-nodes must start the upgrade
- return new UpgradeCommand(UpgradeCommand.UC_ACTION_START_UPGRADE,
- getVersion(), (short)0);
- }
-
- public void forceProceed() throws IOException {
- // do nothing by default
- NameNode.LOG.info("forceProceed() is not defined for the upgrade. "
- + getDescription());
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
index 1bcb9121ed..821d496145 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
@@ -37,7 +37,6 @@
@InterfaceStability.Evolving
public class NamespaceInfo extends StorageInfo {
String buildVersion;
- int distributedUpgradeVersion;
String blockPoolID = ""; // id of the block pool
String softwareVersion;
@@ -47,17 +46,16 @@ public NamespaceInfo() {
}
public NamespaceInfo(int nsID, String clusterID, String bpID,
- long cT, int duVersion, String buildVersion, String softwareVersion) {
+ long cT, String buildVersion, String softwareVersion) {
super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
blockPoolID = bpID;
this.buildVersion = buildVersion;
- this.distributedUpgradeVersion = duVersion;
this.softwareVersion = softwareVersion;
}
public NamespaceInfo(int nsID, String clusterID, String bpID,
- long cT, int duVersion) {
- this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion(),
+ long cT) {
+ this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
VersionInfo.getVersion());
}
@@ -65,10 +63,6 @@ public String getBuildVersion() {
return buildVersion;
}
- public int getDistributedUpgradeVersion() {
- return distributedUpgradeVersion;
- }
-
public String getBlockPoolID() {
return blockPoolID;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 8ddfdcabf8..d5487e41b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -47,8 +47,6 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.ipc.RPC;
@@ -303,15 +301,9 @@ public void report() throws IOException {
long remaining = ds.getRemaining();
long presentCapacity = used + remaining;
boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
- UpgradeStatusReport status =
- dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
-
if (mode) {
System.out.println("Safe mode is ON");
}
- if (status != null) {
- System.out.println(status.getStatusText(false));
- }
System.out.println("Configured Capacity: " + capacity
+ " (" + StringUtils.byteDesc(capacity) + ")");
System.out.println("Present Capacity: " + presentCapacity
@@ -578,10 +570,6 @@ private void printHelp(String cmd) {
"\t\tfollowed by Namenode doing the same.\n" +
"\t\tThis completes the upgrade process.\n";
- String upgradeProgress = "-upgradeProgress <status|details|force>: \n" +
- "\t\trequest current distributed upgrade status, \n" +
- "\t\ta detailed status or force the upgrade to proceed.\n";
-
String metaSave = "-metasave : \tSave Namenode's primary data structures\n" +
"\t\tto in the directory specified by hadoop.log.dir property.\n" +
"\t\t will contain one line for each of the following\n" +
@@ -643,8 +631,6 @@ private void printHelp(String cmd) {
System.out.println(refreshNodes);
} else if ("finalizeUpgrade".equals(cmd)) {
System.out.println(finalizeUpgrade);
- } else if ("upgradeProgress".equals(cmd)) {
- System.out.println(upgradeProgress);
} else if ("metasave".equals(cmd)) {
System.out.println(metaSave);
} else if (SetQuotaCommand.matches("-"+cmd)) {
@@ -681,7 +667,6 @@ private void printHelp(String cmd) {
System.out.println(restoreFailedStorage);
System.out.println(refreshNodes);
System.out.println(finalizeUpgrade);
- System.out.println(upgradeProgress);
System.out.println(metaSave);
System.out.println(SetQuotaCommand.DESCRIPTION);
System.out.println(ClearQuotaCommand.DESCRIPTION);
@@ -714,41 +699,6 @@ public int finalizeUpgrade() throws IOException {
return 0;
}
- /**
- * Command to request current distributed upgrade status,
- * a detailed status, or to force the upgrade to proceed.
- *
- * Usage: java DFSAdmin -upgradeProgress [status | details | force]
- * @exception IOException
- */
- public int upgradeProgress(String[] argv, int idx) throws IOException {
-
- if (idx != argv.length - 1) {
- printUsage("-upgradeProgress");
- return -1;
- }
-
- UpgradeAction action;
- if ("status".equalsIgnoreCase(argv[idx])) {
- action = UpgradeAction.GET_STATUS;
- } else if ("details".equalsIgnoreCase(argv[idx])) {
- action = UpgradeAction.DETAILED_STATUS;
- } else if ("force".equalsIgnoreCase(argv[idx])) {
- action = UpgradeAction.FORCE_PROCEED;
- } else {
- printUsage("-upgradeProgress");
- return -1;
- }
-
- DistributedFileSystem dfs = getDFS();
- UpgradeStatusReport status = dfs.distributedUpgradeProgress(action);
- String statusText = (status == null ?
- "There are no upgrades in progress." :
- status.getStatusText(action == UpgradeAction.DETAILED_STATUS));
- System.out.println(statusText);
- return 0;
- }
-
/**
* Dumps DFS data structures into specified file.
* Usage: java DFSAdmin -metasave filename
@@ -918,9 +868,6 @@ private static void printUsage(String cmd) {
} else if ("-finalizeUpgrade".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [-finalizeUpgrade]");
- } else if ("-upgradeProgress".equals(cmd)) {
- System.err.println("Usage: java DFSAdmin"
- + " [-upgradeProgress status | details | force]");
} else if ("-metasave".equals(cmd)) {
System.err.println("Usage: java DFSAdmin"
+ " [-metasave filename]");
@@ -969,7 +916,6 @@ private static void printUsage(String cmd) {
System.err.println(" [-restoreFailedStorage true|false|check]");
System.err.println(" [-refreshNodes]");
System.err.println(" [-finalizeUpgrade]");
- System.err.println(" [-upgradeProgress status | details | force]");
System.err.println(" [-metasave filename]");
System.err.println(" [-refreshServiceAcl]");
System.err.println(" [-refreshUserToGroupsMappings]");
@@ -1039,11 +985,6 @@ public int run(String[] argv) throws Exception {
printUsage(cmd);
return exitCode;
}
- } else if ("-upgradeProgress".equals(cmd)) {
- if (argv.length != 2) {
- printUsage(cmd);
- return exitCode;
- }
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
@@ -1113,8 +1054,6 @@ public int run(String[] argv) throws Exception {
exitCode = refreshNodes();
} else if ("-finalizeUpgrade".equals(cmd)) {
exitCode = finalizeUpgrade();
- } else if ("-upgradeProgress".equals(cmd)) {
- exitCode = upgradeProgress(argv, i);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 4c1fab59b6..10767549b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -325,7 +325,7 @@ message RemoteEditLogManifestProto {
*/
message NamespaceInfoProto {
required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
- required uint32 distUpgradeVersion = 2; // Distributed upgrade version
+ required uint32 unused = 2; // Retained for backward compatibility
required string blockPoolID = 3; // block pool used by the namespace
required StorageInfoProto storageInfo = 4;// Node information
required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
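A protobuf `required` field cannot be deleted or renumbered without breaking wire compatibility with peers still running the old schema, which is why field 2 is renamed to `unused` rather than removed. Writers must still populate it; a sketch of what the `PBHelper` conversion might look like after this change (the `setUnused` builder method follows from the field rename, zero is an assumed placeholder, and the `StorageInfo` converter overload is assumed to exist as exercised in the tests below):

    public static NamespaceInfoProto convert(NamespaceInfo info) {
      return NamespaceInfoProto.newBuilder()
          .setBuildVersion(info.getBuildVersion())
          .setUnused(0) // placeholder: field kept only for wire compatibility
          .setBlockPoolID(info.getBlockPoolID())
          .setStorageInfo(PBHelper.convert((StorageInfo) info))
          .setSoftwareVersion(info.getSoftwareVersion())
          .build();
    }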
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
index ba09559cb4..c34834847c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
@@ -75,9 +75,12 @@ protected String getTestFile() {
@After
@Override
public void tearDown() throws Exception {
- if (null != fs)
+ if (fs != null) {
fs.close();
- dfsCluster.shutdown();
+ }
+ if (dfsCluster != null) {
+ dfsCluster.shutdown();
+ }
Thread.sleep(2000);
super.tearDown();
}
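The null guards added to tearDown above also fix a test-hygiene problem: if cluster startup fails, `fs` and `dfsCluster` are left null, and the unguarded `dfsCluster.shutdown()` would throw a NullPointerException from tearDown that masks the original setup failure.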
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index d368c63299..c6776783ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -381,14 +381,12 @@ public void testConvertBlockToken() {
@Test
public void testConvertNamespaceInfo() {
- NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300, 53);
+ NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300);
NamespaceInfoProto proto = PBHelper.convert(info);
NamespaceInfo info2 = PBHelper.convert(proto);
compare(info, info2); //Compare the StorageInfo
assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
assertEquals(info.getBuildVersion(), info2.getBuildVersion());
- assertEquals(info.getDistributedUpgradeVersion(),
- info2.getDistributedUpgradeVersion());
}
private void compare(StorageInfo expected, StorageInfo actual) {
@@ -440,7 +438,7 @@ public void testConvertDatanodeRegistration() {
DatanodeRegistration reg2 = PBHelper.convert(proto);
compare(reg.getStorageInfo(), reg2.getStorageInfo());
compare(reg.getExportedKeys(), reg2.getExportedKeys());
- compare((DatanodeID)reg, (DatanodeID)reg2);
+ compare(reg, reg2);
assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
index 9f0deb3ff8..e37e18bfad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
@@ -155,7 +155,7 @@ public void testCancelDelegationToken() throws Exception {
@Test
public void testAddDelegationTokensDFSApi() throws Exception {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
- DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
+ DistributedFileSystem dfs = cluster.getFileSystem();
Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(1, tokens.length);
@@ -198,7 +198,7 @@ public WebHdfsFileSystem run() throws Exception {
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWithDoAs() throws Exception {
- final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
+ final DistributedFileSystem dfs = cluster.getFileSystem();
final Credentials creds = new Credentials();
final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
Assert.assertEquals(1, tokens.length);
@@ -212,8 +212,7 @@ public void testDelegationTokenWithDoAs() throws Exception {
longUgi.doAs(new PrivilegedExceptionAction<Object>() {