diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 279841320f..c4efe58ae2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -649,6 +649,8 @@ Release 2.0.3-alpha - Unreleased
     of it is undefined after the iteration or modifications of the map.
     (szetszwo)
 
+    HDFS-4231. BackupNode: Introduce BackupState. (shv)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index dd273a2187..077a953690 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -35,6 +36,7 @@
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -414,14 +416,23 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode)
       + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
     return nsInfo;
   }
-  
+
+  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
+
+  protected HAState createHAState() {
+    return new BackupState();
+  }
+
+  @Override // NameNode
   protected NameNodeHAContext createHAContext() {
     return new BNHAContext();
   }
-  
+
   private class BNHAContext extends NameNodeHAContext {
-    @Override // NameNode
+    @Override // NameNodeHAContext
     public void checkOperation(OperationCategory op)
         throws StandbyException {
       if (op == OperationCategory.UNCHECKED ||
@@ -435,10 +446,42 @@ public void checkOperation(OperationCategory op)
         throw new StandbyException(msg);
       }
     }
-  }
-  
-  @Override
-  protected String getNameServiceId(Configuration conf) {
-    return DFSUtil.getBackupNameServiceId(conf);
+
+    @Override // NameNodeHAContext
+    public void prepareToStopStandbyServices() throws ServiceFailedException {
+    }
+
+    /**
+     * Start services for BackupNode.
+     * <p>
+     * The following services should be muted
+     * (not run or not pass any control commands to DataNodes)
+     * on BackupNode:
+     * {@link LeaseManager.Monitor} protected by SafeMode.
+     * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
+     * {@link HeartbeatManager.Monitor} protected by SafeMode.
+     * {@link DecommissionManager.Monitor} needs to prohibit refreshNodes().
+     * {@link PendingReplicationBlocks.PendingReplicationMonitor} harmless,
+     * because ReplicationMonitor is muted.
+     */
+    @Override
+    public void startActiveServices() throws IOException {
+      try {
+        namesystem.startActiveServices();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
+
+    @Override
+    public void stopActiveServices() throws IOException {
+      try {
+        if (namesystem != null) {
+          namesystem.stopActiveServices();
+        }
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
new file mode 100644
index 0000000000..f8c79284c1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
@@ -0,0 +1,51 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
+import org.apache.hadoop.ipc.StandbyException;
+
+public class BackupState extends HAState {
+
+  public BackupState() {
+    super(HAServiceState.STANDBY);
+  }
+
+  @Override // HAState
+  public void checkOperation(HAContext context, OperationCategory op)
+      throws StandbyException {
+    context.checkOperation(op);
+  }
+
+  @Override // HAState
+  public boolean shouldPopulateReplQueues() {
+    return false;
+  }
+
+  @Override
+  public void enterState(HAContext context) throws ServiceFailedException {
+    try {
+      context.startActiveServices();
+    } catch (IOException e) {
+      throw new ServiceFailedException("Failed to start backup services", e);
+    }
+  }
+
+  @Override
+  public void exitState(HAContext context) throws ServiceFailedException {
+    try {
+      context.stopActiveServices();
+    } catch (IOException e) {
+      throw new ServiceFailedException("Failed to stop backup services", e);
+    }
+  }
+
+  @Override
+  public void prepareToExitState(HAContext context) throws ServiceFailedException {
+    context.prepareToStopStandbyServices();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index afa5fc16f6..682696d627 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -121,6 +121,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -163,7 +164,6 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
@@ -3438,9 +3438,9 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
   private NNHAStatusHeartbeat createHaStatusHeartbeat() {
     HAState state = haContext.getState();
     NNHAStatusHeartbeat.State hbState;
-    if (state instanceof ActiveState) {
+    if (state.getServiceState() == HAServiceState.ACTIVE) {
       hbState = NNHAStatusHeartbeat.State.ACTIVE;
-    } else if (state instanceof StandbyState) {
+    } else if (state.getServiceState() == HAServiceState.STANDBY) {
       hbState = NNHAStatusHeartbeat.State.STANDBY;
     } else {
       throw new AssertionError("Invalid state: " + state.getClass());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 309811c242..f77604a962 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -598,11 +598,7 @@ protected NameNode(Configuration conf, NamenodeRole role)
     String nsId = getNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
-    if (!haEnabled) {
-      state = ACTIVE_STATE;
-    } else {
-      state = STANDBY_STATE;
-    }
+    state = createHAState();
     this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
     this.haContext = createHAContext();
     try {
@@ -619,6 +615,10 @@ protected NameNode(Configuration conf, NamenodeRole role)
     }
   }
 
+  protected HAState createHAState() {
+    return !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
+  }
+
   protected HAContext createHAContext() {
     return new NameNodeHAContext();
   }
@@ -1298,7 +1298,7 @@ synchronized HAServiceState getServiceState() {
    * before exit.
    * @throws ExitException thrown only for testing.
    */
-  private synchronized void doImmediateShutdown(Throwable t)
+  protected synchronized void doImmediateShutdown(Throwable t)
       throws ExitException {
     String message = "Error encountered requiring NN shutdown. " +
" + "Shutting down immediately."; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java index 6ade5f2dfc..223064893d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HAUtil; @@ -103,6 +104,9 @@ BackupNode startBackupNode(Configuration conf, BackupNode bn = (BackupNode)NameNode.createNameNode( new String[]{startupOpt.getName()}, c); assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode()); + assertTrue(bn.getRole() + " must be in StandbyState", + bn.getNamesystem().getHAState() + .equalsIgnoreCase(HAServiceState.STANDBY.name())); return bn; }