HDFS-4231. BackupNode: Introduce BackupState. Contributed by Konstantin Shvachko.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1416288 13f79535-47bb-0310-9956-ffa450edef68
Parent: 5954e4f1ac
Commit: cbed126eec

CHANGES.txt
@@ -649,6 +649,8 @@ Release 2.0.3-alpha - Unreleased
     of it is undefined after the iteration or modifications of the map.
     (szetszwo)
 
+    HDFS-4231. BackupNode: Introduce BackupState. (shv)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES

BackupNode.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -35,6 +36,7 @@
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
@@ -414,14 +416,23 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode)
       + HdfsConstants.LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion();
     return nsInfo;
   }
 
   @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
+
+  protected HAState createHAState() {
+    return new BackupState();
+  }
+
+  @Override // NameNode
   protected NameNodeHAContext createHAContext() {
     return new BNHAContext();
   }
 
   private class BNHAContext extends NameNodeHAContext {
-    @Override // NameNode
+    @Override // NameNodeHAContext
     public void checkOperation(OperationCategory op)
         throws StandbyException {
       if (op == OperationCategory.UNCHECKED ||
@@ -435,10 +446,42 @@ public void checkOperation(OperationCategory op)
         throw new StandbyException(msg);
       }
     }
-  }
-
-  @Override
-  protected String getNameServiceId(Configuration conf) {
-    return DFSUtil.getBackupNameServiceId(conf);
+
+    @Override // NameNodeHAContext
+    public void prepareToStopStandbyServices() throws ServiceFailedException {
+    }
+
+    /**
+     * Start services for BackupNode.
+     * <p>
+     * The following services should be muted
+     * (not run or not pass any control commands to DataNodes)
+     * on BackupNode:
+     * {@link LeaseManager.Monitor} protected by SafeMode.
+     * {@link BlockManager.ReplicationMonitor} protected by SafeMode.
+     * {@link HeartbeatManager.Monitor} protected by SafeMode.
+     * {@link DecommissionManager.Monitor} need to prohibit refreshNodes().
+     * {@link PendingReplicationBlocks.PendingReplicationMonitor} harmless,
+     * because ReplicationMonitor is muted.
+     */
+    @Override
+    public void startActiveServices() throws IOException {
+      try {
+        namesystem.startActiveServices();
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
+
+    @Override
+    public void stopActiveServices() throws IOException {
+      try {
+        if (namesystem != null) {
+          namesystem.stopActiveServices();
+        }
+      } catch (Throwable t) {
+        doImmediateShutdown(t);
+      }
+    }
   }
 }

BackupState.java (new file)
@@ -0,0 +1,51 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
+import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
+import org.apache.hadoop.ipc.StandbyException;
+
+public class BackupState extends HAState {
+
+  public BackupState() {
+    super(HAServiceState.STANDBY);
+  }
+
+  @Override // HAState
+  public void checkOperation(HAContext context, OperationCategory op)
+      throws StandbyException {
+    context.checkOperation(op);
+  }
+
+  @Override // HAState
+  public boolean shouldPopulateReplQueues() {
+    return false;
+  }
+
+  @Override
+  public void enterState(HAContext context) throws ServiceFailedException {
+    try {
+      context.startActiveServices();
+    } catch (IOException e) {
+      throw new ServiceFailedException("Failed to start backup services", e);
+    }
+  }
+
+  @Override
+  public void exitState(HAContext context) throws ServiceFailedException {
+    try {
+      context.stopActiveServices();
+    } catch (IOException e) {
+      throw new ServiceFailedException("Failed to stop backup services", e);
+    }
+  }
+
+  @Override
+  public void prepareToExitState(HAContext context) throws ServiceFailedException {
+    context.prepareToStopStandbyServices();
+  }
+}
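
The pieces above fit together as a short call chain: when the node enters BackupState, enterState() asks the node's HAContext to start the active services, and on the BackupNode that context (BNHAContext) simply forwards to FSNamesystem. Below is a minimal, self-contained sketch of that chain; Context, Namesystem, BackupLikeState and EnterStateDemo are illustrative stand-ins, not the Hadoop types, and the error wrapping uses RuntimeException where the real code uses ServiceFailedException.

import java.io.IOException;

// Simplified stand-ins for HAContext and FSNamesystem; only the call chain
// exercised by BackupState.enterState() is modelled here.
interface Context {
  void startActiveServices() throws IOException;
}

class Namesystem {
  void startActiveServices() {
    System.out.println("namesystem services started");
  }
}

class BackupLikeState {
  // Mirrors BackupState.enterState(): delegate to the context, wrap failures.
  void enterState(Context context) {
    try {
      context.startActiveServices();
    } catch (IOException e) {
      throw new RuntimeException("Failed to start backup services", e);
    }
  }
}

public class EnterStateDemo {
  public static void main(String[] args) {
    Namesystem namesystem = new Namesystem();
    // Mirrors BNHAContext.startActiveServices(): forward to the namesystem.
    Context backupContext = namesystem::startActiveServices;
    new BackupLikeState().enterState(backupContext);
  }
}

The state only talks to the HAContext interface, so all BackupNode-specific behaviour stays in BNHAContext.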

FSNamesystem.java
@@ -121,6 +121,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -163,7 +164,6 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
-import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
@@ -3438,9 +3438,9 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
   private NNHAStatusHeartbeat createHaStatusHeartbeat() {
     HAState state = haContext.getState();
     NNHAStatusHeartbeat.State hbState;
-    if (state instanceof ActiveState) {
+    if (state.getServiceState() == HAServiceState.ACTIVE) {
       hbState = NNHAStatusHeartbeat.State.ACTIVE;
-    } else if (state instanceof StandbyState) {
+    } else if (state.getServiceState() == HAServiceState.STANDBY) {
       hbState = NNHAStatusHeartbeat.State.STANDBY;
     } else {
       throw new AssertionError("Invalid state: " + state.getClass());
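
The createHaStatusHeartbeat() change above is what keeps DataNode heartbeats working with the new state: BackupState is neither ActiveState nor StandbyState, so the old instanceof checks would have fallen through to the AssertionError, while dispatching on the reported HAServiceState still classifies it as standby (BackupState passes HAServiceState.STANDBY to its super constructor). A stand-alone sketch of the difference; the types below (ServiceState, HaState, ActiveLikeState, BackupLikeHaState) are stubs, not the Hadoop classes.

// Stub types that only mirror the shape of HAState and its subclasses.
enum ServiceState { ACTIVE, STANDBY }

abstract class HaState {
  private final ServiceState serviceState;
  HaState(ServiceState serviceState) { this.serviceState = serviceState; }
  ServiceState getServiceState() { return serviceState; }
}

class ActiveLikeState extends HaState { ActiveLikeState() { super(ServiceState.ACTIVE); } }
// Like BackupState: an additional subclass that still reports STANDBY.
class BackupLikeHaState extends HaState { BackupLikeHaState() { super(ServiceState.STANDBY); } }

public class HeartbeatStateDemo {
  // Mirrors the new createHaStatusHeartbeat() logic: dispatch on the reported
  // service state instead of the concrete class.
  static String hbState(HaState state) {
    if (state.getServiceState() == ServiceState.ACTIVE) {
      return "ACTIVE";
    } else if (state.getServiceState() == ServiceState.STANDBY) {
      return "STANDBY";
    }
    throw new AssertionError("Invalid state: " + state.getClass());
  }

  public static void main(String[] args) {
    System.out.println(hbState(new ActiveLikeState()));    // ACTIVE
    System.out.println(hbState(new BackupLikeHaState()));  // STANDBY, not an error
  }
}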

NameNode.java
@@ -598,11 +598,7 @@ protected NameNode(Configuration conf, NamenodeRole role)
     String nsId = getNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
-    if (!haEnabled) {
-      state = ACTIVE_STATE;
-    } else {
-      state = STANDBY_STATE;
-    }
+    state = createHAState();
     this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
     this.haContext = createHAContext();
     try {
@@ -619,6 +615,10 @@ protected NameNode(Configuration conf, NamenodeRole role)
     }
   }
 
+  protected HAState createHAState() {
+    return !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
+  }
+
   protected HAContext createHAContext() {
     return new NameNodeHAContext();
   }
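
The constructor change above replaces the inline if/else with an overridable factory method; that is what lets BackupNode substitute BackupState (its createHAState() override appears in the BackupNode.java hunk earlier) without duplicating the rest of the constructor. A small self-contained illustration of the pattern; Node, BackupLikeNode and CreateHAStateDemo are stand-in names, not the Hadoop classes.

// Stand-in classes that only mirror the shape of NameNode/BackupNode.
class Node {
  protected final boolean haEnabled;
  protected final String state;

  Node(boolean haEnabled) {
    this.haEnabled = haEnabled;
    this.state = createHAState();      // was an inline if/else before this change
  }

  protected String createHAState() {   // default choice, same as the old code
    return !haEnabled ? "ACTIVE" : "STANDBY";
  }
}

class BackupLikeNode extends Node {
  BackupLikeNode() { super(false); }

  @Override
  protected String createHAState() {   // like BackupNode: always its own state
    return "BACKUP";
  }
}

public class CreateHAStateDemo {
  public static void main(String[] args) {
    System.out.println(new Node(true).state);        // STANDBY
    System.out.println(new BackupLikeNode().state);  // BACKUP
  }
}

Calling an overridable method from a constructor is the usual caveat of this pattern; it is safe here because neither override touches state that is initialized later.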

NameNode.java (continued)
@@ -1298,7 +1298,7 @@ synchronized HAServiceState getServiceState() {
    * before exit.
    * @throws ExitException thrown only for testing.
    */
-  private synchronized void doImmediateShutdown(Throwable t)
+  protected synchronized void doImmediateShutdown(Throwable t)
       throws ExitException {
     String message = "Error encountered requiring NN shutdown. " +
         "Shutting down immediately.";
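
doImmediateShutdown() changes from private to protected because the new BNHAContext methods call it: BNHAContext is nested inside BackupNode, and a private method of the NameNode superclass is not visible from a subclass's nested class. A minimal stand-alone illustration (Base, Derived and VisibilityDemo are hypothetical names):

class Base {
  // private void shutdown() {}        // would not compile if called from Derived.Inner
  protected void shutdown() {
    System.out.println("shutting down");
  }
}

class Derived extends Base {
  class Inner {
    void fail() {
      shutdown();                      // resolves to the inherited protected method
    }
  }
}

public class VisibilityDemo {
  public static void main(String[] args) {
    new Derived().new Inner().fail();
  }
}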
|
@ -35,6 +35,7 @@
|
|||||||
import org.apache.hadoop.fs.FileSystem;
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
import org.apache.hadoop.fs.FileUtil;
|
import org.apache.hadoop.fs.FileUtil;
|
||||||
import org.apache.hadoop.fs.Path;
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
|
||||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||||
import org.apache.hadoop.hdfs.HAUtil;
|
import org.apache.hadoop.hdfs.HAUtil;
|
||||||
@@ -103,6 +104,9 @@ BackupNode startBackupNode(Configuration conf,
     BackupNode bn = (BackupNode)NameNode.createNameNode(
         new String[]{startupOpt.getName()}, c);
     assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
+    assertTrue(bn.getRole() + " must be in StandbyState",
+        bn.getNamesystem().getHAState()
+        .equalsIgnoreCase(HAServiceState.STANDBY.name()));
     return bn;
   }
 