diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 70d4954bcf..05fdff045d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -402,6 +402,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3935. Add JournalNode to the start/stop scripts (Andy Isaacson via todd)
 
+    HDFS-4268. Remove redundant enum NNHAStatusHeartbeat.State. (shv)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 0603d15dd8..e7833d1c2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1232,9 +1233,9 @@ public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
     if (s == null) return null;
     switch (s.getState()) {
     case ACTIVE:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.ACTIVE, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
     case STANDBY:
-      return new NNHAStatusHeartbeat(NNHAStatusHeartbeat.State.STANDBY, s.getTxid());
+      return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
     default:
       throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index acd8e9ce51..c170cf9edd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -26,6 +26,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -411,7 +412,7 @@ synchronized void updateActorStatesFromHeartbeat(
     final long txid = nnHaState.getTxId();
 
     final boolean nnClaimsActive =
-        nnHaState.getState() == NNHAStatusHeartbeat.State.ACTIVE;
+        nnHaState.getState() == HAServiceState.ACTIVE;
     final boolean bposThinksActive = bpServiceToActive == actor;
     final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
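The datanode side now compares the heartbeat state directly against HAServiceState. As an illustrative sketch only (not part of the patch), the claim-acceptance check from updateActorStatesFromHeartbeat above can be read as the following standalone helper, with the surrounding BPOfferService fields replaced by plain parameters:

    import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
    import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

    final class ActiveClaimCheck {
      // An NN heartbeat is honoured as a new active claim only when it reports
      // HAServiceState.ACTIVE with a txid newer than the last accepted claim.
      static boolean acceptsActiveClaim(NNHAStatusHeartbeat nnHaState,
                                        long lastActiveClaimTxId) {
        final boolean nnClaimsActive =
            nnHaState.getState() == HAServiceState.ACTIVE;
        final boolean isMoreRecentClaim =
            nnHaState.getTxId() > lastActiveClaimTxId;
        return nnClaimsActive && isMoreRecentClaim;
      }
    }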
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
index f8c79284c1..ce11fc9e68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
@@ -2,6 +2,7 @@
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
@@ -9,6 +10,7 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.ipc.StandbyException;
 
+@InterfaceAudience.Private
 public class BackupState extends HAState {
 
   public BackupState() {
@@ -26,7 +28,7 @@ public boolean shouldPopulateReplQueues() {
     return false;
   }
 
-  @Override
+  @Override // HAState
   public void enterState(HAContext context) throws ServiceFailedException {
     try {
       context.startActiveServices();
@@ -35,7 +37,7 @@ public void enterState(HAContext context) throws ServiceFailedException {
     }
   }
 
-  @Override
+  @Override // HAState
   public void exitState(HAContext context) throws ServiceFailedException {
     try {
       context.stopActiveServices();
@@ -44,7 +46,7 @@ public void exitState(HAContext context) throws ServiceFailedException {
     }
   }
 
-  @Override
+  @Override // HAState
   public void prepareToExitState(HAContext context) throws ServiceFailedException {
     context.prepareToStopStandbyServices();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 682696d627..ccb91ca328 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -168,7 +168,6 @@
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
-import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -1003,8 +1002,8 @@ public boolean isInStandbyState() {
       // start in active.
       return haEnabled;
     }
-  
-    return haContext.getState() instanceof StandbyState;
+
+    return HAServiceState.STANDBY == haContext.getState().getServiceState();
   }
 
   /**
@@ -3437,15 +3436,7 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
 
   private NNHAStatusHeartbeat createHaStatusHeartbeat() {
     HAState state = haContext.getState();
-    NNHAStatusHeartbeat.State hbState;
-    if (state.getServiceState() == HAServiceState.ACTIVE) {
-      hbState = NNHAStatusHeartbeat.State.ACTIVE;
-    } else if (state.getServiceState() == HAServiceState.STANDBY) {
-      hbState = NNHAStatusHeartbeat.State.STANDBY;
-    } else {
-      throw new AssertionError("Invalid state: " + state.getClass());
-    }
-    return new NNHAStatusHeartbeat(hbState,
+    return new NNHAStatusHeartbeat(state.getServiceState(),
         getFSImage().getLastAppliedOrWrittenTxId());
   }
 
@@ -3874,7 +3865,7 @@ private void enter() {
     private synchronized void leave() {
       // if not done yet, initialize replication queues.
       // In the standby, do not populate repl queues
-      if (!isPopulatingReplQueues() && !isInStandbyState()) {
+      if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
         initializeReplQueues();
       }
       long timeInSafemode = now() - startTime;
@@ -3917,7 +3908,8 @@ private synchronized void initializeReplQueues() {
      * initializing replication queues.
      */
     private synchronized boolean canInitializeReplQueues() {
-      return !isInStandbyState() && blockSafe >= blockReplQueueThreshold;
+      return shouldPopulateReplQueues()
+          && blockSafe >= blockReplQueueThreshold;
     }
 
     /**
@@ -4257,7 +4249,7 @@ public boolean isInStartupSafeMode() {
 
   @Override
   public boolean isPopulatingReplQueues() {
-    if (isInStandbyState()) {
+    if (!shouldPopulateReplQueues()) {
       return false;
     }
     // safeMode is volatile, and may be set to null at any time
@@ -4266,7 +4258,13 @@ public boolean isPopulatingReplQueues() {
       return true;
     return safeMode.isPopulatingReplQueues();
   }
-  
+
+  private boolean shouldPopulateReplQueues() {
+    if(haContext == null || haContext.getState() == null)
+      return false;
+    return haContext.getState().shouldPopulateReplQueues();
+  }
+
   @Override
   public void incrementSafeBlockCount(int replication) {
     // safeMode is volatile, and may be set to null at any time
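Taken together, the FSNamesystem hunks replace the scattered isInStandbyState() checks with a single predicate that defers to the current HAState; BackupState (above) answers shouldPopulateReplQueues() with false. The following is a rough model only, not part of the patch: it pulls the changed call sites into one static helper and treats blockSafe and blockReplQueueThreshold as stand-ins for the real FSNamesystem fields (the active and standby states are assumed to keep answering true and false respectively, as before).

    import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;

    final class ReplQueueGate {
      // Mirrors the added FSNamesystem.shouldPopulateReplQueues(): before the
      // HA context is wired up there is no state to ask, so answer false;
      // otherwise the decision belongs to the current HAState.
      static boolean shouldPopulateReplQueues(HAContext haContext) {
        if (haContext == null || haContext.getState() == null) {
          return false;
        }
        return haContext.getState().shouldPopulateReplQueues();
      }

      // Mirrors the reworked canInitializeReplQueues(): only states that
      // populate replication queues may initialize them, and only once enough
      // blocks have been reported safe.
      static boolean canInitializeReplQueues(HAContext haContext,
          long blockSafe, long blockReplQueueThreshold) {
        return shouldPopulateReplQueues(haContext)
            && blockSafe >= blockReplQueueThreshold;
      }
    }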
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
index 337a83c2ed..66ccb3bd79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
@@ -19,31 +19,26 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class NNHAStatusHeartbeat {
 
-  private State state;
+  private HAServiceState state;
   private long txid = HdfsConstants.INVALID_TXID;
 
-  public NNHAStatusHeartbeat(State state, long txid) {
+  public NNHAStatusHeartbeat(HAServiceState state, long txid) {
     this.state = state;
     this.txid = txid;
   }
 
-  public State getState() {
+  public HAServiceState getState() {
     return state;
   }
 
   public long getTxId() {
     return txid;
   }
-
-  @InterfaceAudience.Private
-  public enum State {
-    ACTIVE,
-    STANDBY;
-  }
 }
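With the nested enum gone, callers construct and inspect the heartbeat with the generic HAServiceState. A minimal usage sketch follows (not part of the patch; txid 42 is an arbitrary example value):

    import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
    import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

    class NNHAStatusHeartbeatExample {
      public static void main(String[] args) {
        NNHAStatusHeartbeat hb = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 42L);
        boolean nnClaimsActive = hb.getState() == HAServiceState.ACTIVE; // true
        System.out.println(nnClaimsActive + " at txid " + hb.getTxId());
      }
    }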
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 8e01e6d70a..504e1ca685 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -41,7 +42,6 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
@@ -123,7 +123,7 @@ private DatanodeProtocolClientSideTranslatorPB setupNNMock(int nnIdx)
         Mockito.anyInt(),
         Mockito.anyInt(),
         Mockito.anyInt());
-    mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(State.STANDBY, 0);
+    mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
     return mock;
   }
 
@@ -255,12 +255,12 @@ public void testPickActiveNameNode() throws Exception {
     assertNull(bpos.getActiveNN());
 
     // Have NN1 claim active at txid 1
-    mockHaStatuses[0] = new NNHAStatusHeartbeat(State.ACTIVE, 1);
+    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
     bpos.triggerHeartbeatForTests();
     assertSame(mockNN1, bpos.getActiveNN());
 
     // NN2 claims active at a higher txid
-    mockHaStatuses[1] = new NNHAStatusHeartbeat(State.ACTIVE, 2);
+    mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 2);
     bpos.triggerHeartbeatForTests();
     assertSame(mockNN2, bpos.getActiveNN());
 
@@ -272,12 +272,12 @@ public void testPickActiveNameNode() throws Exception {
     // Even if NN2 goes to standby, DN shouldn't reset to talking to NN1,
     // because NN1's txid is lower than the last active txid. Instead,
     // it should consider neither active.
-    mockHaStatuses[1] = new NNHAStatusHeartbeat(State.STANDBY, 2);
+    mockHaStatuses[1] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 2);
     bpos.triggerHeartbeatForTests();
     assertNull(bpos.getActiveNN());
 
     // Now if NN1 goes back to a higher txid, it should be considered active
-    mockHaStatuses[0] = new NNHAStatusHeartbeat(State.ACTIVE, 3);
+    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 3);
     bpos.triggerHeartbeatForTests();
     assertSame(mockNN1, bpos.getActiveNN());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index d496419c7e..a400e85059 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -72,7 +73,6 @@
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
-import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat.State;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
@@ -157,7 +157,7 @@ public DatanodeRegistration answer(InvocationOnMock invocation)
         Mockito.anyInt()))
       .thenReturn(new HeartbeatResponse(
           new DatanodeCommand[0],
-          new NNHAStatusHeartbeat(State.ACTIVE, 1)));
+          new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1)));
 
     dn = new DataNode(conf, dirs, null) {
       @Override
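Both test classes stub the heartbeat RPC with the same response shape. As a hypothetical convenience only (not part of the patch), such stubs could share a helper; the constructor arguments below are exactly those used in the hunks above:

    import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
    import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
    import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

    final class HeartbeatTestUtil {
      // Builds a HeartbeatResponse with no commands and the given HA claim,
      // so individual test cases only vary the claimed state and txid.
      static HeartbeatResponse fakeHeartbeat(HAServiceState state, long txid) {
        return new HeartbeatResponse(new DatanodeCommand[0],
            new NNHAStatusHeartbeat(state, txid));
      }
    }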