diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
index 414b28e908..728582a4f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
@@ -31,3 +31,5 @@ HDFS-2591. MiniDFSCluster support to mix and match federation with HA (todd)
 HDFS-1975. Support for sharing the namenode state from active to standby. (jitendra, atm, todd)
 
 HDFS-1971. Send block report from datanode to both active and standby namenodes. (sanjay, todd via suresh)
+
+HDFS-2616. Change DatanodeProtocol#sendHeartbeat() to return HeartbeatResponse. (suresh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 2c4a15bf81..e83ec99c1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.ipc.RPC;
@@ -333,7 +334,7 @@ DatanodeCommand blockReport() throws IOException {
   }
 
-  DatanodeCommand [] sendHeartBeat() throws IOException {
+  HeartbeatResponse sendHeartBeat() throws IOException {
     LOG.info("heartbeat: " + this);
     // TODO: saw an NPE here - maybe if the two BPOS register at
     // same time, this one won't block on the other one?
@@ -420,16 +421,17 @@ private void offerService() throws Exception {
         //
         lastHeartbeat = startTime;
         if (!dn.areHeartbeatsDisabledForTests()) {
-          DatanodeCommand[] cmds = sendHeartBeat();
+          HeartbeatResponse resp = sendHeartBeat();
           dn.getMetrics().addHeartbeat(now() - startTime);
 
           long startProcessCommands = now();
-          if (!processCommand(cmds))
+          if (!processCommand(resp.getCommands()))
             continue;
           long endProcessCommands = now();
           if (endProcessCommands - startProcessCommands > 2000) {
-            LOG.info("Took " + (endProcessCommands - startProcessCommands) +
-                "ms to process " + cmds.length + " commands from NN");
+            LOG.info("Took " + (endProcessCommands - startProcessCommands)
+                + "ms to process " + resp.getCommands().length
+                + " commands from NN");
           }
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9a499b6525..09b6634dab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -154,6 +154,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -2688,7 +2689,7 @@ String getRegistrationID() {
    * @return an array of datanode commands
    * @throws IOException
    */
-  DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
+  HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
       long capacity, long dfsUsed, long remaining, long blockPoolUsed,
       int xceiverCount, int xmitsInProgress, int failedVolumes)
         throws IOException {
@@ -2699,16 +2700,13 @@ DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
           nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
           xceiverCount, maxTransfer, failedVolumes);
-      if (cmds != null) {
-        return cmds;
+      if (cmds == null) {
+        DatanodeCommand cmd = upgradeManager.getBroadcastCommand();
+        if (cmd != null) {
+          cmds = new DatanodeCommand[] {cmd};
+        }
       }
-
-      //check distributed upgrade
-      DatanodeCommand cmd = upgradeManager.getBroadcastCommand();
-      if (cmd != null) {
-        return new DatanodeCommand[] {cmd};
-      }
-      return null;
+      return new HeartbeatResponse(cmds);
     } finally {
       readUnlock();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 69b3f972c1..d6ba4175c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -857,7 +858,7 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
   }
 
   @Override // DatanodeProtocol
-  public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg,
+  public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg,
       long capacity, long dfsUsed, long remaining, long blockPoolUsed,
       int xmitsInProgress, int xceiverCount, int failedVolumes)
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 5a4cae8a5e..7b99f37123 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -22,8 +22,8 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
@@ -92,7 +92,7 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration registration
    * sendHeartbeat() tells the NameNode that the DataNode is still
    * alive and well.  Includes some status info, too.
    * It also gives the NameNode a chance to return
-   * an array of "DatanodeCommand" objects.
+   * an array of "DatanodeCommand" objects in HeartbeatResponse.
    * A DatanodeCommand tells the DataNode to invalidate local block(s),
    * or to copy them to other DataNodes, etc.
    * @param registration datanode registration information
@@ -106,7 +106,7 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration registration
    * @throws IOException on error
    */
   @Nullable
-  public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
+  public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
                                        long capacity,
                                        long dfsUsed, long remaining,
                                        long blockPoolUsed,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
new file mode 100644
index 0000000000..fb1a533afc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.ObjectWritable;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Response to {@link DatanodeProtocol#sendHeartbeat}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HeartbeatResponse implements Writable {
+  /** Commands returned from the namenode to the datanode */
+  private DatanodeCommand[] commands;
+
+  public HeartbeatResponse() {
+    // Empty constructor required for Writable
+  }
+
+  public HeartbeatResponse(DatanodeCommand[] cmds) {
+    commands = cmds;
+  }
+
+  public DatanodeCommand[] getCommands() {
+    return commands;
+  }
+
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  @Override
+  public void write(DataOutput out) throws IOException {
+    int length = commands == null ? 0 : commands.length;
+    out.writeInt(length);
+    for (int i = 0; i < length; i++) {
+      ObjectWritable.writeObject(out, commands[i], commands[i].getClass(),
+          null, true);
+    }
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int length = in.readInt();
+    commands = new DatanodeCommand[length];
+    ObjectWritable objectWritable = new ObjectWritable();
+    for (int i = 0; i < length; i++) {
+      commands[i] = (DatanodeCommand) ObjectWritable.readObject(in,
+          objectWritable, null);
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java
index 2c806afd44..11b833fa0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java
@@ -110,11 +110,11 @@ public DatanodeRegistrationWritable registerDatanode(
   }
 
   @Override
-  public DatanodeCommandWritable[] sendHeartbeat(
+  public HeartbeatResponseWritable sendHeartbeat(
       DatanodeRegistrationWritable registration, long capacity, long dfsUsed,
       long remaining, long blockPoolUsed, int xmitsInProgress,
       int xceiverCount, int failedVolumes) throws IOException {
-    return DatanodeCommandWritable.convert(server.sendHeartbeat(
+    return HeartbeatResponseWritable.convert(server.sendHeartbeat(
         registration.convert(), capacity, dfsUsed, remaining, blockPoolUsed,
         xmitsInProgress, xceiverCount, failedVolumes));
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java
index 1664940474..fb29fffac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
@@ -130,14 +131,14 @@ public DatanodeRegistration registerDatanode(DatanodeRegistration registration)
   }
 
   @Override
-  public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
+  public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
       long capacity, long dfsUsed, long remaining, long blockPoolUsed,
       int xmitsInProgress, int xceiverCount, int failedVolumes)
       throws IOException {
-    return DatanodeCommandWritable.convert(rpcProxy.sendHeartbeat(
-        DatanodeRegistrationWritable.convert(registration), capacity,
-        dfsUsed, remaining, blockPoolUsed, xmitsInProgress, xceiverCount,
-        failedVolumes));
+    return rpcProxy.sendHeartbeat(
+        DatanodeRegistrationWritable.convert(registration), capacity, dfsUsed,
+        remaining, blockPoolUsed, xmitsInProgress, xceiverCount, failedVolumes)
+        .convert();
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java
index f630053bf9..8625c22a53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java
@@ -99,7 +99,7 @@ public DatanodeRegistrationWritable registerDatanode(
    * @throws IOException on error
    */
   @Nullable
-  public DatanodeCommandWritable[] sendHeartbeat(
+  public HeartbeatResponseWritable sendHeartbeat(
       DatanodeRegistrationWritable registration, long capacity, long dfsUsed,
       long remaining, long blockPoolUsed, int xmitsInProgress,
      int xceiverCount, int failedVolumes) throws IOException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/HeartbeatResponseWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/HeartbeatResponseWritable.java
new file mode 100644
index 0000000000..f7fe3db7b7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/HeartbeatResponseWritable.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
+import org.apache.hadoop.io.ObjectWritable;
+import org.apache.hadoop.io.Writable;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HeartbeatResponseWritable implements Writable {
+  private DatanodeCommandWritable[] commands;
+
+  public HeartbeatResponseWritable() {
+    // Empty constructor for Writable
+  }
+
+  public HeartbeatResponseWritable(DatanodeCommandWritable[] cmds) {
+    commands = cmds;
+  }
+
+  public HeartbeatResponse convert() {
+    return new HeartbeatResponse(DatanodeCommandWritable.convert(commands));
+  }
+
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  @Override
+  public void write(DataOutput out) throws IOException {
+    int length = commands == null ? 0 : commands.length;
+    out.writeInt(length);
+    for (int i = 0; i < length; i++) {
+      ObjectWritable.writeObject(out, commands[i], commands[i].getClass(),
+          null, true);
+    }
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int length = in.readInt();
+    commands = new DatanodeCommandWritable[length];
+    ObjectWritable objectWritable = new ObjectWritable();
+    for (int i = 0; i < length; i++) {
+      commands[i] = (DatanodeCommandWritable) ObjectWritable.readObject(in,
+          objectWritable, null);
+    }
+  }
+
+  public static HeartbeatResponseWritable convert(
+      HeartbeatResponse resp) {
+    return new HeartbeatResponseWritable(DatanodeCommandWritable.convert(resp
+        .getCommands()));
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index c18a5c04fe..45741ceae2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -41,7 +41,7 @@ public class TestHeartbeatHandling extends TestCase {
   /**
    * Test if
-   * {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, long, int, int)}
+   * {@link FSNamesystem#handleHeartbeat}
    * can pick up replication and/or invalidate requests and observes the max
    * limit
    */
@@ -75,7 +75,8 @@ public void testHeartbeat() throws Exception {
         dd.addBlockToBeReplicated(
             new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
       }
-      DatanodeCommand[]cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
+      DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
+          namesystem).getCommands();
       assertEquals(1, cmds.length);
       assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
       assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
@@ -85,26 +86,30 @@ public void testHeartbeat() throws Exception {
         blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
       }
       dd.addBlocksToBeInvalidated(blockList);
-      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
+      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
+          .getCommands();
       assertEquals(2, cmds.length);
       assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
       assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
       assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
       assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
 
-      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
+      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
+          .getCommands();
       assertEquals(2, cmds.length);
       assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
       assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
       assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
       assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
 
-      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
+      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
+          .getCommands();
       assertEquals(1, cmds.length);
       assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
       assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
 
-      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem);
+      cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
+          .getCommands();
       assertEquals(null, cmds);
       }
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index afc003f938..7d15900756 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -25,8 +25,6 @@
 import java.util.EnumSet;
 import java.util.List;
 
-import javax.security.auth.login.LoginException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -78,7 +76,7 @@ *
  * <li>-logLevel L specifies the logging level when the benchmark runs.
  * The default logging level is {@link Level#ERROR}.</li>
  * <li>-UGCacheRefreshCount G will cause the benchmark to call
- * {@link NameNode#refreshUserToGroupsMappings()} after
+ * {@link NameNodeRpcServer#refreshUserToGroupsMappings} after
  * every G operations, which purges the name-node's user group cache.
 * By default the refresh is never called.</li>
  * <li>-keepResults do not clean up the name-space after execution.</li>
@@ -104,7 +102,7 @@ public class NNThroughputBenchmark {
   static NameNode nameNode;
   static NamenodeProtocols nameNodeProto;
 
-  NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
+  NNThroughputBenchmark(Configuration conf) throws IOException {
     config = conf;
     // We do not need many handlers, since each thread simulates a handler
     // by calling name-node methods directly
@@ -125,7 +123,7 @@ public class NNThroughputBenchmark {
     nameNodeProto = nameNode.getRpcServer();
   }
 
-  void close() throws IOException {
+  void close() {
     nameNode.stop();
   }
 
@@ -806,7 +804,8 @@ void sendHeartbeat() throws IOException {
       // register datanode
      // TODO:FEDERATION currently a single block pool is supported
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0)
+          .getCommands();
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -851,7 +850,8 @@ int replicateBlocks() throws IOException {
       // register datanode
       // TODO:FEDERATION currently a single block pool is supported
       DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
-          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0);
+          DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0, 0)
+          .getCommands();
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
@@ -916,7 +916,7 @@ class BlockReportStats extends OperationStatsBase {
       config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3 * 60);
       parseArguments(args);
       // adjust replication to the number of data-nodes
-      this.replication = (short)Math.min((int)replication, getNumDatanodes());
+      this.replication = (short)Math.min(replication, getNumDatanodes());
     }
 
     /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index d0aa51f2b1..c7cc61dc13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -26,8 +26,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.AccessControlException;
@@ -90,7 +90,7 @@ public static DelegationTokenSecretManager getDtSecretManager(
     return ns.getDelegationTokenSecretManager();
   }
 
-  public static DatanodeCommand[] sendHeartBeat(DatanodeRegistration nodeReg,
+  public static HeartbeatResponse sendHeartBeat(DatanodeRegistration nodeReg,
       DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
     return namesystem.handleHeartbeat(nodeReg, dd.getCapacity(),
         dd.getDfsUsed(), dd.getRemaining(), dd.getBlockPoolUsed(), 0, 0, 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 2e73ec556a..33a7129457 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -128,7 +128,8 @@ public void testDeadDatanode() throws Exception {
 
     // Ensure heartbeat from dead datanode is rejected with a command
     // that asks datanode to register again
-    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
+    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0)
+        .getCommands();
     Assert.assertEquals(1, cmd.length);
     Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
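
Reviewer note (not part of the patch): the new Writable encodes the command array as an int length followed by ObjectWritable-wrapped entries, so HeartbeatResponse.write() maps a null command array to length 0, while readFields() always allocates an array. A null array therefore does not survive an RPC round trip; remote callers see an empty array, whereas in-process callers such as TestHeartbeatHandling (which invokes FSNamesystem#handleHeartbeat directly, with no serialization) can still observe null, which is why its final assertEquals(null, cmds) passes. A minimal round-trip sketch illustrating this, assuming the patched HeartbeatResponse class is on the classpath (the HeartbeatResponseRoundTrip class name is made up for illustration):

    package org.apache.hadoop.hdfs.server.protocol;

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Hypothetical demo class, not part of HDFS-2616.
    public class HeartbeatResponseRoundTrip {
      public static void main(String[] args) throws IOException {
        // A response carrying no commands, as FSNamesystem#handleHeartbeat now
        // produces when neither the DatanodeManager nor the upgrade manager
        // has anything to send.
        HeartbeatResponse sent = new HeartbeatResponse(null);

        // Serialize through the Writable contract added by this patch:
        // an int length (0 here) and no command entries.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        sent.write(new DataOutputStream(bytes));

        // Deserialize; readFields() allocates new DatanodeCommand[length].
        HeartbeatResponse received = new HeartbeatResponse();
        received.readFields(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // null was written as length 0 and comes back as an empty array,
        // so callers on the RPC path never see a null command array.
        System.out.println(received.getCommands().length); // prints 0
      }
    }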