HDFS-2489. Move Finalize and Register to separate file out of DatanodeCommand.java. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1188282 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2011-10-24 18:40:13 +00:00
parent 7ce1c4ab35
commit 2b35e8aa4f
6 changed files with 18 additions and 60 deletions

View File

@@ -65,14 +65,18 @@ Trunk (unreleased changes)
     HDFS-2480. Separate datatypes for NamenodeProtocol. (suresh)

     HDFS-2181 Separate HDFS Client wire protocol data types (sanjay)

     HDFS-2294. Download of commons-daemon TAR should not be under target (tucu)

-    HDFS-2322. the build fails in Windows because commons-daemon TAR cannot be fetched. (tucu)
+    HDFS-2322. the build fails in Windows because commons-daemon TAR cannot be
+    fetched. (tucu)

     HDFS-2427. Change the default permission in webhdfs to 755 and add range
     check/validation for all parameters. (szetszwo)

+    HDFS-2489. Move Finalize and Register to separate file out of
+    DatanodeCommand.java. (suresh)
+
   BUG FIXES

     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)

View File

@@ -60,6 +60,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -859,7 +860,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
     try {
       nodeinfo = getDatanode(nodeReg);
     } catch(UnregisteredNodeException e) {
-      return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
     }

     // Check if this datanode should actually be shutdown instead.
@@ -869,7 +870,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
     }

     if (nodeinfo == null || !nodeinfo.isAlive) {
-      return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
     }

     heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed,
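Note: RegisterCommand.java is one of the new files added by this commit but is not shown in this view. A minimal sketch of what it presumably contains, reconstructed from the Register inner class that this commit deletes from DatanodeCommand.java (further down); visibility details are assumptions:

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

/** Command asking a datanode to (re)register with the namenode. Sketch only. */
public class RegisterCommand extends DatanodeCommand {
  // Shared instance, replacing the old DatanodeCommand.REGISTER constant.
  public static final DatanodeCommand REGISTER = new RegisterCommand();

  static { // register a ctor for Writable deserialization
    WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
      public Writable newInstance() { return new RegisterCommand(); }
    });
  }

  public RegisterCommand() {
    super(DatanodeProtocol.DNA_REGISTER);
  }

  // The command carries no payload, so there is nothing to serialize.
  public void readFields(DataInput in) { }
  public void write(DataOutput out) { }
}

Since the command is stateless, handleHeartbeat can keep returning the single REGISTER instance rather than allocating a new command per heartbeat.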

View File

@@ -151,6 +151,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -1418,7 +1419,7 @@ private boolean processCommand(DatanodeCommand cmd) throws IOException {
       }
       break;
     case DatanodeProtocol.DNA_FINALIZE:
-      storage.finalizeUpgrade(((DatanodeCommand.Finalize) cmd)
+      storage.finalizeUpgrade(((FinalizeCommand) cmd)
           .getBlockPoolId());
       break;
     case UpgradeCommand.UC_ACTION_START_UPGRADE:

View File

@@ -69,6 +69,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -803,7 +804,7 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
     namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
     if (nn.getFSImage().isUpgradeFinalized())
-      return new DatanodeCommand.Finalize(poolId);
+      return new FinalizeCommand(poolId);
     return null;
   }
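Note: FinalizeCommand.java is likewise a new file not shown in this view. Reconstructed from the Finalize inner class this commit deletes from DatanodeCommand.java (below), again with visibility details assumed:

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;

/** Command telling a datanode to finalize the upgrade of a block pool. Sketch only. */
public class FinalizeCommand extends DatanodeCommand {
  static { // register a ctor for Writable deserialization
    WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
      public Writable newInstance() { return new FinalizeCommand(); }
    });
  }

  String blockPoolId;

  private FinalizeCommand() { // used only by the Writable factory
    super(DatanodeProtocol.DNA_FINALIZE);
  }

  public FinalizeCommand(String bpid) {
    super(DatanodeProtocol.DNA_FINALIZE);
    blockPoolId = bpid;
  }

  public String getBlockPoolId() {
    return blockPoolId;
  }

  public void readFields(DataInput in) throws IOException {
    blockPoolId = WritableUtils.readString(in);
  }

  public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, blockPoolId);
  }
}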

View File

@@ -17,17 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;

-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
+import org.apache.avro.reflect.Union;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableUtils;
-
-import org.apache.avro.reflect.Union;

 /**
  * Base class for data-node command.
@@ -36,55 +28,13 @@
 // Declare subclasses for Avro's denormalized representation
 @Union({Void.class,
-      DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+      RegisterCommand.class, FinalizeCommand.class,
       BlockCommand.class, UpgradeCommand.class,
       BlockRecoveryCommand.class, KeyUpdateCommand.class})
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  static class Register extends DatanodeCommand {
-    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  public static class Finalize extends DatanodeCommand {
-    String blockPoolId;
-
-    private Finalize() {
-      super(DatanodeProtocol.DNA_FINALIZE);
-    }
-
-    public Finalize(String bpid) {
-      super(DatanodeProtocol.DNA_FINALIZE);
-      blockPoolId = bpid;
-    }
-
-    public String getBlockPoolId() {
-      return blockPoolId;
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      blockPoolId = WritableUtils.readString(in);
-    }
-
-    public void write(DataOutput out) throws IOException {
-      WritableUtils.writeString(out, blockPoolId);
-    }
-  }
-
-  static { // register a ctor
-    WritableFactories.setFactory(Register.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Register();}
-        });
-    WritableFactories.setFactory(Finalize.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Finalize();}
-        });
-  }
-
-  public static final DatanodeCommand REGISTER = new Register();
-
   public DatanodeCommand() {
     super();
   }
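With the inner classes gone, the @Union list for Avro's denormalized representation names only top-level command classes, matching the existing BlockCommand, UpgradeCommand, BlockRecoveryCommand, and KeyUpdateCommand. The Writable factory registrations removed from the static block here presumably move into the new RegisterCommand.java and FinalizeCommand.java (as in the sketches above), so wire deserialization of both commands keeps working.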

View File

@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.junit.After;
 import org.junit.Test;
@@ -129,7 +130,7 @@ public void testDeadDatanode() throws Exception {
     // that asks datanode to register again
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
+    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
 }