HDFS-2489. Move Finalize and Register to separate file out of DatanodeCommand.java. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1188282 13f79535-47bb-0310-9956-ffa450edef68
parent 7ce1c4ab35
commit 2b35e8aa4f
@@ -65,14 +65,18 @@ Trunk (unreleased changes)
 
     HDFS-2480. Separate datatypes for NamenodeProtocol. (suresh)
 
     HDFS-2181 Separate HDFS Client wire protocol data types (sanjay)
 
     HDFS-2294. Download of commons-daemon TAR should not be under target (tucu)
 
-    HDFS-2322. the build fails in Windows because commons-daemon TAR cannot be fetched. (tucu)
+    HDFS-2322. the build fails in Windows because commons-daemon TAR cannot be
+    fetched. (tucu)
 
     HDFS-2427. Change the default permission in webhdfs to 755 and add range
     check/validation for all parameters. (szetszwo)
 
+    HDFS-2489. Move Finalize and Register to separate file out of
+    DatanodeCommand.java. (suresh)
+
   BUG FIXES
 
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
@@ -60,6 +60,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -859,7 +860,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
     try {
       nodeinfo = getDatanode(nodeReg);
     } catch(UnregisteredNodeException e) {
-      return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
     }
 
     // Check if this datanode should actually be shutdown instead.
@@ -869,7 +870,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
     }
 
     if (nodeinfo == null || !nodeinfo.isAlive) {
-      return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+      return new DatanodeCommand[]{RegisterCommand.REGISTER};
     }
 
     heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed,
@@ -151,6 +151,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -1418,7 +1419,7 @@ private boolean processCommand(DatanodeCommand cmd) throws IOException {
       }
       break;
     case DatanodeProtocol.DNA_FINALIZE:
-      storage.finalizeUpgrade(((DatanodeCommand.Finalize) cmd)
+      storage.finalizeUpgrade(((FinalizeCommand) cmd)
           .getBlockPoolId());
       break;
     case UpgradeCommand.UC_ACTION_START_UPGRADE:
@@ -69,6 +69,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -803,7 +804,7 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
 
     namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
     if (nn.getFSImage().isUpgradeFinalized())
-      return new DatanodeCommand.Finalize(poolId);
+      return new FinalizeCommand(poolId);
     return null;
   }
 
@@ -17,17 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.avro.reflect.Union;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableUtils;
+import org.apache.avro.reflect.Union;
 
 /**
  * Base class for data-node command.
@@ -36,55 +28,13 @@
 
 // Declare subclasses for Avro's denormalized representation
 @Union({Void.class,
-      DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+      RegisterCommand.class, FinalizeCommand.class,
       BlockCommand.class, UpgradeCommand.class,
       BlockRecoveryCommand.class, KeyUpdateCommand.class})
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class DatanodeCommand extends ServerCommand {
-  static class Register extends DatanodeCommand {
-    private Register() {super(DatanodeProtocol.DNA_REGISTER);}
-    public void readFields(DataInput in) {}
-    public void write(DataOutput out) {}
-  }
-
-  public static class Finalize extends DatanodeCommand {
-    String blockPoolId;
-    private Finalize() {
-      super(DatanodeProtocol.DNA_FINALIZE);
-    }
-
-    public Finalize(String bpid) {
-      super(DatanodeProtocol.DNA_FINALIZE);
-      blockPoolId = bpid;
-    }
-
-    public String getBlockPoolId() {
-      return blockPoolId;
-    }
-
-    public void readFields(DataInput in) throws IOException {
-      blockPoolId = WritableUtils.readString(in);
-    }
-    public void write(DataOutput out) throws IOException {
-      WritableUtils.writeString(out, blockPoolId);
-    }
-  }
-
-  static { // register a ctor
-    WritableFactories.setFactory(Register.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Register();}
-        });
-    WritableFactories.setFactory(Finalize.class,
-        new WritableFactory() {
-          public Writable newInstance() {return new Finalize();}
-        });
-  }
-
-  public static final DatanodeCommand REGISTER = new Register();
-
   public DatanodeCommand() {
     super();
   }
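The two files this commit adds, RegisterCommand.java and FinalizeCommand.java, are not expanded in this view. A sketch of their likely shape, reconstructed from the inner classes removed above and the call sites changed elsewhere in the diff (the javadoc wording and the mirrored @InterfaceAudience/@InterfaceStability annotations are assumptions):

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;

/**
 * A RegisterCommand instructs a datanode to register itself with the
 * namenode. It carries no payload, so a single shared instance is exposed,
 * replacing the old DatanodeCommand.REGISTER constant.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RegisterCommand extends DatanodeCommand {
  public static final DatanodeCommand REGISTER = new RegisterCommand();

  public RegisterCommand() {
    super(DatanodeProtocol.DNA_REGISTER);
  }

  @Override
  public void readFields(DataInput in) { } // stateless: nothing to read

  @Override
  public void write(DataOutput out) { }    // stateless: nothing to write
}

package org.apache.hadoop.hdfs.server.protocol;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.io.WritableUtils;

/**
 * A FinalizeCommand tells a datanode to finalize the previous upgrade
 * for the given block pool.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FinalizeCommand extends DatanodeCommand {
  static { // register a ctor, as the removed static block did
    WritableFactories.setFactory(FinalizeCommand.class,
        new WritableFactory() {
          public Writable newInstance() { return new FinalizeCommand(); }
        });
  }

  String blockPoolId;

  private FinalizeCommand() { // no-arg ctor used only by deserialization
    super(DatanodeProtocol.DNA_FINALIZE);
  }

  public FinalizeCommand(String bpid) {
    super(DatanodeProtocol.DNA_FINALIZE);
    blockPoolId = bpid;
  }

  public String getBlockPoolId() {
    return blockPoolId;
  }

  public void readFields(DataInput in) throws IOException {
    blockPoolId = WritableUtils.readString(in);
  }

  public void write(DataOutput out) throws IOException {
    WritableUtils.writeString(out, blockPoolId);
  }
}

Because RegisterCommand.REGISTER preserves the shared singleton previously exposed as DatanodeCommand.REGISTER, the heartbeat paths in DatanodeManager above change only the class name, not the behavior.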
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.junit.After;
 import org.junit.Test;
 
@@ -129,7 +130,7 @@ public void testDeadDatanode() throws Exception {
     // that asks datanode to register again
     DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
     Assert.assertEquals(1, cmd.length);
-    Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
+    Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
 }