HDFS-3832. Remove protocol methods related to DistributedUpgrade. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1376139 13f79535-47bb-0310-9956-ffa450edef68
parent 89e16be81b
commit 380870d544
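HDFS-3832 deletes the long-dead distributed-upgrade RPCs from the client, datanode, and namenode protocols; the ordinary upgrade path, including finalizeUpgrade, is untouched. For orientation, a minimal caller sketch of the surviving API (illustrative only, not part of this commit):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FinalizeUpgradeProbe {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      // finalizeUpgrade() is one of the upgrade RPCs this commit keeps;
      // the distributedUpgradeProgress() sibling is removed below.
      ((DistributedFileSystem) fs).finalizeUpgrade();
    }
  }
}
```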
@@ -125,6 +125,8 @@ Trunk (unreleased changes)
HDFS-3817. Avoid printing SafeModeException stack trace.
(Brandon Li via suresh)

HDFS-3832. Remove protocol methods related to DistributedUpgrade. (suresh)

OPTIMIZATIONS

BUG FIXES
@@ -109,7 +109,6 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -129,7 +128,6 @@
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.DataOutputBuffer;
@@ -1200,7 +1198,7 @@ public DFSOutputStream create(String src,
* @param blockSize maximum block size
* @param progress interface for reporting client progress
* @param buffersize underlying buffer size
* @param checksumOpts checksum options
* @param checksumOpt checksum options
*
* @return output stream
*
@@ -1897,14 +1895,6 @@ public void finalizeUpgrade() throws IOException {
namenode.finalizeUpgrade();
}

/**
* @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
*/
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException {
return namenode.distributedUpgradeProgress(action);
}

/**
*/
@Deprecated
@@ -55,13 +55,11 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
@@ -654,11 +652,6 @@ public void finalizeUpgrade() throws IOException {
dfs.finalizeUpgrade();
}

public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
) throws IOException {
return dfs.distributedUpgradeProgress(action);
}

/*
* Requests the namenode to dump data structures into specified
* file.
@@ -33,8 +33,6 @@
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@@ -694,17 +692,6 @@ public boolean restoreFailedStorage(String arg)
*/
public void finalizeUpgrade() throws IOException;

/**
* <em>Method no longer used - retained only for backward compatibility</em>
*
* Report distributed upgrade progress or force current upgrade to proceed.
* @param action {@link HdfsConstants.UpgradeAction} to perform
* @return upgrade status information or null if no upgrades are in progress
* @throws IOException
*/
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException;

/**
* @return CorruptFileBlocks, containing a list of corrupt files (with
* duplicates if there is more than one corrupt block in a file)
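The ClientProtocol hunk above removes a method that had already been documented as "no longer used - retained only for backward compatibility": the distributed-upgrade RPC was first kept as an unsupported stub, then deleted here. A sketch of that retire-then-remove idiom for an RPC interface (hypothetical names, not code from this commit):

```java
import java.io.IOException;

/** Illustrative sketch: an RPC interface method mid-retirement. */
public interface ExampleProtocol {
  /** Placeholder request/response types for the sketch. */
  enum ExampleAction { GET_STATUS }
  final class ExampleReport { }

  /**
   * <em>Method no longer used - retained only for backward compatibility.</em>
   * Servers answer it with an exception; a follow-up change like this
   * commit deletes it from the interface entirely.
   */
  @Deprecated
  ExampleReport exampleProgress(ExampleAction action) throws IOException;
}
```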
@@ -60,7 +60,7 @@ protected HdfsConstants() {
public static int MAX_PATH_LENGTH = 8000;
public static int MAX_PATH_DEPTH = 1000;

// TODO mb@media-style.com: should be conf injected?
// TODO should be conf injected?
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
@@ -84,16 +84,6 @@ public static enum DatanodeReportType {
// An invalid transaction ID that will never be seen in a real namesystem.
public static final long INVALID_TXID = -12345;

/**
* Distributed upgrade actions:
*
* 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
* upgrade if it is stuck, no matter what the status is.
*/
public static enum UpgradeAction {
GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
}

/**
* URI Scheme for hdfs://namenode/ URIs.
*/
@@ -50,8 +50,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@@ -130,7 +128,6 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;

import com.google.protobuf.RpcController;
@@ -570,24 +567,6 @@ public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
}
}

@Override
public DistributedUpgradeProgressResponseProto distributedUpgradeProgress(
RpcController controller, DistributedUpgradeProgressRequestProto req)
throws ServiceException {
try {
UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
.convert(req.getAction()));
DistributedUpgradeProgressResponseProto.Builder builder =
DistributedUpgradeProgressResponseProto.newBuilder();
if (result != null) {
builder.setReport(PBHelper.convert(result));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}

@Override
public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
RpcController controller, ListCorruptFileBlocksRequestProto req)
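The deleted handler above follows the usual HDFS server-side PB translator shape: convert the request, delegate to the implementation, set the optional response field only when the result is non-null, and wrap any IOException in a ServiceException. A self-contained sketch of the pattern, with plain classes standing in for protobuf-generated types (all names hypothetical):

```java
import java.io.IOException;

public class ServerTranslatorSketch {
  static class RequestProto { int action; }
  static class ResponseProto { Report report; }          // "optional" field
  static class Report { }
  static class ServiceException extends Exception {
    ServiceException(Throwable t) { super(t); }
  }
  interface Server { Report progress(int action) throws IOException; }

  private final Server server;
  ServerTranslatorSketch(Server server) { this.server = server; }

  ResponseProto progress(RequestProto req) throws ServiceException {
    try {
      Report result = server.progress(req.action); // delegate to the implementation
      ResponseProto resp = new ResponseProto();
      if (result != null) {                        // set optional field only if present
        resp.report = result;
      }
      return resp;
    } catch (IOException e) {
      throw new ServiceException(e);               // blocking PB RPC reports errors this way
    }
  }
}
```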
@@ -42,7 +42,6 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -58,8 +57,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
@@ -102,7 +99,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@@ -128,8 +124,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
final private ClientNamenodeProtocolPB rpcProxy;

public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
throws IOException {
public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
rpcProxy = proxy;
}

@@ -564,21 +559,6 @@ public void finalizeUpgrade() throws IOException {
}
}

@Override
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException {
DistributedUpgradeProgressRequestProto req =
DistributedUpgradeProgressRequestProto.newBuilder().
setAction(PBHelper.convert(action)).build();
try {
DistributedUpgradeProgressResponseProto res = rpcProxy
.distributedUpgradeProgress(null, req);
return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}

@Override
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException {
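The client-side translator deleted above is the mirror image: build the request proto, invoke the proxy, translate an absent optional field to null, and unwrap ServiceException back into IOException. A companion sketch under the same stand-in assumptions (no real HDFS or protobuf types):

```java
import java.io.IOException;

public class ClientTranslatorSketch {
  static class RequestProto { int action; }
  static class ResponseProto {
    Report report;
    boolean hasReport() { return report != null; } // mirrors generated hasX()
  }
  static class Report { }
  static class ServiceException extends Exception {
    ServiceException(Throwable t) { super(t); }
  }
  interface RpcProxy { ResponseProto progress(RequestProto req) throws ServiceException; }

  private final RpcProxy rpcProxy;
  ClientTranslatorSketch(RpcProxy rpcProxy) { this.rpcProxy = rpcProxy; }

  Report progress(int action) throws IOException {
    RequestProto req = new RequestProto();
    req.action = action;
    try {
      ResponseProto res = rpcProxy.progress(req);
      return res.hasReport() ? res.report : null;  // absent optional field -> null
    } catch (ServiceException e) {
      // The real code unwraps the remote IOException via ProtobufHelper;
      // the sketch just rewraps the cause.
      throw new IOException(e.getCause());
    }
  }
}
```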
@@ -41,8 +41,6 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
@@ -59,7 +57,6 @@
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
@@ -252,20 +249,6 @@ public NamespaceInfo versionRequest() throws IOException {
}
}

@Override
public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
throws IOException {
ProcessUpgradeRequestProto req = ProcessUpgradeRequestProto.newBuilder()
.setCmd(PBHelper.convert(comm)).build();
ProcessUpgradeResponseProto resp;
try {
resp = rpcProxy.processUpgrade(NULL_CONTROLLER, req);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
}

@Override
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
ReportBadBlocksRequestProto.Builder builder = ReportBadBlocksRequestProto
@@ -33,8 +33,6 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
@@ -56,7 +54,6 @@
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;

import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@@ -211,25 +208,6 @@ public VersionResponseProto versionRequest(RpcController controller,
.setInfo(PBHelper.convert(info)).build();
}

@Override
public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
ProcessUpgradeRequestProto request) throws ServiceException {
UpgradeCommand ret;
try {
UpgradeCommand cmd = request.hasCmd() ? PBHelper
.convert(request.getCmd()) : null;
ret = impl.processUpgradeCommand(cmd);
} catch (IOException e) {
throw new ServiceException(e);
}
ProcessUpgradeResponseProto.Builder builder =
ProcessUpgradeResponseProto.newBuilder();
if (ret != null) {
builder.setCmd(PBHelper.convert(ret));
}
return builder.build();
}

@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
ReportBadBlocksRequestProto request) throws ServiceException {
@@ -37,7 +37,6 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -47,7 +46,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
@@ -61,7 +59,6 @@
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -96,7 +93,6 @@
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
@@ -106,7 +102,6 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -132,7 +127,6 @@
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.DataChecksum;
@@ -640,8 +634,6 @@ public static DatanodeCommand convert(DatanodeCommandProto proto) {
return PBHelper.convert(proto.getKeyUpdateCmd());
case RegisterCommand:
return REG_CMD;
case UpgradeCommand:
return PBHelper.convert(proto.getUpgradeCmd());
}
return null;
}
@@ -738,11 +730,6 @@ public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
PBHelper.convert((BlockCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_UC_ACTION_REPORT_STATUS:
case DatanodeProtocol.DNA_UC_ACTION_START_UPGRADE:
builder.setCmdType(DatanodeCommandProto.Type.UpgradeCommand)
.setUpgradeCmd(PBHelper.convert((UpgradeCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_UNKNOWN: //Not expected
default:
builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -750,19 +737,6 @@ public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
return builder.build();
}

public static UpgradeCommand convert(UpgradeCommandProto upgradeCmd) {
int action = UpgradeCommand.UC_ACTION_UNKNOWN;
switch (upgradeCmd.getAction()) {
case REPORT_STATUS:
action = UpgradeCommand.UC_ACTION_REPORT_STATUS;
break;
case START_UPGRADE:
action = UpgradeCommand.UC_ACTION_START_UPGRADE;
}
return new UpgradeCommand(action, upgradeCmd.getVersion(),
(short) upgradeCmd.getUpgradeStatus());
}

public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
}
@@ -852,28 +826,6 @@ public static ReceivedDeletedBlockInfoProto convert(
.build();
}

public static UpgradeCommandProto convert(UpgradeCommand comm) {
UpgradeCommandProto.Builder builder = UpgradeCommandProto.newBuilder();
if (comm == null) {
return builder.setAction(UpgradeCommandProto.Action.UNKNOWN)
.setVersion(0).setUpgradeStatus(0).build();
}
builder.setVersion(comm.getVersion()).setUpgradeStatus(
comm.getCurrentStatus());
switch (comm.getAction()) {
case UpgradeCommand.UC_ACTION_REPORT_STATUS:
builder.setAction(UpgradeCommandProto.Action.REPORT_STATUS);
break;
case UpgradeCommand.UC_ACTION_START_UPGRADE:
builder.setAction(UpgradeCommandProto.Action.START_UPGRADE);
break;
default:
builder.setAction(UpgradeCommandProto.Action.UNKNOWN);
break;
}
return builder.build();
}

public static ReceivedDeletedBlockInfo convert(
ReceivedDeletedBlockInfoProto proto) {
ReceivedDeletedBlockInfo.BlockStatus status = null;
@@ -1238,51 +1190,6 @@ public static SafeModeAction convert(
}
}

public static UpgradeActionProto convert(
UpgradeAction a) {
switch (a) {
case GET_STATUS:
return UpgradeActionProto.GET_STATUS;
case DETAILED_STATUS:
return UpgradeActionProto.DETAILED_STATUS;
case FORCE_PROCEED:
return UpgradeActionProto.FORCE_PROCEED;
default:
throw new IllegalArgumentException("Unexpected UpgradeAction :" + a);
}
}


public static UpgradeAction convert(
UpgradeActionProto a) {
switch (a) {
case GET_STATUS:
return UpgradeAction.GET_STATUS;
case DETAILED_STATUS:
return UpgradeAction.DETAILED_STATUS;
case FORCE_PROCEED:
return UpgradeAction.FORCE_PROCEED;
default:
throw new IllegalArgumentException("Unexpected UpgradeAction :" + a);
}
}

public static UpgradeStatusReportProto convert(UpgradeStatusReport r) {
if (r == null)
return null;
return UpgradeStatusReportProto.newBuilder()
.setVersion(r.getVersion())
.setUpgradeStatus(r.getUpgradeStatus())
.setFinalized(r.isFinalized())
.build();
}

public static UpgradeStatusReport convert(UpgradeStatusReportProto r) {
if (r == null) return null;
return new UpgradeStatusReport(r.getVersion(),
(short) r.getUpgradeStatus(), r.getFinalized());
}

public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
if (c == null)
return null;
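The removed PBHelper converters above all use the same switch-based mapping between a Java enum and its wire-format twin, failing fast on values the other side does not know. A self-contained sketch of the idiom (stand-in enums, not HDFS types):

```java
public class EnumMappingSketch {
  enum JavaAction { GET_STATUS, DETAILED_STATUS, FORCE_PROCEED }
  enum ProtoAction { GET_STATUS, DETAILED_STATUS, FORCE_PROCEED }

  static ProtoAction convert(JavaAction a) {
    switch (a) {
    case GET_STATUS:      return ProtoAction.GET_STATUS;
    case DETAILED_STATUS: return ProtoAction.DETAILED_STATUS;
    case FORCE_PROCEED:   return ProtoAction.FORCE_PROCEED;
    default:
      // Fail fast on values the wire format cannot represent.
      throw new IllegalArgumentException("Unexpected action: " + a);
    }
  }

  static JavaAction convert(ProtoAction a) {
    switch (a) {
    case GET_STATUS:      return JavaAction.GET_STATUS;
    case DETAILED_STATUS: return JavaAction.DETAILED_STATUS;
    case FORCE_PROCEED:   return JavaAction.FORCE_PROCEED;
    default:
      throw new IllegalArgumentException("Unexpected action: " + a);
    }
  }
}
```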
@@ -42,7 +42,6 @@
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -560,10 +559,6 @@ assert getBlockPoolId().equals(bp) :

dn.finalizeUpgradeForPool(bp);
break;
case UpgradeCommand.UC_ACTION_START_UPGRADE:
// start distributed upgrade here
LOG.warn("Distibuted upgrade is no longer supported");
break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
String who = "NameNode at " + actor.getNNSocketAddress();
dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
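The BPOfferService hunk above shows how the datanode dispatches namenode commands: a switch over integer action codes, where the retired START_UPGRADE action now only logs a warning instead of acting. A minimal sketch of that dispatch shape (illustrative codes and types, not the actual BPOfferService code):

```java
public class CommandDispatchSketch {
  static final int DNA_FINALIZE = 5;                 // illustrative code values
  static final int DNA_UC_ACTION_START_UPGRADE = 101;

  boolean processCommand(int action) {
    switch (action) {
    case DNA_FINALIZE:
      // finalize the upgrade for this block pool
      return true;
    case DNA_UC_ACTION_START_UPGRADE:
      // Distributed upgrade was removed; the case is kept only to tolerate
      // old namenodes, so log instead of acting.
      System.err.println("Distributed upgrade is no longer supported");
      return true;
    default:
      return false;                                  // unrecognized command
    }
  }
}
```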
@@ -59,7 +59,6 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -88,7 +87,6 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -107,7 +105,6 @@
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -742,13 +739,6 @@ public void finalizeUpgrade() throws IOException {
namesystem.finalizeUpgrade();
}

@Override // ClientProtocol
public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
throws IOException {
throw new UnsupportedActionException(
"Deprecated method. No longer supported");
}

@Override // ClientProtocol
public void metaSave(String filename) throws IOException {
namesystem.checkOperation(OperationCategory.UNCHECKED);
@@ -919,13 +909,6 @@ public NamespaceInfo versionRequest() throws IOException {
return namesystem.getNamespaceInfo();
}

@Override // DatanodeProtocol
public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
throws IOException {
throw new UnsupportedActionException(
"Deprecated method, no longer supported");
}

/**
* Verifies the given registration.
*
@@ -41,7 +41,6 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -49,7 +48,6 @@
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.Text;
@@ -72,8 +72,6 @@ public interface DatanodeProtocol {
final static int DNA_RECOVERBLOCK = 6; // request a block recovery
final static int DNA_ACCESSKEYUPDATE = 7; // update access key
final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
final static int DNA_UC_ACTION_REPORT_STATUS = 100; // Report upgrade status
final static int DNA_UC_ACTION_START_UPGRADE = 101; // start upgrade

/**
* Register Datanode.
@@ -150,18 +148,6 @@ public void errorReport(DatanodeRegistration registration,

public NamespaceInfo versionRequest() throws IOException;

/**
* This is a very general way to send a command to the name-node during
* distributed upgrade process.
*
* The generosity is because the variety of upgrade commands is unpredictable.
* The reply from the name-node is also received in the form of an upgrade
* command.
*
* @return a reply in the form of an upgrade command
*/
UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;

/**
* same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
* }
@@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
* This as a generic distributed upgrade command.
*
* During the upgrade cluster components send upgrade commands to each other
* in order to obtain or share information with them.
* It is supposed that each upgrade defines specific upgrade command by
* deriving them from this class.
* The upgrade command contains version of the upgrade, which is verified
* on the receiving side and current status of the upgrade.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class UpgradeCommand extends DatanodeCommand {
public final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
public final static int UC_ACTION_REPORT_STATUS =
DatanodeProtocol.DNA_UC_ACTION_REPORT_STATUS;
public final static int UC_ACTION_START_UPGRADE =
DatanodeProtocol.DNA_UC_ACTION_START_UPGRADE;

private int version;
private short upgradeStatus;

public UpgradeCommand() {
super(UC_ACTION_UNKNOWN);
this.version = 0;
this.upgradeStatus = 0;
}

public UpgradeCommand(int action, int version, short status) {
super(action);
this.version = version;
this.upgradeStatus = status;
}

public int getVersion() {
return this.version;
}

public short getCurrentStatus() {
return this.upgradeStatus;
}
}
@@ -296,19 +296,6 @@ message FinalizeUpgradeRequestProto { // no parameters
message FinalizeUpgradeResponseProto { // void response
}

enum UpgradeActionProto {
GET_STATUS = 1;
DETAILED_STATUS = 2;
FORCE_PROCEED = 3;
}

message DistributedUpgradeProgressRequestProto {
required UpgradeActionProto action = 1;
}
message DistributedUpgradeProgressResponseProto {
optional UpgradeStatusReportProto report = 1;
}

message ListCorruptFileBlocksRequestProto {
required string path = 1;
optional string cookie = 2;
@@ -490,8 +477,6 @@ service ClientNamenodeProtocol {
rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
returns(FinalizeUpgradeResponseProto);
rpc distributedUpgradeProgress(DistributedUpgradeProgressRequestProto)
returns(DistributedUpgradeProgressResponseProto);
rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
returns(ListCorruptFileBlocksResponseProto);
rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
@@ -60,7 +60,7 @@ message DatanodeCommandProto {
FinalizeCommand = 3;
KeyUpdateCommand = 4;
RegisterCommand = 5;
UpgradeCommand = 6;
UnusedUpgradeCommand = 6;
NullDatanodeCommand = 7;
}

@@ -74,7 +74,6 @@ message DatanodeCommandProto {
optional FinalizeCommandProto finalizeCmd = 5;
optional KeyUpdateCommandProto keyUpdateCmd = 6;
optional RegisterCommandProto registerCmd = 7;
optional UpgradeCommandProto upgradeCmd = 8;
}

/**
@@ -131,20 +130,6 @@ message RegisterCommandProto {
// void
}

/**
* Generic distributed upgrade Command
*/
message UpgradeCommandProto {
enum Action {
UNKNOWN = 0; // Unknown action
REPORT_STATUS = 100; // Report upgrade status
START_UPGRADE = 101; // Start upgrade
}
required Action action = 1; // Upgrade action
required uint32 version = 2; // Version of the upgrade
required uint32 upgradeStatus = 3; // % completed in range 0 & 100
}

/**
* registration - Information of the datanode registering with the namenode
*/
@@ -302,20 +287,6 @@ message ErrorReportRequestProto {
message ErrorReportResponseProto {
}

/**
* cmd - Upgrade command sent from datanode to namenode
*/
message ProcessUpgradeRequestProto {
optional UpgradeCommandProto cmd = 1;
}

/**
* cmd - Upgrade command sent from namenode to datanode
*/
message ProcessUpgradeResponseProto {
optional UpgradeCommandProto cmd = 1;
}

/**
* blocks - list of blocks that are reported as corrupt
*/
@@ -388,12 +359,6 @@ service DatanodeProtocolService {
*/
rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);

/**
* Generic way to send commands from datanode to namenode during
* distributed upgrade process.
*/
rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);

/**
* Report corrupt blocks at the specified location
*/
@@ -210,15 +210,6 @@ message DirectoryListingProto {
required uint32 remainingEntries = 2;
}

/**
* Status of current cluster upgrade from one version to another
*/
message UpgradeStatusReportProto {
required uint32 version = 1;;
required uint32 upgradeStatus = 2; // % completed in range 0 & 100
required bool finalized = 3;
}

/**
* Common node information shared by all the nodes in the cluster
*/