diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b8bfe5b719..2e0ceb4a01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -37,6 +37,9 @@ Trunk (unreleased changes)
     HDFS-234. Integration with BookKeeper logging system. (Ivan Kelly
     via jitendra)
 
+    HDFS-2663. Optional protobuf parameters are not handled correctly.
+    (suresh)
+
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 10527f5ac1..1204a76c4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -24,6 +24,9 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -52,6 +55,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
@@ -124,6 +128,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
@@ -208,11 +213,14 @@ public GetBlockLocationsResponseProto getBlockLocations(
       RpcController controller, GetBlockLocationsRequestProto req)
       throws ServiceException {
     try {
-      return GetBlockLocationsResponseProto
-          .newBuilder()
-          .setLocations(
-              PBHelper.convert(server.getBlockLocations(req.getSrc(),
-                  req.getOffset(), req.getLength()))).build();
+      LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
+          req.getLength());
+      Builder builder = GetBlockLocationsResponseProto
+          .newBuilder();
+      if (b != null) {
+        builder.setLocations(PBHelper.convert(b));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -325,7 +333,7 @@ public AddBlockResponseProto addBlock(RpcController controller,
       return AddBlockResponseProto.newBuilder().setBlock(
           PBHelper.convert(
               server.addBlock(req.getSrc(), req.getClientName(),
-                  PBHelper.convert(req.getPrevious()),
+                  req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
                   PBHelper.convert(
                       (DatanodeInfoProto[]) req.getExcludeNodesList().toArray()))))
           .build();
@@ -594,10 +602,14 @@ public DistributedUpgradeProgressResponseProto distributedUpgradeProgress(
       RpcController controller, DistributedUpgradeProgressRequestProto req)
       throws ServiceException {
     try {
-      UpgradeStatusReportProto result = PBHelper.convert(server
-          .distributedUpgradeProgress(PBHelper.convert(req.getAction())));
-      return DistributedUpgradeProgressResponseProto.newBuilder()
-          .setReport(result).build();
+      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
+          .convert(req.getAction()));
+      DistributedUpgradeProgressResponseProto.Builder builder =
+          DistributedUpgradeProgressResponseProto.newBuilder();
+      if (result != null) {
+        builder.setReport(PBHelper.convert(result));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -636,9 +648,13 @@ public MetaSaveResponseProto metaSave(RpcController controller,
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
     try {
-      HdfsFileStatusProto result =
-          PBHelper.convert(server.getFileInfo(req.getSrc()));
-      return GetFileInfoResponseProto.newBuilder().setFs(result).build();
+      HdfsFileStatus res = server.getFileInfo(req.getSrc());
+      GetFileInfoResponseProto.Builder builder =
+          GetFileInfoResponseProto.newBuilder();
+      if (res != null) {
+        builder.setFs(PBHelper.convert(res));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
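Every server-side change above follows the same write-side pattern: compute the internal value first, and call the generated setter only when that value is non-null. The pattern matters because proto2-generated Java setters throw `NullPointerException` when handed null; the only way to express "no value" on the wire is to leave the optional field unset. A minimal sketch of the pattern, using a hypothetical `FooResponseProto` message with `optional BarProto bar = 1` (not an HDFS type):

```java
// Hypothetical sketch; FooResponseProto/BarProto stand in for the response
// messages patched above (GetFileInfoResponseProto and friends).
FooResponseProto buildResponse(Bar bar) { // bar may legitimately be null
  FooResponseProto.Builder builder = FooResponseProto.newBuilder();
  if (bar != null) {
    builder.setBar(convert(bar)); // setBar(null) would throw NullPointerException
  }
  return builder.build();         // an unset optional field is simply absent
}
```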
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 83aca3987b..75fbc7bc8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -83,14 +83,17 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
@@ -205,7 +208,10 @@ public LocatedBlocks getBlockLocations(String src, long offset, long length)
         .setLength(length)
         .build();
     try {
-      return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations());
+      GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
+          req);
+      return resp.hasLocations() ?
+          PBHelper.convert(resp.getLocations()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -329,12 +335,15 @@ public LocatedBlock addBlock(String src, String clientName,
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException {
-    AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src)
-        .setClientName(clientName).setPrevious(PBHelper.convert(previous))
-        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)))
-        .build();
+    AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder();
+    builder.setSrc(src)
+        .setClientName(clientName)
+        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
+    if (previous != null) {
+      builder.setPrevious(PBHelper.convert(previous));
+    }
     try {
-      return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock());
+      return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -615,8 +624,9 @@ public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
         DistributedUpgradeProgressRequestProto.newBuilder().
         setAction(PBHelper.convert(action)).build();
     try {
-      return PBHelper.convert(
-          rpcProxy.distributedUpgradeProgress(null, req).getReport());
+      DistributedUpgradeProgressResponseProto res = rpcProxy
+          .distributedUpgradeProgress(null, req);
+      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -653,7 +663,8 @@ public HdfsFileStatus getFileInfo(String src) throws AccessControlException,
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
         .setSrc(src).build();
     try {
-      return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs());
+      GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
+      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
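The client-side translators need the inverse check. In proto2-generated Java, reading an unset message field does not yield null: `getFs()` and friends return the field type's default instance. The translator therefore has to consult the generated `has...()` accessor to recover the null that the server intended. A sketch of the read-side pattern, using the same hypothetical message as above:

```java
// Hypothetical sketch of the read-side pattern used by the translators above.
Bar readResponse(FooResponseProto resp) {
  // getBar() on an unset field returns BarProto.getDefaultInstance(), never
  // null, so presence must be tested explicitly before converting.
  return resp.hasBar() ? convert(resp.getBar()) : null;
}
```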
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index e05a884cd6..66db4c39cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -204,7 +204,7 @@ public DatanodeCommand blockReport(DatanodeRegistration registration,
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return PBHelper.convert(resp.getCmd());
+    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
   }
 
   @Override
@@ -262,7 +262,7 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return PBHelper.convert(resp.getCmd());
+    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 25f08aee5d..2dbf9150e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -108,7 +109,9 @@ public HeartbeatResponseProto sendHeartbeat(RpcController controller,
         .newBuilder();
     if (cmds != null) {
       for (int i = 0; i < cmds.length; i++) {
-        builder.addCmds(i, PBHelper.convert(cmds[i]));
+        if (cmds[i] != null) {
+          builder.addCmds(PBHelper.convert(cmds[i]));
+        }
       }
     }
     return builder.build();
@@ -129,8 +132,12 @@ public BlockReportResponseProto blockReport(RpcController controller,
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
-        .build();
+    BlockReportResponseProto.Builder builder =
+        BlockReportResponseProto.newBuilder();
+    if (cmd != null) {
+      builder.setCmd(PBHelper.convert(cmd));
+    }
+    return builder.build();
   }
 
   @Override
@@ -180,14 +187,20 @@ public VersionResponseProto versionRequest(RpcController controller,
   @Override
   public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
       ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand cmd;
+    UpgradeCommand ret;
     try {
-      cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
+      UpgradeCommand cmd = request.hasCmd() ? PBHelper
+          .convert(request.getCmd()) : null;
+      ret = impl.processUpgradeCommand(cmd);
     } catch (IOException e) {
       throw new ServiceException(e);
    }
-    return ProcessUpgradeResponseProto.newBuilder()
-        .setCmd(PBHelper.convert(cmd)).build();
+    ProcessUpgradeResponseProto.Builder builder =
+        ProcessUpgradeResponseProto.newBuilder();
+    if (ret != null) {
+      builder.setCmd(PBHelper.convert(ret));
+    }
+    return builder.build();
   }
 
   @Override
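The `sendHeartbeat` fix above deals with the repeated-field variant of the same problem: a repeated proto2 field cannot contain null elements (`addCmds(null)` throws `NullPointerException`), and the index-based `addCmds(i, value)` overload throws `IndexOutOfBoundsException` as soon as a null command is skipped and the indices stop forming a dense sequence. Appending only the non-null commands sidesteps both failure modes. A sketch of the corrected loop in isolation:

```java
// Sketch of the repeated-field handling adopted by sendHeartbeat above.
HeartbeatResponseProto toHeartbeatResponse(DatanodeCommand[] cmds) {
  HeartbeatResponseProto.Builder builder = HeartbeatResponseProto.newBuilder();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd != null) {                        // addCmds(null) throws NPE
        builder.addCmds(PBHelper.convert(cmd)); // append, don't index
      }
    }
  }
  return builder.build(); // absent commands are simply not serialized
}
```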
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 6be4366069..c684ee2688 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -127,6 +127,10 @@
 /**
  * Utilities for converting protobuf classes to and from implementation classes.
+ *
+ * Note that when converting from an internal type to a protobuf type, the
+ * converters never return null for the protobuf type. The check that the
+ * internal type is not null must be done before calling convert().
  */
 public class PBHelper {
   private static final RegisterCommandProto REG_CMD_PROTO =
@@ -370,6 +374,7 @@ public static NamespaceInfo convert(NamespaceInfoProto info) {
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
+    if (cmd == null) return null;
     switch (cmd.getType()) {
     case CheckPointCommand:
       CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
@@ -426,7 +431,8 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
     if (di == null) return null;
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
-        di.getLocation(), di.getHostName(),
+        di.hasLocation() ? di.getLocation() : null,
+        di.hasHostName() ? di.getHostName() : null,
         di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
         di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
         PBHelper.convert(di.getAdminState()));
@@ -434,10 +440,16 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
 
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
-    return DatanodeInfoProto.newBuilder().
+    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+    if (di.getHostName() != null) {
+      builder.setHostName(di.getHostName());
+    }
+    if (di.getNetworkLocation() != null) {
+      builder.setLocation(di.getNetworkLocation());
+    }
+
+    return builder.
         setId(PBHelper.convert((DatanodeID) di)).
-        setLocation(di.getNetworkLocation()).
-        setHostName(di.getHostName()).
         setCapacity(di.getCapacity()).
         setDfsUsed(di.getDfsUsed()).
         setRemaining(di.getRemaining()).
@@ -777,9 +789,14 @@ public static BalancerBandwidthCommand convert(
 
   public static ReceivedDeletedBlockInfoProto convert(
       ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
-    return ReceivedDeletedBlockInfoProto.newBuilder()
-        .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
-        .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
+    ReceivedDeletedBlockInfoProto.Builder builder =
+        ReceivedDeletedBlockInfoProto.newBuilder();
+
+    if (receivedDeletedBlockInfo.getDelHints() != null) {
+      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
+    }
+    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+        .build();
   }
 
   public static UpgradeCommandProto convert(UpgradeCommand comm) {
@@ -803,7 +820,7 @@ public static UpgradeCommandProto convert(UpgradeCommand comm) {
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
-        proto.getDeleteHint());
+        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
   }
 
   public static NamespaceInfoProto convert(NamespaceInfo info) {
@@ -863,13 +880,10 @@ public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb
 
   // LocatedBlocks
   public static LocatedBlocks convert(LocatedBlocksProto lb) {
-    if (lb == null) {
-      return null;
-    }
     return new LocatedBlocks(
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
-        PBHelper.convert(lb.getLastBlock()),
+        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
         lb.getIsLastBlockComplete());
   }
 
@@ -877,11 +891,15 @@ public static LocatedBlocksProto convert(LocatedBlocks lb) {
     if (lb == null) {
       return null;
     }
-    return LocatedBlocksProto.newBuilder().
-        setFileLength(lb.getFileLength()).
-        setUnderConstruction(lb.isUnderConstruction()).
-        addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
-        setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+    LocatedBlocksProto.Builder builder =
+        LocatedBlocksProto.newBuilder();
+    if (lb.getLastLocatedBlock() != null) {
+      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
+    }
+    return builder.setFileLength(lb.getFileLength())
+        .setUnderConstruction(lb.isUnderConstruction())
+        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
+        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
   }
 
   public static FsServerDefaults convert(FsServerDefaultsProto fs) {
@@ -982,11 +1000,16 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
       setPermission(PBHelper.convert(fs.getPermission())).
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
-      setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
-    LocatedBlocks locations = null;
+
+    if (fs.getSymlink() != null) {
+      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
+    }
     if (fs instanceof HdfsLocatedFileStatus) {
-      builder.setLocations(PBHelper.convert(locations));
+      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
+      if (locations != null) {
+        builder.setLocations(PBHelper.convert(locations));
+      }
     }
     return builder.build();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 17b4d1d90e..7a52460ef0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto {
 }
 
 message GetBlockLocationsResponseProto {
-  required LocatedBlocksProto locations = 1;
+  optional LocatedBlocksProto locations = 1;
 }
 
 message GetServerDefaultsRequestProto { // No parameters
@@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // void response
 message AddBlockRequestProto {
   required string src = 1;
   required string clientName = 2;
-  required ExtendedBlockProto previous = 3;
+  optional ExtendedBlockProto previous = 3;
   repeated DatanodeInfoProto excludeNodes = 4;
 }
 
@@ -306,7 +306,7 @@ message DistributedUpgradeProgressRequestProto {
   required UpgradeActionProto action = 1;
 }
 message DistributedUpgradeProgressResponseProto {
-  required UpgradeStatusReportProto report = 1;
+  optional UpgradeStatusReportProto report = 1;
 }
 
 message ListCorruptFileBlocksRequestProto {
@@ -330,7 +330,7 @@ message GetFileInfoRequestProto {
 }
 
 message GetFileInfoResponseProto {
-  required HdfsFileStatusProto fs = 1;
+  optional HdfsFileStatusProto fs = 1;
 }
 
 message GetFileLinkInfoRequestProto {
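Relaxing these fields from `required` to `optional` is what makes the translator changes possible: proto2 refuses to build or parse a message whose required field is unset, so `required` can never model a legitimately absent result such as "file not found". One caveat worth noting: new readers handle old messages fine (the field is always present there), but an old reader that still declares the field `required` will fail to parse a new message that omits it. A sketch of the new behavior, assuming the Java classes regenerated from the .proto above:

```java
// Sketch, assuming the regenerated proto2 Java classes.
static void demo() throws Exception {
  // With "required HdfsFileStatusProto fs" this build() would have thrown
  // UninitializedMessageException; with "optional" it succeeds.
  GetFileInfoResponseProto resp = GetFileInfoResponseProto.newBuilder().build();
  byte[] wire = resp.toByteArray(); // nothing is serialized for the unset field
  GetFileInfoResponseProto parsed = GetFileInfoResponseProto.parseFrom(wire);
  assert !parsed.hasFs();           // absence survives the round trip
}
```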
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index b98a2c2e97..71f609e40a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -170,7 +170,7 @@ message HeartbeatRequestProto {
  * cmds - Commands from namenode to datanode.
  */
 message HeartbeatResponseProto {
-  repeated DatanodeCommandProto cmds = 1;
+  repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 9fbf2b969a..cc45593b29 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -30,7 +30,8 @@ message ExtendedBlockProto {
   required string poolId = 1;   // Block pool id - gloablly unique across clusters
   required uint64 blockId = 2;  // the local id within a pool
   required uint64 generationStamp = 3;
-  optional uint64 numBytes = 4;  // block len does not belong in ebid - here for historical reasons
+  optional uint64 numBytes = 4 [default = 0];  // len does not belong in ebid
+                                               // here for historical reasons
 }
 
 /**
@@ -65,12 +66,12 @@ message DatanodeInfosProto {
  */
 message DatanodeInfoProto {
   required DatanodeIDProto id = 1;
-  optional uint64 capacity = 2;
-  optional uint64 dfsUsed = 3;
-  optional uint64 remaining = 4;
-  optional uint64 blockPoolUsed = 5;
-  optional uint64 lastUpdate = 6;
-  optional uint32 xceiverCount = 7;
+  optional uint64 capacity = 2 [default = 0];
+  optional uint64 dfsUsed = 3 [default = 0];
+  optional uint64 remaining = 4 [default = 0];
+  optional uint64 blockPoolUsed = 5 [default = 0];
+  optional uint64 lastUpdate = 6 [default = 0];
+  optional uint32 xceiverCount = 7 [default = 0];
   optional string location = 8;
   optional string hostName = 9;
   enum AdminState {
@@ -79,7 +80,7 @@ message DatanodeInfoProto {
     DECOMMISSIONED = 2;
   }
 
-  optional AdminState adminState = 10;
+  optional AdminState adminState = 10 [default = NORMAL];
 }
 
 /**
@@ -162,8 +163,8 @@ message HdfsFileStatusProto {
   optional bytes symlink = 9;             // if symlink, target encoded java UTF8
 
   // Optional fields for file
-  optional uint32 block_replication = 10; // Actually a short - only 16bits used
-  optional uint64 blocksize = 11;
+  optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+  optional uint64 blocksize = 11 [default = 0];
   optional LocatedBlocksProto locations = 12;  // suppled only if asked by client
 }
 
@@ -218,7 +219,7 @@ message NamenodeRegistrationProto {
     CHECKPOINT = 3;
   }
   required StorageInfoProto storageInfo = 3;  // Node information
-  optional NamenodeRoleProto role = 4;        // Namenode role
+  optional NamenodeRoleProto role = 4 [default = NAMENODE];  // Namenode role
 }
 
 /**
@@ -264,7 +265,7 @@ message CheckpointCommandProto {
 message BlockProto {
   required uint64 blockId = 1;
   required uint64 genStamp = 2;
-  optional uint64 numBytes = 3;
+  optional uint64 numBytes = 3 [default = 0];
 }
 
 /**
@@ -313,7 +314,7 @@ message NamespaceInfoProto {
 message BlockKeyProto {
   required uint32 keyId = 1;      // Key identifier
   required uint64 expiryDate = 2; // Expiry time in milliseconds
-  required bytes keyBytes = 3;    // Key secret
+  optional bytes keyBytes = 3;    // Key secret
 }
 
 /**
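The added `[default = ...]` annotations change nothing on the wire: proto2 never serializes an unset optional field, and a declared default is applied only by the reader's generated getter. They simply pin down, in the schema rather than in scattered Java code, what a reader observes when the writer omitted a field. A short sketch, again assuming the regenerated classes (`someDatanodeId` is a hypothetical placeholder):

```java
// Sketch: proto2 defaults are a read-side concept only.
DatanodeInfoProto info = DatanodeInfoProto.newBuilder()
    .setId(someDatanodeId) // only the required field is populated
    .build();

info.hasCapacity();   // false - the field was never set
info.getCapacity();   // 0, supplied by [default = 0]
info.getAdminState(); // NORMAL, supplied by [default = NORMAL]
```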