diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 989c962367..b8bfe5b719 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -37,8 +37,6 @@ Trunk (unreleased changes)
     HDFS-234. Integration with BookKeeper logging system. (Ivan Kelly
     via jitendra)
 
-    HDFS-2663. Optional protobuf parameters are not handled correctly. (suresh)
-
   IMPROVEMENTS
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 1204a76c4e..10527f5ac1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -24,9 +24,6 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -55,7 +52,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
@@ -128,7 +124,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
@@ -213,16 +208,11 @@ public GetBlockLocationsResponseProto getBlockLocations(
       RpcController controller, GetBlockLocationsRequestProto req)
       throws ServiceException {
     try {
-      LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
-          req.getLength());
-      Builder builder = GetBlockLocationsResponseProto
-          .newBuilder();
-      if (b != null) {
-        builder.setLocations(
-            PBHelper.convert(server.getBlockLocations(req.getSrc(),
-                req.getOffset(), req.getLength()))).build();
-      }
-      return builder.build();
+      return GetBlockLocationsResponseProto
+          .newBuilder()
+          .setLocations(
+              PBHelper.convert(server.getBlockLocations(req.getSrc(),
+                  req.getOffset(), req.getLength()))).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -335,7 +325,7 @@ public AddBlockResponseProto addBlock(RpcController controller,
       return AddBlockResponseProto.newBuilder().setBlock(
           PBHelper.convert(
               server.addBlock(req.getSrc(), req.getClientName(),
-                  req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
+                  PBHelper.convert(req.getPrevious()),
                   PBHelper.convert(
                       (DatanodeInfoProto[]) req.getExcludeNodesList().toArray()))))
           .build();
@@ -604,14 +594,10 @@ public DistributedUpgradeProgressResponseProto distributedUpgradeProgress(
       RpcController controller, DistributedUpgradeProgressRequestProto req)
       throws ServiceException {
     try {
-      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
-          .convert(req.getAction()));
-      DistributedUpgradeProgressResponseProto.Builder builder =
-          DistributedUpgradeProgressResponseProto.newBuilder();
-      if (result != null) {
-        builder.setReport(PBHelper.convert(result));
-      }
-      return builder.build();
+      UpgradeStatusReportProto result = PBHelper.convert(server
+          .distributedUpgradeProgress(PBHelper.convert(req.getAction())));
+      return DistributedUpgradeProgressResponseProto.newBuilder()
+          .setReport(result).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -650,13 +636,9 @@ public MetaSaveResponseProto metaSave(RpcController controller,
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
     try {
-      HdfsFileStatus res = server.getFileInfo(req.getSrc());
-      GetFileInfoResponseProto.Builder builder =
-          GetFileInfoResponseProto.newBuilder();
-      if (res != null) {
-        builder.setFs(PBHelper.convert(res));
-      }
-      return builder.build();
+      HdfsFileStatusProto result =
+          PBHelper.convert(server.getFileInfo(req.getSrc()));
+      return GetFileInfoResponseProto.newBuilder().setFs(result).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 75fbc7bc8e..83aca3987b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -83,17 +83,14 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
@@ -208,10 +205,7 @@ public LocatedBlocks getBlockLocations(String src, long offset, long length)
         .setLength(length)
         .build();
     try {
-      GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
-          req);
-      return resp.hasLocations() ?
-        PBHelper.convert(resp.getLocations()) : null;
+      return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -335,15 +329,12 @@ public LocatedBlock addBlock(String src, String clientName,
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException {
-    AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder();
-    builder.setSrc(src)
-        .setClientName(clientName)
-        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
-    if (previous != null) {
-      builder.setPrevious(PBHelper.convert(previous));
-    }
+    AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src)
+        .setClientName(clientName).setPrevious(PBHelper.convert(previous))
+        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)))
+        .build();
     try {
-      return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock());
+      return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -624,9 +615,8 @@ public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
         DistributedUpgradeProgressRequestProto.newBuilder().
         setAction(PBHelper.convert(action)).build();
     try {
-      DistributedUpgradeProgressResponseProto res = rpcProxy
-          .distributedUpgradeProgress(null, req);
-      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
+      return PBHelper.convert(
+          rpcProxy.distributedUpgradeProgress(null, req).getReport());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -663,8 +653,7 @@ public HdfsFileStatus getFileInfo(String src) throws AccessControlException,
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
         .setSrc(src).build();
     try {
-      GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
-      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
+      return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 66db4c39cc..e05a884cd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -204,7 +204,7 @@ public DatanodeCommand blockReport(DatanodeRegistration registration,
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
+    return PBHelper.convert(resp.getCmd());
   }
 
   @Override
@@ -262,7 +262,7 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
+    return PBHelper.convert(resp.getCmd());
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 2dbf9150e9..25f08aee5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -29,7 +29,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -109,9 +108,7 @@ public HeartbeatResponseProto sendHeartbeat(RpcController controller,
         .newBuilder();
     if (cmds != null) {
       for (int i = 0; i < cmds.length; i++) {
-        if (cmds[i] != null) {
-          builder.addCmds(PBHelper.convert(cmds[i]));
-        }
+        builder.addCmds(i, PBHelper.convert(cmds[i]));
       }
     }
     return builder.build();
@@ -132,12 +129,8 @@ public BlockReportResponseProto blockReport(RpcController controller,
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    BlockReportResponseProto.Builder builder =
-        BlockReportResponseProto.newBuilder();
-    if (cmd != null) {
-      builder.setCmd(PBHelper.convert(cmd));
-    }
-    return builder.build();
+    return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
+        .build();
   }
 
   @Override
@@ -187,20 +180,14 @@ public VersionResponseProto versionRequest(RpcController controller,
   @Override
   public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
       ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand ret;
+    UpgradeCommand cmd;
     try {
-      UpgradeCommand cmd = request.hasCmd() ? PBHelper
-          .convert(request.getCmd()) : null;
-      ret = impl.processUpgradeCommand(cmd);
+      cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    ProcessUpgradeResponseProto.Builder builder =
-        ProcessUpgradeResponseProto.newBuilder();
-    if (ret != null) {
-      builder.setCmd(PBHelper.convert(ret));
-    }
-    return builder.build();
+    return ProcessUpgradeResponseProto.newBuilder()
+        .setCmd(PBHelper.convert(cmd)).build();
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index c684ee2688..6be4366069 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -127,10 +127,6 @@
 
 /**
  * Utilities for converting protobuf classes to and from implementation classes.
- *
- * Note that when converting from an internal type to protobuf type, the
- * converter never return null for protobuf type. The check for internal type
- * being null must be done before calling the convert() method.
  */
 public class PBHelper {
   private static final RegisterCommandProto REG_CMD_PROTO =
@@ -374,7 +370,6 @@ public static NamespaceInfo convert(NamespaceInfoProto info) {
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
-    if (cmd == null) return null;
     switch (cmd.getType()) {
     case CheckPointCommand:
       CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
@@ -431,8 +426,7 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
     if (di == null) return null;
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null ,
-        di.hasHostName() ? di.getHostName() : null,
+        di.getLocation(), di.getHostName(),
         di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
         di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() ,
         PBHelper.convert(di.getAdminState()));
@@ -440,16 +434,10 @@ static public DatanodeInfo convert(DatanodeInfoProto di) {
 
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
-    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
-    if (di.getHostName() != null) {
-      builder.setHostName(di.getHostName());
-    }
-    if (di.getNetworkLocation() != null) {
-      builder.setLocation(di.getNetworkLocation());
-    }
-
-    return builder.
+    return DatanodeInfoProto.newBuilder().
      setId(PBHelper.convert((DatanodeID) di)).
+     setLocation(di.getNetworkLocation()).
+     setHostName(di.getHostName()).
      setCapacity(di.getCapacity()).
      setDfsUsed(di.getDfsUsed()).
      setRemaining(di.getRemaining()).
@@ -789,14 +777,9 @@ public static BalancerBandwidthCommand convert(
 
   public static ReceivedDeletedBlockInfoProto convert(
       ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
-    ReceivedDeletedBlockInfoProto.Builder builder =
-        ReceivedDeletedBlockInfoProto.newBuilder();
-
-    if (receivedDeletedBlockInfo.getDelHints() != null) {
-      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
-    }
-    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
-        .build();
+    return ReceivedDeletedBlockInfoProto.newBuilder()
+        .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+        .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
   }
 
   public static UpgradeCommandProto convert(UpgradeCommand comm) {
@@ -820,7 +803,7 @@ public static UpgradeCommandProto convert(UpgradeCommand comm) {
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
-        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
+        proto.getDeleteHint());
   }
 
   public static NamespaceInfoProto convert(NamespaceInfo info) {
@@ -880,10 +863,13 @@ public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb
 
   // LocatedBlocks
   public static LocatedBlocks convert(LocatedBlocksProto lb) {
+    if (lb == null) {
+      return null;
+    }
     return new LocatedBlocks(
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
-        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
+        PBHelper.convert(lb.getLastBlock()),
         lb.getIsLastBlockComplete());
   }
 
@@ -891,15 +877,11 @@ public static LocatedBlocksProto convert(LocatedBlocks lb) {
     if (lb == null) {
       return null;
     }
-    LocatedBlocksProto.Builder builder =
-        LocatedBlocksProto.newBuilder();
-    if (lb.getLastLocatedBlock() != null) {
-      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
-    }
-    return builder.setFileLength(lb.getFileLength())
-        .setUnderConstruction(lb.isUnderConstruction())
-        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
-        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+    return LocatedBlocksProto.newBuilder().
+        setFileLength(lb.getFileLength()).
+        setUnderConstruction(lb.isUnderConstruction()).
+        addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
+        setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build();
   }
 
   public static FsServerDefaults convert(FsServerDefaultsProto fs) {
@@ -1000,16 +982,11 @@ public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
       setPermission(PBHelper.convert(fs.getPermission())).
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
+      setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
-
-    if (fs.getSymlink() != null) {
-      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
-    }
     if (fs instanceof HdfsLocatedFileStatus) {
-      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
-      if (locations != null) {
-        builder.setLocations(PBHelper.convert(locations));
-      }
+      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
+      builder.setLocations(PBHelper.convert(locations));
     }
     return builder.build();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 7a52460ef0..17b4d1d90e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto {
 }
 
 message GetBlockLocationsResponseProto {
-  optional LocatedBlocksProto locations = 1;
+  required LocatedBlocksProto locations = 1;
 }
 
 message GetServerDefaultsRequestProto { // No parameters
@@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // void response
 message AddBlockRequestProto {
   required string src = 1;
   required string clientName = 2;
-  optional ExtendedBlockProto previous = 3;
+  required ExtendedBlockProto previous = 3;
   repeated DatanodeInfoProto excludeNodes = 4;
 }
 
@@ -306,7 +306,7 @@ message DistributedUpgradeProgressRequestProto {
   required UpgradeActionProto action = 1;
 }
 message DistributedUpgradeProgressResponseProto {
-  optional UpgradeStatusReportProto report = 1;
+  required UpgradeStatusReportProto report = 1;
 }
 
 message ListCorruptFileBlocksRequestProto {
@@ -330,7 +330,7 @@ message GetFileInfoRequestProto {
 }
 
 message GetFileInfoResponseProto {
-  optional HdfsFileStatusProto fs = 1;
+  required HdfsFileStatusProto fs = 1;
 }
 
 message GetFileLinkInfoRequestProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 71f609e40a..b98a2c2e97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -170,7 +170,7 @@ message HeartbeatRequestProto {
  * cmds - Commands from namenode to datanode.
  */
 message HeartbeatResponseProto {
-  repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
+  repeated DatanodeCommandProto cmds = 1;
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index cc45593b29..9fbf2b969a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -30,8 +30,7 @@ message ExtendedBlockProto {
   required string poolId = 1;   // Block pool id - gloablly unique across clusters
   required uint64 blockId = 2;  // the local id within a pool
   required uint64 generationStamp = 3;
-  optional uint64 numBytes = 4 [default = 0];  // len does not belong in ebid
-                                               // here for historical reasons
+  optional uint64 numBytes = 4;  // block len does not belong in ebid - here for historical reasons
 }
 
 /**
@@ -66,12 +65,12 @@ message DatanodeInfosProto {
  */
 message DatanodeInfoProto {
   required DatanodeIDProto id = 1;
-  optional uint64 capacity = 2 [default = 0];
-  optional uint64 dfsUsed = 3 [default = 0];
-  optional uint64 remaining = 4 [default = 0];
-  optional uint64 blockPoolUsed = 5 [default = 0];
-  optional uint64 lastUpdate = 6 [default = 0];
-  optional uint32 xceiverCount = 7 [default = 0];
+  optional uint64 capacity = 2;
+  optional uint64 dfsUsed = 3;
+  optional uint64 remaining = 4;
+  optional uint64 blockPoolUsed = 5;
+  optional uint64 lastUpdate = 6;
+  optional uint32 xceiverCount = 7;
   optional string location = 8;
   optional string hostName = 9;
   enum AdminState {
@@ -80,7 +79,7 @@ message DatanodeInfoProto {
     DECOMMISSIONED = 2;
   }
 
-  optional AdminState adminState = 10 [default = NORMAL];
+  optional AdminState adminState = 10;
 }
 
 /**
@@ -163,8 +162,8 @@ message HdfsFileStatusProto {
   optional bytes symlink = 9;             // if symlink, target encoded java UTF8
 
   // Optional fields for file
-  optional uint32 block_replication = 10 [default = 0];  // only 16bits used
-  optional uint64 blocksize = 11 [default = 0];
+  optional uint32 block_replication = 10; // Actually a short - only 16bits used
+  optional uint64 blocksize = 11;
   optional LocatedBlocksProto locations = 12;  // suppled only if asked by client
 }
 
@@ -219,7 +218,7 @@ message NamenodeRegistrationProto {
     CHECKPOINT = 3;
   }
   required StorageInfoProto storageInfo = 3;  // Node information
-  optional NamenodeRoleProto role = 4 [default = NAMENODE];  // Namenode role
+  optional NamenodeRoleProto role = 4;  // Namenode role
 }
 
 /**
@@ -265,7 +264,7 @@ message CheckpointCommandProto {
 message BlockProto {
   required uint64 blockId = 1;
   required uint64 genStamp = 2;
-  optional uint64 numBytes = 3 [default = 0];
+  optional uint64 numBytes = 3;
 }
 
 /**
@@ -314,7 +313,7 @@ message NamespaceInfoProto {
 message BlockKeyProto {
   required uint32 keyId = 1;      // Key identifier
   required uint64 expiryDate = 2; // Expiry time in milliseconds
-  optional bytes keyBytes = 3;    // Key secret
+  required bytes keyBytes = 3;    // Key secret
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index e50da8d748..ee3e46998b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -507,11 +507,6 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
     this.waitSafeMode = waitSafeMode;
 
     // use alternate RPC engine if spec'd
-    /*
-    Turned off - see HDFS-2647 and HDFS-2660 for related comments.
-    This test can be turned on when Avro RPC is enabled using mechanism
-    similar to protobuf.
-
     String rpcEngineName = System.getProperty("hdfs.rpc.engine");
 
     if (rpcEngineName != null && !"".equals(rpcEngineName)) {
@@ -535,7 +530,6 @@ private void initMiniDFSCluster(int nameNodePort, int nameNodeHttpPort,
       conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
     }
-    */
 
     int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
     conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
index 7f4ad2f023..350d6ac52a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java
@@ -28,16 +28,9 @@ public class TestDfsOverAvroRpc extends TestLocalDFS {
 
   @Test(timeout=20000)
   public void testWorkingDirectory() throws IOException {
-    /*
-    Test turned off - see HDFS-2647 and HDFS-2660 for related comments.
-    This test can be turned on when Avro RPC is enabled using mechanism
-    similar to protobuf.
-    */
-    /*
     System.setProperty("hdfs.rpc.engine",
                        "org.apache.hadoop.ipc.AvroRpcEngine");
     super.testWorkingDirectory();
-    */
   }
 }