From eb9f1b670726e1af03f2e940ce2696b880964972 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Fri, 20 Sep 2013 22:06:09 +0000
Subject: [PATCH] HDFS-5232. Protocol changes to transmit StorageUuid.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1525153 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES_HDFS-2832.txt              |  2 ++
 ...amenodeProtocolServerSideTranslatorPB.java      |  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java        |  2 +-
 ...atanodeProtocolClientSideTranslatorPB.java      |  2 +-
 ...atanodeProtocolServerSideTranslatorPB.java      |  4 +--
 ...atanodeProtocolServerSideTranslatorPB.java      |  2 +-
 .../InterDatanodeProtocolTranslatorPB.java         |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelper.java           | 36 +++++++++----------
 .../main/proto/ClientNamenodeProtocol.proto        |  2 +-
 .../src/main/proto/DatanodeProtocol.proto          |  8 ++---
 .../main/proto/InterDatanodeProtocol.proto         |  2 +-
 .../hadoop-hdfs/src/main/proto/hdfs.proto          | 11 +++---
 12 files changed, 40 insertions(+), 35 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index b9d4981a81..9f4e314ad7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -26,3 +26,5 @@ IMPROVEMENTS:
 
     HDFS-4990. Change BlockPlacementPolicy to choose storages instead of
     datanodes. (szetszwo)
+
+    HDFS-5232. Protocol changes to transmit StorageUuid. (Arpit Agarwal)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 6e8e7bc34e..8290a4b4e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -405,7 +405,7 @@ public GetAdditionalDatanodeResponseProto getAdditionalDatanode(
       throws ServiceException {
     try {
       List<DatanodeInfoProto> existingList = req.getExistingsList();
-      List<String> existingStorageIDsList = req.getExistingStorageIDsList();
+      List<String> existingStorageIDsList = req.getExistingStorageUuidsList();
       List<DatanodeInfoProto> excludesList = req.getExcludesList();
       LocatedBlock result = server.getAdditionalDatanode(req.getSrc(),
           PBHelper.convert(req.getBlk()),

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 84d10e473a..30ea3e562e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -345,7 +345,7 @@ public LocatedBlock getAdditionalDatanode(String src, ExtendedBlock blk,
         .setSrc(src)
         .setBlk(PBHelper.convert(blk))
         .addAllExistings(PBHelper.convert(existings))
-        .addAllExistingStorageIDs(Arrays.asList(existingStorageIDs))
+        .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
         .addAllExcludes(PBHelper.convert(excludes))
         .setNumAdditionalNodes(numAdditionalNodes)
         .setClientName(clientName)
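
Although every call site changes, the rename is wire-compatible: protobuf identifies fields by tag number, not by name, and existingStorageUuids keeps tag 7, so old and new peers exchange identical bytes. Only the generated accessor names change, which is why the patch is almost entirely mechanical. A minimal sketch of the renamed client-side builder call (hypothetical class name; the UUID strings are illustrative, not from the patch):

    import java.util.Arrays;

    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;

    public class ExistingStorageUuidsSketch {
      public static void main(String[] args) {
        // Same field, same tag 7; only the generated method name changed
        // from addAllExistingStorageIDs() to addAllExistingStorageUuids().
        GetAdditionalDatanodeRequestProto.Builder builder =
            GetAdditionalDatanodeRequestProto.newBuilder()
                .addAllExistingStorageUuids(
                    Arrays.asList("DS-0b5c2a91", "DS-4f6e8d07"));  // illustrative
        System.out.println(builder.getExistingStorageUuidsCount());  // 2
      }
    }
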
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index fd4cc4b01c..315ad92d04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -213,7 +213,7 @@ public void blockReceivedAndDeleted(DatanodeRegistration registration,
     for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
       StorageReceivedDeletedBlocksProto.Builder repBuilder =
           StorageReceivedDeletedBlocksProto.newBuilder();
-      repBuilder.setStorageID(storageBlock.getStorageID());
+      repBuilder.setStorageUuid(storageBlock.getStorageID());
       for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
         repBuilder.addBlocks(PBHelper.convert(rdBlock));
       }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 3e424602fa..9a63d37dec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -104,7 +104,7 @@ public HeartbeatResponseProto sendHeartbeat(RpcController controller,
     StorageReport[] report = new StorageReport[list.size()];
     int i = 0;
     for (StorageReportProto p : list) {
-      report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
+      report[i++] = new StorageReport(p.getStorageUuid(), p.getFailed(),
           p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
           p.getBlockPoolUsed());
     }
@@ -174,7 +174,7 @@ public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
       for (int j = 0; j < list.size(); j++) {
         rdBlocks[j] = PBHelper.convert(list.get(j));
       }
-      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageID(), rdBlocks);
+      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
     }
     try {
       impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
index 8f3eed9685..087c697c58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
@@ -82,6 +82,6 @@ public UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
       throw new ServiceException(e);
     }
     return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
-        .setStorageID(storageID).build();
+        .setStorageUuid(storageID).build();
   }
 }
\ No newline at end of file
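
The datanode-side reports in these files group received and deleted blocks per storage, and after the rename each group is keyed by a storageUuid field on the wire; the Java-side StorageReceivedDeletedBlocks class still calls it getStorageID(), as the hunks above show. A minimal sketch of the renamed builder (hypothetical class name; the UUID is illustrative):

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;

    public class StorageUuidReportSketch {
      public static void main(String[] args) {
        // storageUuid is the only required field; blocks are repeated and
        // may be empty. The UUID below is illustrative.
        StorageReceivedDeletedBlocksProto rep =
            StorageReceivedDeletedBlocksProto.newBuilder()
                .setStorageUuid("DS-f2d148b6-91ab-4d64-8fc2-3a5a17b1e26b")
                .build();
        System.out.println(rep.getStorageUuid());
      }
    }
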
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
index 4e518c35bf..5174d86188 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
@@ -109,7 +109,7 @@ public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
         .setNewLength(newLength).setRecoveryId(recoveryId).build();
     try {
       return rpcProxy.updateReplicaUnderRecovery(NULL_CONTROLLER, req
-          ).getStorageID();
+          ).getStorageUuid();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index d118904d0c..c2de12e7dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -94,7 +94,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageIDsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
@@ -226,7 +226,7 @@ public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
         dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
   }
 
@@ -234,7 +234,7 @@ public static DatanodeIDProto convert(DatanodeID dn) {
     return DatanodeIDProto.newBuilder()
         .setIpAddr(dn.getIpAddr())
         .setHostName(dn.getHostName())
-        .setStorageID(dn.getStorageID())
+        .setDatanodeUuid(dn.getStorageID())
         .setXferPort(dn.getXferPort())
         .setInfoPort(dn.getInfoPort())
         .setIpcPort(dn.getIpcPort()).build();
@@ -276,11 +276,11 @@ public static Block convert(BlockProto b) {
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
-        .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build();
+        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build();
   }
 
   public static BlockWithLocations convert(BlockWithLocationsProto b) {
-    return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList()
+    return new BlockWithLocations(convert(b.getBlock()), b.getStorageUuidsList()
         .toArray(new String[0]));
   }
 
@@ -746,7 +746,7 @@ public static BlockCommandProto convert(BlockCommand cmd) {
       builder.addBlocks(PBHelper.convert(blocks[i]));
     }
     builder.addAllTargets(convert(cmd.getTargets()))
-        .addAllTargetStorageIDs(convert(cmd.getTargetStorageIDs()));
+        .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
     return builder.build();
   }
 
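
StorageUuidsProto, renamed from StorageIDsProto here, exists because protobuf cannot nest one repeated field directly inside another: the String[][] of per-target storage UUIDs in a BlockCommand has to be wrapped one message per target. A round-trip sketch of that encoding, independent of PBHelper (hypothetical class name; the UUIDs are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;

    public class StorageUuidsRoundTripSketch {
      public static void main(String[] args) {
        // One inner array per target datanode; protobuf has no
        // "repeated repeated string", hence the wrapper message.
        String[][] perTarget = { { "DS-aaaa" }, { "DS-bbbb", "DS-cccc" } };

        List<StorageUuidsProto> wire = new ArrayList<StorageUuidsProto>();
        for (String[] uuids : perTarget) {
          wire.add(StorageUuidsProto.newBuilder()
              .addAllStorageUuids(Arrays.asList(uuids)).build());
        }

        // Decode back to String[][], mirroring the BlockCommandProto path.
        String[][] decoded = new String[wire.size()][];
        for (int i = 0; i < wire.size(); i++) {
          List<String> u = wire.get(i).getStorageUuidsList();
          decoded[i] = u.toArray(new String[u.size()]);
        }
        System.out.println(Arrays.deepToString(decoded));
      }
    }
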
@@ -759,11 +759,11 @@ private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
     return Arrays.asList(ret);
   }
 
-  private static List<StorageIDsProto> convert(String[][] targetStorageIDs) {
-    StorageIDsProto[] ret = new StorageIDsProto[targetStorageIDs.length];
-    for (int i = 0; i < targetStorageIDs.length; i++) {
-      ret[i] = StorageIDsProto.newBuilder()
-          .addAllStorageIDs(Arrays.asList(targetStorageIDs[i])).build();
+  private static List<StorageUuidsProto> convert(String[][] targetStorageUuids) {
+    StorageUuidsProto[] ret = new StorageUuidsProto[targetStorageUuids.length];
+    for (int i = 0; i < targetStorageUuids.length; i++) {
+      ret[i] = StorageUuidsProto.newBuilder()
+          .addAllStorageUuids(Arrays.asList(targetStorageUuids[i])).build();
     }
     return Arrays.asList(ret);
   }
@@ -843,10 +843,10 @@ public static BlockCommand convert(BlockCommandProto blkCmd) {
       targets[i] = PBHelper.convert(targetList.get(i));
     }
 
-    List<StorageIDsProto> targetStorageIDsList = blkCmd.getTargetStorageIDsList();
-    String[][] targetStorageIDs = new String[targetStorageIDsList.size()][];
+    List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
+    String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
     for(int i = 0; i < targetStorageIDs.length; i++) {
-      List<String> storageIDs = targetStorageIDsList.get(i).getStorageIDsList();
+      List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
       targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
     }
 
@@ -1375,7 +1375,7 @@ public static DatanodeStorageProto convert(DatanodeStorage s) {
     return DatanodeStorageProto.newBuilder()
         .setState(PBHelper.convertState(s.getState()))
         .setStorageType(PBHelper.convertStorageType(s.getStorageType()))
-        .setStorageID(s.getStorageID()).build();
+        .setStorageUuid(s.getStorageID()).build();
   }
 
   private static StorageState convertState(State state) {
@@ -1406,11 +1406,11 @@ private static StorageTypeProto convertStorageType(
 
   public static DatanodeStorage convert(DatanodeStorageProto s) {
     if (s.hasStorageType()) {
-      return new DatanodeStorage(s.getStorageID(),
+      return new DatanodeStorage(s.getStorageUuid(),
           PBHelper.convertState(s.getState()),
           PBHelper.convertType(s.getStorageType()));
     } else {
-      return new DatanodeStorage(s.getStorageID(),
+      return new DatanodeStorage(s.getStorageUuid(),
           PBHelper.convertState(s.getState()));
     }
   }
@@ -1440,7 +1440,7 @@ public static StorageReportProto convert(StorageReport r) {
     return StorageReportProto.newBuilder()
         .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
         .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
-        .setStorageID(r.getStorageID()).build();
+        .setStorageUuid(r.getStorageID()).build();
   }
 
   public static JournalInfo convert(JournalInfoProto info) {

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 968b074035..fda60857ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -141,7 +141,7 @@ message GetAdditionalDatanodeRequestProto {
   repeated DatanodeInfoProto excludes = 4;
   required uint32 numAdditionalNodes = 5;
   required string clientName = 6;
-  repeated string existingStorageIDs = 7;
+  repeated string existingStorageUuids = 7;
 }
 
 message GetAdditionalDatanodeResponseProto {
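
The DatanodeProtocol.proto changes that follow rename the same key on the heartbeat path: each StorageReportProto is identified by storageUuid, while the usage and capacity fields keep their optional-with-default shape. A sketch of a single report as a datanode-side caller would build it (hypothetical class name; the UUID and byte counts are illustrative):

    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;

    public class HeartbeatStorageReportSketch {
      public static void main(String[] args) {
        long tb = 1024L * 1024 * 1024 * 1024;
        // Only storageUuid is required; failed, capacity, dfsUsed,
        // remaining, and blockPoolUsed all carry defaults when unset.
        StorageReportProto report = StorageReportProto.newBuilder()
            .setStorageUuid("DS-b8b23c6b-516a-4b8f-a1f2-8f4c2a7b9d11")
            .setCapacity(4 * tb)
            .setDfsUsed(1 * tb)
            .setRemaining(3 * tb)
            .build();
        System.out.println(report.getStorageUuid() + " failed=" + report.getFailed());
      }
    }
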
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 2f845e08a4..6f93afcc96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -53,7 +53,7 @@ message DatanodeStorageProto {
     READ_ONLY = 1;
   }
 
-  required string storageID = 1; // Unique identifier for the storage
+  required string storageUuid = 1;
   optional StorageState state = 2 [default = NORMAL];
   optional StorageTypeProto storageType = 3;
 }
@@ -110,7 +110,7 @@ message BlockCommandProto {
   required string blockPoolId = 2;
   repeated BlockProto blocks = 3;
   repeated DatanodeInfosProto targets = 4;
-  repeated StorageIDsProto targetStorageIDs = 5;
+  repeated StorageUuidsProto targetStorageUuids = 5;
 }
 
 /**
@@ -176,7 +176,7 @@ message HeartbeatRequestProto {
 }
 
 message StorageReportProto {
-  required string storageID = 1;
+  required string storageUuid = 1;
   optional bool failed = 2 [ default = false ];
   optional uint64 capacity = 3 [ default = 0 ];
   optional uint64 dfsUsed = 4 [ default = 0 ];
@@ -250,7 +250,7 @@ message ReceivedDeletedBlockInfoProto {
  * List of blocks received and deleted for a storage.
  */
 message StorageReceivedDeletedBlocksProto {
-  required string storageID = 1;
+  required string storageUuid = 1;
   repeated ReceivedDeletedBlockInfoProto blocks = 2;
 }
 

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
index c76f7edfa8..47f79bed16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
@@ -65,7 +65,7 @@ message UpdateReplicaUnderRecoveryRequestProto {
  * Response returns updated block information
  */
 message UpdateReplicaUnderRecoveryResponseProto {
-  required string storageID = 1; // ID of the storage that stores replica
+  optional string storageUuid = 1; // ID of the storage that stores replica
 }
 
 /**

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 22121a73b5..b8775259f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -50,7 +50,10 @@ message ExtendedBlockProto {
 message DatanodeIDProto {
   required string ipAddr = 1;    // IP address
   required string hostName = 2;  // hostname
-  required string storageID = 3; // unique storage id
+  required string datanodeUuid = 3;  // UUID assigned to the Datanode. For
+                                     // upgraded clusters this is the same
+                                     // as the original StorageID of the
+                                     // Datanode.
   required uint32 xferPort = 4;  // data streaming port
   required uint32 infoPort = 5;  // info server port
   required uint32 ipcPort = 6;   // ipc server port
@@ -124,8 +127,8 @@ enum StorageTypeProto {
 /**
  * A list of storage IDs.
  */
-message StorageIDsProto {
-  repeated string storageIDs = 1;
+message StorageUuidsProto {
+  repeated string storageUuids = 1;
 }
 
@@ -348,7 +351,7 @@ message BlockProto {
  */
 message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
-  repeated string storageIDs = 2;  // Datanodes with replicas of the block
+  repeated string storageUuids = 2; // Datanodes with replicas of the block
 }
 
 /**
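
The datanodeUuid comment above carries the compatibility story for the whole branch: on an upgraded cluster the value in tag 3 is simply the node's original StorageID, so the renamed field still identifies the same datanode. Only the protocol side is renamed in this patch; DatanodeID still exposes the value as getStorageID(), as the PBHelper hunks show. A round-trip sketch (hypothetical class name; the legacy-format ID is illustrative, and the DatanodeID constructor is assumed to be the six-argument form used by PBHelper above):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class DatanodeUuidRoundTripSketch {
      public static void main(String[] args) {
        // Legacy-format storage ID, as an upgraded cluster would carry it.
        DatanodeID dn = new DatanodeID("10.0.0.7", "dn7.example.com",
            "DS-1397056179-10.0.0.7-50010-1379714826242",
            50010, 50075, 50020);
        DatanodeIDProto proto = PBHelper.convert(dn);
        // Same string; only the wire-side field name changed.
        System.out.println(proto.getDatanodeUuid().equals(dn.getStorageID()));
      }
    }
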