From 7c5c099324d9168114be2f1233d49fdb65a8c1f2 Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Tue, 22 Sep 2015 20:57:05 -0700 Subject: [PATCH] HDFS-8733. Keep server related definition in hdfs.proto on server side. Contributed by Mingliang Liu. --- .../src/main/proto/hdfs.proto | 166 --------------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../hadoop-hdfs/src/contrib/bkjournal/pom.xml | 1 + .../bkjournal/src/main/proto/bkjournal.proto | 1 + ...atanodeProtocolClientSideTranslatorPB.java | 2 +- ...atanodeProtocolServerSideTranslatorPB.java | 4 +- ...amenodeProtocolServerSideTranslatorPB.java | 4 +- .../NamenodeProtocolTranslatorPB.java | 5 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 32 +-- .../src/main/proto/DatanodeProtocol.proto | 1 + .../src/main/proto/HdfsServer.proto | 201 ++++++++++++++++++ .../main/proto/InterDatanodeProtocol.proto | 1 + .../src/main/proto/JournalProtocol.proto | 1 + .../src/main/proto/NamenodeProtocol.proto | 1 + .../src/main/proto/QJournalProtocol.proto | 1 + .../hadoop/hdfs/protocolPB/TestPBHelper.java | 24 +-- 17 files changed, 247 insertions(+), 202 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto index ee77dc0df0..0e2d541869 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto @@ -199,12 +199,6 @@ message BlockStoragePolicyProto { optional StorageTypesProto replicationFallbackPolicy = 5; } -/** - * A list of storage IDs. - */ -message StorageUuidsProto { - repeated string storageUuids = 1; -} /** * A LocatedBlock gives information about a block and its location. @@ -414,68 +408,6 @@ message SnapshotDiffReportProto { repeated SnapshotDiffReportEntryProto diffReportEntries = 4; } -/** - * Common node information shared by all the nodes in the cluster - */ -message StorageInfoProto { - required uint32 layoutVersion = 1; // Layout version of the file system - required uint32 namespceID = 2; // File system namespace ID - required string clusterID = 3; // ID of the cluster - required uint64 cTime = 4; // File system creation time -} - -/** - * Information sent by a namenode to identify itself to the primary namenode. - */ -message NamenodeRegistrationProto { - required string rpcAddress = 1; // host:port of the namenode RPC address - required string httpAddress = 2; // host:port of the namenode http server - enum NamenodeRoleProto { - NAMENODE = 1; - BACKUP = 2; - CHECKPOINT = 3; - } - required StorageInfoProto storageInfo = 3; // Node information - optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role -} - -/** - * Unique signature to identify checkpoint transactions. - */ -message CheckpointSignatureProto { - required string blockPoolId = 1; - required uint64 mostRecentCheckpointTxId = 2; - required uint64 curSegmentTxId = 3; - required StorageInfoProto storageInfo = 4; -} - -/** - * Command sent from one namenode to another namenode. - */ -message NamenodeCommandProto { - enum Type { - NamenodeCommand = 0; // Base command - CheckPointCommand = 1; // Check point command - } - required uint32 action = 1; - required Type type = 2; - optional CheckpointCommandProto checkpointCmd = 3; -} - -/** - * Command returned from primary to checkpointing namenode. 
- * This command has checkpoint signature that identifies - * checkpoint transaction and is needed for further - * communication related to checkpointing. - */ -message CheckpointCommandProto { - // Unique signature to identify checkpoint transation - required CheckpointSignatureProto signature = 1; - - // If true, return transfer image to primary upon the completion of checkpoint - required bool needToReturnImage = 2; -} - /** * Block information * @@ -491,104 +423,6 @@ message BlockProto { optional uint64 numBytes = 3 [default = 0]; } -/** - * Block and datanodes where is it located - */ -message BlockWithLocationsProto { - required BlockProto block = 1; // Block - repeated string datanodeUuids = 2; // Datanodes with replicas of the block - repeated string storageUuids = 3; // Storages with replicas of the block - repeated StorageTypeProto storageTypes = 4; -} - -/** - * List of block with locations - */ -message BlocksWithLocationsProto { - repeated BlockWithLocationsProto blocks = 1; -} - -/** - * Editlog information with available transactions - */ -message RemoteEditLogProto { - required uint64 startTxId = 1; // Starting available edit log transaction - required uint64 endTxId = 2; // Ending available edit log transaction - optional bool isInProgress = 3 [default = false]; -} - -/** - * Enumeration of editlogs available on a remote namenode - */ -message RemoteEditLogManifestProto { - repeated RemoteEditLogProto logs = 1; -} - -/** - * Namespace information that describes namespace on a namenode - */ -message NamespaceInfoProto { - required string buildVersion = 1; // Software revision version (e.g. an svn or git revision) - required uint32 unused = 2; // Retained for backward compatibility - required string blockPoolID = 3; // block pool used by the namespace - required StorageInfoProto storageInfo = 4;// Node information - required string softwareVersion = 5; // Software version number (e.g. 2.0.0) - optional uint64 capabilities = 6 [default = 0]; // feature flags -} - -/** - * Block access token information - */ -message BlockKeyProto { - required uint32 keyId = 1; // Key identifier - required uint64 expiryDate = 2; // Expiry time in milliseconds - optional bytes keyBytes = 3; // Key secret -} - -/** - * Current key and set of block keys at the namenode. - */ -message ExportedBlockKeysProto { - required bool isBlockTokenEnabled = 1; - required uint64 keyUpdateInterval = 2; - required uint64 tokenLifeTime = 3; - required BlockKeyProto currentKey = 4; - repeated BlockKeyProto allKeys = 5; -} - -/** - * State of a block replica at a datanode - */ -enum ReplicaStateProto { - FINALIZED = 0; // State of a replica when it is not modified - RBW = 1; // State of replica that is being written to - RWR = 2; // State of replica that is waiting to be recovered - RUR = 3; // State of replica that is under recovery - TEMPORARY = 4; // State of replica that is created for replication -} - -/** - * Block that needs to be recovered with at a given location - */ -message RecoveringBlockProto { - required uint64 newGenStamp = 1; // New genstamp post recovery - required LocatedBlockProto block = 2; // Block to be recovered - optional BlockProto truncateBlock = 3; // New block for recovery (truncate) -} - -/** - * void request - */ -message VersionRequestProto { -} - -/** - * Version response from namenode. 
- */ -message VersionResponseProto { - required NamespaceInfoProto info = 1; -} - /** * Information related to a snapshot * TODO: add more information diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0718a3ab75..b900d9130a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -947,6 +947,9 @@ Release 2.8.0 - UNRELEASED HDFS-9039. Separate client and server side methods of o.a.h.hdfs. NameNodeProxies. (Mingliang Liu via wheat9) + HDFS-8733. Keep server related definition in hdfs.proto on server side. + (Mingliang Liu via wheat9) + OPTIMIZATIONS HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index d0c2dc7d59..6a93331f80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -340,6 +340,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/src/main/proto + HdfsServer.proto DatanodeProtocol.proto HAZKInfo.proto InterDatanodeProtocol.proto diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml index 7e58606181..005ee4dbdc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml @@ -113,6 +113,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto ${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto + ${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto ${basedir}/src/main/proto diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto index c8091054b5..15fa479ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/proto/bkjournal.proto @@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true; package hadoop.hdfs; import "hdfs.proto"; +import "HdfsServer.proto"; message VersionProto { required int32 layoutVersion = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index 18f89f8ac0..705d573194 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -46,7 +46,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import 
org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index 94d1f0c729..4b9f7c4b5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -46,8 +46,8 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java index 91ffb1b330..db7a8d2d8e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java @@ -20,8 +20,8 @@ import java.io.IOException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java index bcb96ba4d3..6fc5fc7374 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java @@ -24,9 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; -import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 7c08f716b3..75b3811eff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -45,27 +45,27 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto; -import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto; +import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ReplicaStateProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto; +import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageUuidsProto; import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto; import org.apache.hadoop.hdfs.security.token.block.BlockKey; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index b87e7533bb..727259f99b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -33,6 +33,7 @@ package hadoop.hdfs.datanode; import "HAServiceProtocol.proto"; import "hdfs.proto"; +import "HdfsServer.proto"; /** * Information to identify a datanode to a namenode diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto new file mode 100644 index 0000000000..3b60e51a64 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto @@ -0,0 +1,201 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and stable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for a *stable* .proto interface. + */ + +// This file contains protocol buffers that are used throughout HDFS -- i.e. +// by the client, server, and data transfer protocols. + + +option java_package = "org.apache.hadoop.hdfs.protocol.proto"; +option java_outer_classname = "HdfsServerProtos"; +option java_generate_equals_and_hash = true; +package hadoop.hdfs; + +import "hdfs.proto"; + +/** + * A list of storage IDs. 
+ */ +message StorageUuidsProto { + repeated string storageUuids = 1; +} + +/** + * Block access token information + */ +message BlockKeyProto { + required uint32 keyId = 1; // Key identifier + required uint64 expiryDate = 2; // Expiry time in milliseconds + optional bytes keyBytes = 3; // Key secret +} + +/** + * Current key and set of block keys at the namenode. + */ +message ExportedBlockKeysProto { + required bool isBlockTokenEnabled = 1; + required uint64 keyUpdateInterval = 2; + required uint64 tokenLifeTime = 3; + required BlockKeyProto currentKey = 4; + repeated BlockKeyProto allKeys = 5; +} + +/** + * Block and datanodes where is it located + */ +message BlockWithLocationsProto { + required BlockProto block = 1; // Block + repeated string datanodeUuids = 2; // Datanodes with replicas of the block + repeated string storageUuids = 3; // Storages with replicas of the block + repeated StorageTypeProto storageTypes = 4; +} + +/** + * List of block with locations + */ +message BlocksWithLocationsProto { + repeated BlockWithLocationsProto blocks = 1; +} + +/** + * Editlog information with available transactions + */ +message RemoteEditLogProto { + required uint64 startTxId = 1; // Starting available edit log transaction + required uint64 endTxId = 2; // Ending available edit log transaction + optional bool isInProgress = 3 [default = false]; +} + +/** + * Enumeration of editlogs available on a remote namenode + */ +message RemoteEditLogManifestProto { + repeated RemoteEditLogProto logs = 1; +} + +/** + * Namespace information that describes namespace on a namenode + */ +message NamespaceInfoProto { + required string buildVersion = 1; // Software revision version (e.g. an svn or git revision) + required uint32 unused = 2; // Retained for backward compatibility + required string blockPoolID = 3; // block pool used by the namespace + required StorageInfoProto storageInfo = 4;// Node information + required string softwareVersion = 5; // Software version number (e.g. 2.0.0) + optional uint64 capabilities = 6 [default = 0]; // feature flags +} + +/** + * State of a block replica at a datanode + */ +enum ReplicaStateProto { + FINALIZED = 0; // State of a replica when it is not modified + RBW = 1; // State of replica that is being written to + RWR = 2; // State of replica that is waiting to be recovered + RUR = 3; // State of replica that is under recovery + TEMPORARY = 4; // State of replica that is created for replication +} + +/** + * Block that needs to be recovered with at a given location + */ +message RecoveringBlockProto { + required uint64 newGenStamp = 1; // New genstamp post recovery + required LocatedBlockProto block = 2; // Block to be recovered + optional BlockProto truncateBlock = 3; // New block for recovery (truncate) +} + +/** + * Unique signature to identify checkpoint transactions. + */ +message CheckpointSignatureProto { + required string blockPoolId = 1; + required uint64 mostRecentCheckpointTxId = 2; + required uint64 curSegmentTxId = 3; + required StorageInfoProto storageInfo = 4; +} + +/** + * Command returned from primary to checkpointing namenode. + * This command has checkpoint signature that identifies + * checkpoint transaction and is needed for further + * communication related to checkpointing. 
+ */ +message CheckpointCommandProto { + // Unique signature to identify checkpoint transation + required CheckpointSignatureProto signature = 1; + + // If true, return transfer image to primary upon the completion of checkpoint + required bool needToReturnImage = 2; +} + +/** + * Command sent from one namenode to another namenode. + */ +message NamenodeCommandProto { + enum Type { + NamenodeCommand = 0; // Base command + CheckPointCommand = 1; // Check point command + } + required uint32 action = 1; + required Type type = 2; + optional CheckpointCommandProto checkpointCmd = 3; +} + +/** + * void request + */ +message VersionRequestProto { +} + +/** + * Version response from namenode. + */ +message VersionResponseProto { + required NamespaceInfoProto info = 1; +} + +/** + * Common node information shared by all the nodes in the cluster + */ +message StorageInfoProto { + required uint32 layoutVersion = 1; // Layout version of the file system + required uint32 namespceID = 2; // File system namespace ID + required string clusterID = 3; // ID of the cluster + required uint64 cTime = 4; // File system creation time +} + +/** + * Information sent by a namenode to identify itself to the primary namenode. + */ +message NamenodeRegistrationProto { + required string rpcAddress = 1; // host:port of the namenode RPC address + required string httpAddress = 2; // host:port of the namenode http server + enum NamenodeRoleProto { + NAMENODE = 1; + BACKUP = 2; + CHECKPOINT = 3; + } + required StorageInfoProto storageInfo = 3; // Node information + optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto index 1a21777988..580f8d3473 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto @@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true; package hadoop.hdfs; import "hdfs.proto"; +import "HdfsServer.proto"; /** * Block with location information and new generation stamp diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto index 0de717eb5c..3fd029b736 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto @@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true; package hadoop.hdfs; import "hdfs.proto"; +import "HdfsServer.proto"; /** * Journal information used by the journal receiver to identify a journal. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
index f7c1312bfc..d8b1e44868 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
@@ -32,6 +32,7 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdfs.namenode;
 
 import "hdfs.proto";
+import "HdfsServer.proto";
 
 /**
  * Get list of blocks for a given datanode with the total length
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
index 809ee3580e..960a21f5b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
@@ -29,6 +29,7 @@ option java_generate_equals_and_hash = true;
 package hadoop.hdfs.qjournal;
 
 import "hdfs.proto";
+import "HdfsServer.proto";
 
 message JournalIdProto {
   required string identifier = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 851e5b9b9b..2bfba98cf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -43,23 +43,23 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
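
Illustrative usage note (not part of the patch): after this change, server-side code obtains the relocated generated classes from the HdfsServerProtos outer class instead of HdfsProtos, while the per-message builder API is unchanged. The sketch below is a hypothetical caller, not code from this patch; the builder and accessor names are assumed to follow protobuf-java's standard code generation for the fields declared in HdfsServer.proto above.

import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;

/** Hypothetical example; only illustrates that the outer class is now HdfsServerProtos. */
public class HdfsServerProtosUsageSketch {
  public static void main(String[] args) {
    // VersionRequestProto declares no fields, so the default instance is the whole request.
    VersionRequestProto versionRequest = VersionRequestProto.getDefaultInstance();

    // StorageInfoProto mirrors the message definition in HdfsServer.proto, including the
    // historical "namespceID" field spelling, which is kept for wire compatibility.
    StorageInfoProto storageInfo = StorageInfoProto.newBuilder()
        .setLayoutVersion(1)
        .setNamespceID(42)
        .setClusterID("CID-example")
        .setCTime(0L)
        .build();

    System.out.println(versionRequest.isInitialized());
    System.out.println(storageInfo.getClusterID());
  }
}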