hadoop/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * These .proto interfaces are private and stable.
 * Please see http://wiki.apache.org/hadoop/Compatibility
 * for what changes are allowed for a *stable* .proto interface.
 */

// This file contains the protocol buffers used by the Datanode Protocol,
// i.e. the protocol through which datanodes report to, and receive commands
// from, the namenode.

option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "DatanodeProtocolProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
package hadoop.hdfs;

import "HAServiceProtocol.proto";
import "hdfs.proto";

/**
 * Information to identify a datanode to a namenode
 */
message DatanodeRegistrationProto {
  required DatanodeIDProto datanodeID = 1;    // Datanode information
  required StorageInfoProto storageInfo = 2;  // Storage information (layout version, namespace/cluster IDs, cTime)
  required ExportedBlockKeysProto keys = 3;   // Block access keys
  required string softwareVersion = 4;        // Software version of the DN, e.g. "2.0.0"
}

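// A DatanodeRegistrationProto accompanies every request in this protocol
// (heartbeats, block reports, cache reports, incremental reports, and error
// reports), which lets the namenode confirm that the sender is a currently
// registered, up-to-date datanode.
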
/**
 * Represents a storage available on the datanode
 */
message DatanodeStorageProto {
  enum StorageState {
    NORMAL = 0;           // Storage is available and read-write
    READ_ONLY_SHARED = 1; // Read-only replicas, e.g. on a shared storage device
  }

  required string storageUuid = 1;
  optional StorageState state = 2 [default = NORMAL];
  optional StorageTypeProto storageType = 3 [default = DISK];
}

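// The storageUuid identifies a single storage (volume) on the datanode; the
// same ID ties together the per-storage entries in StorageReportProto,
// StorageBlockReportProto, and StorageReceivedDeletedBlocksProto below.
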
/**
 * Commands sent from namenode to the datanodes
 */
message DatanodeCommandProto {
  enum Type {
    BalancerBandwidthCommand = 0;
    BlockCommand = 1;
    BlockRecoveryCommand = 2;
    FinalizeCommand = 3;
    KeyUpdateCommand = 4;
    RegisterCommand = 5;
    UnusedUpgradeCommand = 6;   // Unused; kept so later ordinals retain their values
    NullDatanodeCommand = 7;    // No command
    BlockIdCommand = 8;
  }

  required Type cmdType = 1;    // Type of the command

  // At most one of the following fields is set, corresponding to cmdType
  optional BalancerBandwidthCommandProto balancerCmd = 2;
  optional BlockCommandProto blkCmd = 3;
  optional BlockRecoveryCommandProto recoveryCmd = 4;
  optional FinalizeCommandProto finalizeCmd = 5;
  optional KeyUpdateCommandProto keyUpdateCmd = 6;
  optional RegisterCommandProto registerCmd = 7;
  optional BlockIdCommandProto blkIdCmd = 8;
}

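// For example, a command asking the datanode to re-register would look
// roughly like this in protobuf text format (illustrative only):
//
//   cmdType: RegisterCommand
//   registerCmd {
//   }
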
/**
 * Command sent from namenode to datanode to set the
 * maximum bandwidth to be used for balancing.
 */
message BalancerBandwidthCommandProto {
  // Maximum bandwidth, in bytes per second, to be used by the datanode
  // for balancing
  required uint64 bandwidth = 1;
}

/**
 * Command to instruct datanodes to perform an action
 * on the given set of blocks.
 */
message BlockCommandProto {
  enum Action {
    TRANSFER = 1;   // Transfer blocks to another datanode
    INVALIDATE = 2; // Invalidate blocks
    SHUTDOWN = 3;   // Shutdown the datanode
  }

  required Action action = 1;
  required string blockPoolId = 2;
  repeated BlockProto blocks = 3;
  repeated DatanodeInfosProto targets = 4;            // Per-block transfer targets (for TRANSFER)
  repeated StorageUuidsProto targetStorageUuids = 5;  // Storages to use on those targets
}

/**
 * Command to instruct datanodes to perform an action
 * on the given set of block IDs.
 */
message BlockIdCommandProto {
  enum Action {
    CACHE = 1;    // Cache the given blocks in memory
    UNCACHE = 2;  // Evict the given blocks from the cache
  }

  required Action action = 1;
  required string blockPoolId = 2;
  repeated uint64 blockIds = 3 [packed=true];
}

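// CACHE/UNCACHE commands are issued by the namenode's centralized cache
// management: the datanode is told which replicas to pin in memory, and it
// reports the cached replicas back via cacheReport below.
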
/**
 * List of blocks to be recovered by the datanode
 */
message BlockRecoveryCommandProto {
  repeated RecoveringBlockProto blocks = 1;
}

/**
 * Finalize the upgrade at the datanode
 */
message FinalizeCommandProto {
  required string blockPoolId = 1; // Block pool to be finalized
}

/**
 * Update the block keys at the datanode
 */
message KeyUpdateCommandProto {
  required ExportedBlockKeysProto keys = 1;
}

/**
 * Instruct datanode to register with the namenode
 */
message RegisterCommandProto {
  // void
}

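// RegisterCommand is typically returned when the namenode does not recognize
// the reporting datanode (for instance after a namenode restart), prompting
// the datanode to re-register before retrying its reports.
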
/**
 * registration - Information of the datanode registering with the namenode
 */
message RegisterDatanodeRequestProto {
  required DatanodeRegistrationProto registration = 1; // Datanode info
}

/**
 * registration - Updated registration of the datanode that successfully
 *                registered. StorageInfo will be updated to include a new
 *                storage ID if the datanode did not have one in the request.
 */
message RegisterDatanodeResponseProto {
  required DatanodeRegistrationProto registration = 1; // Datanode info
}

/**
 * registration - datanode registration information
 * reports - one utilization report per datanode storage
 * xmitsInProgress - number of transfers from this datanode to others
 * xceiverCount - number of active transceiver threads
 * failedVolumes - number of failed volumes
 * cacheCapacity - total cache capacity available at the datanode
 * cacheUsed - amount of cache used
 */
message HeartbeatRequestProto {
  required DatanodeRegistrationProto registration = 1; // Datanode info
  repeated StorageReportProto reports = 2;
  optional uint32 xmitsInProgress = 3 [ default = 0 ];
  optional uint32 xceiverCount = 4 [ default = 0 ];
  optional uint32 failedVolumes = 5 [ default = 0 ];
  optional uint64 cacheCapacity = 6 [ default = 0 ];
  optional uint64 cacheUsed = 7 [ default = 0 ];
}

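/**
 * Utilization report for a single datanode storage; capacity, dfsUsed,
 * remaining, and blockPoolUsed are in bytes.
 */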
message StorageReportProto {
  required string storageUuid = 1 [ deprecated = true ];
  optional bool failed = 2 [ default = false ];
  optional uint64 capacity = 3 [ default = 0 ];
  optional uint64 dfsUsed = 4 [ default = 0 ];
  optional uint64 remaining = 5 [ default = 0 ];
  optional uint64 blockPoolUsed = 6 [ default = 0 ];
  optional DatanodeStorageProto storage = 7; // supersedes storageUuid
}

/**
 * state - State the NN is in when returning response to the DN
 * txid - Highest transaction ID this NN has seen
 */
message NNHAStatusHeartbeatProto {
  required hadoop.common.HAServiceStateProto state = 1;
  required uint64 txid = 2;
}

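// The datanode uses this status to track which namenode is currently active;
// if two namenodes both claim to be active, the one reporting the higher txid
// is treated as the more recent claim.
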
/**
 * cmds - Commands from namenode to datanode.
 * haStatus - Status (from an HA perspective) of the NN sending this response
 */
message HeartbeatResponseProto {
  repeated DatanodeCommandProto cmds = 1; // Returned commands; may be empty
  required NNHAStatusHeartbeatProto haStatus = 2;
  optional RollingUpgradeStatusProto rollingUpgradeStatus = 3;
}

/**
 * registration - datanode registration information
 * blockPoolId - block pool ID of the reported blocks
 * reports - one block report per datanode storage; within each report a block
 *           is represented as multiple longs:
 *           first long represents block ID
 *           second long represents length
 *           third long represents gen stamp
 *           fourth long (if under construction) represents replica state
 */
message BlockReportRequestProto {
  required DatanodeRegistrationProto registration = 1;
  required string blockPoolId = 2;
  repeated StorageBlockReportProto reports = 3;
}

/**
 * Report of blocks in a storage
 */
message StorageBlockReportProto {
  required DatanodeStorageProto storage = 1; // Storage
  repeated uint64 blocks = 2 [packed=true];
}

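// Sketch of the long-array layout in 'blocks' above (this assumes the
// BlockListAsLongs encoding used at this version; see BlockListAsLongs.java
// for the authoritative format):
//   blocks[0]          - number of finalized replicas, N
//   blocks[1]          - number of under-construction replicas, M
//   next N * 3 longs   - (blockId, numBytes, generationStamp) per finalized replica
//   a delimiter triplet of invalid values
//   next M * 4 longs   - (blockId, numBytes, generationStamp, replicaState)
//                        per under-construction replica
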
/**
 * cmd - Command from namenode to the datanode
 */
message BlockReportResponseProto {
  optional DatanodeCommandProto cmd = 1;
}

/**
 * registration - datanode registration information
 * blockPoolId - block pool ID of the reported blocks
 * blocks - IDs of the blocks cached on this datanode, as longs for efficiency
 */
message CacheReportRequestProto {
  required DatanodeRegistrationProto registration = 1;
  required string blockPoolId = 2;
  repeated uint64 blocks = 3 [packed=true];
}

message CacheReportResponseProto {
  optional DatanodeCommandProto cmd = 1;
}

/**
 * Data structure to send received or deleted block information
 * from datanode to namenode.
 */
message ReceivedDeletedBlockInfoProto {
  enum BlockStatus {
    RECEIVING = 1; // block being created
    RECEIVED = 2;  // block creation complete
    DELETED = 3;   // block has been deleted
  }

  required BlockProto block = 1;
  required BlockStatus status = 3;
  optional string deleteHint = 2;
}

/**
 * List of blocks received and deleted for a storage.
 */
message StorageReceivedDeletedBlocksProto {
  required string storageUuid = 1;
  repeated ReceivedDeletedBlockInfoProto blocks = 2;
}

/**
 * registration - datanode registration information
 * blockPoolId - block pool ID of the reported blocks
 * blocks - received/deleted block list, grouped per storage
 */
message BlockReceivedAndDeletedRequestProto {
  required DatanodeRegistrationProto registration = 1;
  required string blockPoolId = 2;
  repeated StorageReceivedDeletedBlocksProto blocks = 3;
}

/**
 * void response
 */
message BlockReceivedAndDeletedResponseProto {
}

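// These incremental reports keep the namenode's block map current between
// full block reports, which are sent far less frequently.
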
/**
 * registration - Datanode reporting the error
 * errorCode - error code indicating the error
 * msg - Free text description of the error
 */
message ErrorReportRequestProto {
  enum ErrorCode {
    NOTIFY = 0;           // Error report to be logged at the namenode
    DISK_ERROR = 1;       // DN has disk errors but still has valid volumes
    INVALID_BLOCK = 2;    // Command from namenode has invalid block ID
    FATAL_DISK_ERROR = 3; // No valid volumes left on datanode
  }
  required DatanodeRegistrationProto registartion = 1; // Registration info
  required uint32 errorCode = 2;  // Error code
  required string msg = 3;        // Error message
}

/**
 * void response
 */
message ErrorReportResponseProto {
}

/**
 * blocks - list of blocks that are reported as corrupt
 */
message ReportBadBlocksRequestProto {
  repeated LocatedBlockProto blocks = 1;
}

/**
 * void response
 */
message ReportBadBlocksResponseProto {
}

/**
 * Commit block synchronization request during lease recovery
 */
message CommitBlockSynchronizationRequestProto {
  required ExtendedBlockProto block = 1;      // Block that was recovered
  required uint64 newGenStamp = 2;            // New generation stamp of the recovered block
  required uint64 newLength = 3;              // New length of the recovered block
  required bool closeFile = 4;                // Whether the file should be closed
  required bool deleteBlock = 5;              // Whether the block should be deleted from the file
  repeated DatanodeIDProto newTaragets = 6;   // Datanodes holding the recovered replicas
  repeated string newTargetStorages = 7;      // Storage IDs on the corresponding datanodes
}

/**
 * void response
 */
message CommitBlockSynchronizationResponseProto {
}

/**
 * Protocol used by a datanode to communicate with the namenode.
 * See the request and response messages for the details of each RPC call.
 */
service DatanodeProtocolService {
  /**
   * Register a datanode at a namenode
   */
  rpc registerDatanode(RegisterDatanodeRequestProto)
      returns(RegisterDatanodeResponseProto);

  /**
   * Send a heartbeat from datanode to namenode
   */
  rpc sendHeartbeat(HeartbeatRequestProto) returns(HeartbeatResponseProto);

  /**
   * Report blocks at a given datanode to the namenode
   */
  rpc blockReport(BlockReportRequestProto) returns(BlockReportResponseProto);

  /**
   * Report cached blocks at a datanode to the namenode
   */
  rpc cacheReport(CacheReportRequestProto) returns(CacheReportResponseProto);

  /**
   * Incremental block report from the DN. This contains info about recently
   * received and deleted blocks, as well as when blocks start being
   * received.
   */
  rpc blockReceivedAndDeleted(BlockReceivedAndDeletedRequestProto)
      returns(BlockReceivedAndDeletedResponseProto);

  /**
   * Report an error from a datanode to the active namenode.
   * Used for debugging.
   */
  rpc errorReport(ErrorReportRequestProto) returns(ErrorReportResponseProto);

  /**
   * Request the namenode version
   */
  rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);

  /**
   * Report corrupt blocks at the specified location
   */
  rpc reportBadBlocks(ReportBadBlocksRequestProto) returns(ReportBadBlocksResponseProto);

  /**
   * Commit block synchronization during lease recovery.
   */
  rpc commitBlockSynchronization(CommitBlockSynchronizationRequestProto)
      returns(CommitBlockSynchronizationResponseProto);
}