HDFS-6739. Add getDatanodeStorageReport to ClientProtocol.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1614215 13f79535-47bb-0310-9956-ffa450edef68
parent f49c2a1b62
commit 4f9ffc7455
@@ -324,6 +324,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6750. The DataNode should use its shared memory segment to mark
     short-circuit replicas that have been unlinked as stale (cmccabe)
 
+    HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -32,19 +32,21 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
@@ -60,8 +62,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -91,7 +91,6 @@
 
 import javax.net.SocketFactory;
 
-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -112,22 +111,22 @@
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.VolumeId;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
@@ -158,8 +157,8 @@
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
@@ -175,6 +174,7 @@
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -200,6 +200,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;
 
 /********************************************************
@@ -2192,6 +2193,11 @@ public DatanodeInfo[] datanodeReport(DatanodeReportType type)
     return namenode.getDatanodeReport(type);
   }
 
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      DatanodeReportType type) throws IOException {
+    return namenode.getDatanodeStorageReport(type);
+  }
+
   /**
    * Enter, leave or get safe mode.
    * 
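As a usage sketch (not part of the patch): once the client method above is available, per-storage usage can be read through a DFSClient that is already connected to a NameNode, for example cluster.getFileSystem().dfs in a MiniDFSCluster test, as the updated TestDatanodeReport further below does. The StorageReport accessors used here (getStorage, getCapacity, getDfsUsed, getRemaining) are the existing ones in org.apache.hadoop.hdfs.server.protocol.StorageReport.

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageReportUsageSketch {
  /** Print one line per storage (volume) of every live datanode. */
  public static void printStorageReports(DFSClient client) throws IOException {
    final DatanodeStorageReport[] reports =
        client.getDatanodeStorageReport(DatanodeReportType.LIVE);
    for (DatanodeStorageReport r : reports) {
      System.out.println(r.getDatanodeInfo().getXferAddr());
      for (StorageReport s : r.getStorageReports()) {
        System.out.printf("  %s: capacity=%d, dfsUsed=%d, remaining=%d%n",
            s.getStorage().getStorageID(), s.getCapacity(), s.getDfsUsed(),
            s.getRemaining());
      }
    }
  }
}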
@@ -24,6 +24,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -31,11 +32,10 @@
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.AtMostOnce;
@@ -654,6 +655,13 @@ public void renewLease(String clientName) throws AccessControlException,
   public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
       throws IOException;
 
+  /**
+   * Get a report on the current datanode storages.
+   */
+  @Idempotent
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      HdfsConstants.DatanodeReportType type) throws IOException;
+
   /**
    * Get the block size for the given file.
    * @param filename The name of the file
@@ -72,6 +72,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@@ -93,6 +94,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -174,7 +177,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@@ -655,6 +657,21 @@ public GetDatanodeReportResponseProto getDatanodeReport(
     }
   }
 
+  @Override
+  public GetDatanodeStorageReportResponseProto getDatanodeStorageReport(
+      RpcController controller, GetDatanodeStorageReportRequestProto req)
+      throws ServiceException {
+    try {
+      List<DatanodeStorageReportProto> reports = PBHelper.convertDatanodeStorageReports(
+          server.getDatanodeStorageReport(PBHelper.convert(req.getType())));
+      return GetDatanodeStorageReportResponseProto.newBuilder()
+          .addAllDatanodeStorageReports(reports)
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public GetPreferredBlockSizeResponseProto getPreferredBlockSize(
       RpcController controller, GetPreferredBlockSizeRequestProto req)
@@ -94,6 +94,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -151,6 +152,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -580,6 +582,20 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
     }
   }
 
+  @Override
+  public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
+      throws IOException {
+    final GetDatanodeStorageReportRequestProto req
+        = GetDatanodeStorageReportRequestProto.newBuilder()
+            .setType(PBHelper.convert(type)).build();
+    try {
+      return PBHelper.convertDatanodeStorageReports(
+          rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public long getPreferredBlockSize(String filename) throws IOException,
       UnresolvedLinkException {
@@ -21,18 +21,13 @@
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
@@ -51,7 +46,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -61,14 +55,10 @@
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -137,9 +127,7 @@ public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
         .setRegistration(PBHelper.convert(registration))
         .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
         .setFailedVolumes(failedVolumes);
-    for (StorageReport r : reports) {
-      builder.addReports(PBHelper.convert(r));
-    }
+    builder.addAllReports(PBHelper.convertStorageReports(reports));
     if (cacheCapacity != 0) {
       builder.setCacheCapacity(cacheCapacity);
     }
@@ -90,6 +90,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
@@ -102,14 +103,11 @@
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -125,6 +123,8 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
@@ -149,6 +149,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
@@ -182,6 +183,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -620,6 +622,41 @@ public static DatanodeInfoProto convert(DatanodeInfo info) {
     return builder.build();
   }
 
+  public static DatanodeStorageReportProto convertDatanodeStorageReport(
+      DatanodeStorageReport report) {
+    return DatanodeStorageReportProto.newBuilder()
+        .setDatanodeInfo(convert(report.getDatanodeInfo()))
+        .addAllStorageReports(convertStorageReports(report.getStorageReports()))
+        .build();
+  }
+
+  public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
+      DatanodeStorageReport[] reports) {
+    final List<DatanodeStorageReportProto> protos
+        = new ArrayList<DatanodeStorageReportProto>(reports.length);
+    for(int i = 0; i < reports.length; i++) {
+      protos.add(convertDatanodeStorageReport(reports[i]));
+    }
+    return protos;
+  }
+
+  public static DatanodeStorageReport convertDatanodeStorageReport(
+      DatanodeStorageReportProto proto) {
+    return new DatanodeStorageReport(
+        convert(proto.getDatanodeInfo()),
+        convertStorageReports(proto.getStorageReportsList()));
+  }
+
+  public static DatanodeStorageReport[] convertDatanodeStorageReports(
+      List<DatanodeStorageReportProto> protos) {
+    final DatanodeStorageReport[] reports
+        = new DatanodeStorageReport[protos.size()];
+    for(int i = 0; i < reports.length; i++) {
+      reports[i] = convertDatanodeStorageReport(protos.get(i));
+    }
+    return reports;
+  }
+
   public static AdminStates convert(AdminState adminState) {
     switch(adminState) {
     case DECOMMISSION_INPROGRESS:
@@ -1713,6 +1750,15 @@ public static StorageReport[] convertStorageReports(
     return report;
   }
 
+  public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
+    final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
+        storages.length);
+    for(int i = 0; i < storages.length; i++) {
+      protos.add(convert(storages[i]));
+    }
+    return protos;
+  }
+
   public static JournalInfo convert(JournalInfoProto info) {
     int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
     int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
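A quick sketch (not from the commit) of how the PBHelper converters added above compose: an array of DatanodeStorageReport objects can be mapped to its protobuf form and back with the two overloaded convertDatanodeStorageReports methods, which is what the translator classes earlier in this change do on each side of the RPC. The input array is assumed to come from FSNamesystem#getDatanodeStorageReport or a similar source.

import java.util.List;

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;

class PBHelperRoundTripSketch {
  /** Serialize to protobuf messages and immediately deserialize again. */
  static DatanodeStorageReport[] roundTrip(DatanodeStorageReport[] original) {
    final List<DatanodeStorageReportProto> protos =
        PBHelper.convertDatanodeStorageReports(original);
    return PBHelper.convertDatanodeStorageReports(protos);
  }
}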
@@ -259,6 +259,15 @@ DatanodeStorageInfo[] getStorageInfos() {
     }
   }
 
+  public StorageReport[] getStorageReports() {
+    final StorageReport[] reports = new StorageReport[storageMap.size()];
+    final DatanodeStorageInfo[] infos = getStorageInfos();
+    for(int i = 0; i < infos.length; i++) {
+      reports[i] = infos[i].toStorageReport();
+    }
+    return reports;
+  }
+
   boolean hasStaleStorages() {
     synchronized (storageMap) {
       for (DatanodeStorageInfo storage : storageMap.values()) {
@@ -292,6 +292,12 @@ public String toString() {
     return "[" + storageType + "]" + storageID + ":" + state;
   }
 
+  StorageReport toStorageReport() {
+    return new StorageReport(
+        new DatanodeStorage(storageID, state, storageType),
+        false, capacity, dfsUsed, remaining, blockPoolUsed);
+  }
+
   /** @return the first {@link DatanodeStorageInfo} corresponding to
    *          the given datanode
    */
@@ -62,6 +62,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
@@ -83,9 +85,6 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
-
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.BufferedWriter;
@@ -231,6 +230,7 @@
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -4916,6 +4916,28 @@ DatanodeInfo[] datanodeReport(final DatanodeReportType type
     }
   }
 
+  DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
+      ) throws AccessControlException, StandbyException {
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.UNCHECKED);
+    readLock();
+    try {
+      checkOperation(OperationCategory.UNCHECKED);
+      final DatanodeManager dm = getBlockManager().getDatanodeManager();
+      final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
+
+      DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
+      for (int i = 0; i < reports.length; i++) {
+        final DatanodeDescriptor d = datanodes.get(i);
+        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+            d.getStorageReports());
+      }
+      return reports;
+    } finally {
+      readUnlock();
+    }
+  }
+
   /**
    * Save namespace image.
    * This will save current namespace into fsimage file and empty edits file.
@@ -115,6 +115,7 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -830,11 +831,23 @@ public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
       throws IOException {
     DatanodeInfo results[] = namesystem.datanodeReport(type);
     if (results == null ) {
-      throw new IOException("Cannot find datanode report");
+      throw new IOException("Failed to get datanode report for " + type
+          + " datanodes.");
     }
     return results;
   }
 
+  @Override // ClientProtocol
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      DatanodeReportType type) throws IOException {
+    final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
+    if (reports == null ) {
+      throw new IOException("Failed to get datanode storage report for " + type
+          + " datanodes.");
+    }
+    return reports;
+  }
+
   @Override // ClientProtocol
   public boolean setSafeMode(SafeModeAction action, boolean isChecked)
       throws IOException {
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+/**
+ * Class captures information of a datanode and its storages.
+ */
+public class DatanodeStorageReport {
+  final DatanodeInfo datanodeInfo;
+  final StorageReport[] storageReports;
+
+  public DatanodeStorageReport(DatanodeInfo datanodeInfo,
+      StorageReport[] storageReports) {
+    this.datanodeInfo = datanodeInfo;
+    this.storageReports = storageReports;
+  }
+
+  public DatanodeInfo getDatanodeInfo() {
+    return datanodeInfo;
+  }
+
+  public StorageReport[] getStorageReports() {
+    return storageReports;
+  }
+}
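A sketch of one way the new value class might be consumed (assumed usage, not in this patch): aggregating a datanode's remaining space by storage type. It relies only on the getters defined above, the existing StorageReport and DatanodeStorage accessors, and the StorageType enum, whose package (org.apache.hadoop.hdfs here) has moved between Hadoop versions.

import java.util.EnumMap;

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

class RemainingByStorageTypeSketch {
  /** Sum the remaining bytes of one datanode, grouped by storage type. */
  static EnumMap<StorageType, Long> remainingByType(DatanodeStorageReport report) {
    final EnumMap<StorageType, Long> remaining =
        new EnumMap<StorageType, Long>(StorageType.class);
    for (StorageReport s : report.getStorageReports()) {
      final StorageType t = s.getStorage().getStorageType();
      final Long sum = remaining.get(t);
      remaining.put(t, (sum == null ? 0L : sum) + s.getRemaining());
    }
    return remaining;
  }
}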
@@ -281,6 +281,19 @@ message GetDatanodeReportResponseProto {
   repeated DatanodeInfoProto di = 1;
 }
 
+message GetDatanodeStorageReportRequestProto {
+  required DatanodeReportTypeProto type = 1;
+}
+
+message DatanodeStorageReportProto {
+  required DatanodeInfoProto datanodeInfo = 1;
+  repeated StorageReportProto storageReports = 2;
+}
+
+message GetDatanodeStorageReportResponseProto {
+  repeated DatanodeStorageReportProto datanodeStorageReports = 1;
+}
+
 message GetPreferredBlockSizeRequestProto {
   required string filename = 1;
 }
@@ -672,6 +685,8 @@ service ClientNamenodeProtocol {
   rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
   rpc getDatanodeReport(GetDatanodeReportRequestProto)
       returns(GetDatanodeReportResponseProto);
+  rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
+      returns(GetDatanodeStorageReportResponseProto);
   rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
       returns(GetPreferredBlockSizeResponseProto);
   rpc setSafeMode(SetSafeModeRequestProto)
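For illustration only, the request that the client-side translator above ends up building for a LIVE report, with the enum constant written out; the message and field names are the ones introduced in this .proto hunk.

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;

class RequestProtoSketch {
  /** The request carries only the report type, mirroring getDatanodeReport. */
  static GetDatanodeStorageReportRequestProto liveRequest() {
    return GetDatanodeStorageReportRequestProto.newBuilder()
        .setType(DatanodeReportTypeProto.LIVE)
        .build();
  }
}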
@@ -44,20 +44,6 @@ message DatanodeRegistrationProto {
   required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0"
 }
 
-/**
- * Represents a storage available on the datanode
- */
-message DatanodeStorageProto {
-  enum StorageState {
-    NORMAL = 0;
-    READ_ONLY_SHARED = 1;
-  }
-
-  required string storageUuid = 1;
-  optional StorageState state = 2 [default = NORMAL];
-  optional StorageTypeProto storageType = 3 [default = DISK];
-}
-
 /**
  * Commands sent from namenode to the datanodes
  */
@@ -196,16 +182,6 @@ message HeartbeatRequestProto {
   optional uint64 cacheUsed = 7 [default = 0 ];
 }
 
-message StorageReportProto {
-  required string storageUuid = 1 [ deprecated = true ];
-  optional bool failed = 2 [ default = false ];
-  optional uint64 capacity = 3 [ default = 0 ];
-  optional uint64 dfsUsed = 4 [ default = 0 ];
-  optional uint64 remaining = 5 [ default = 0 ];
-  optional uint64 blockPoolUsed = 6 [ default = 0 ];
-  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
-}
-
 /**
  * state - State the NN is in when returning response to the DN
  * txid - Highest transaction ID this NN has seen
@@ -99,6 +99,30 @@ message DatanodeInfoProto {
   optional uint64 cacheUsed = 12 [default = 0];
 }
 
+/**
+ * Represents a storage available on the datanode
+ */
+message DatanodeStorageProto {
+  enum StorageState {
+    NORMAL = 0;
+    READ_ONLY_SHARED = 1;
+  }
+
+  required string storageUuid = 1;
+  optional StorageState state = 2 [default = NORMAL];
+  optional StorageTypeProto storageType = 3 [default = DISK];
+}
+
+message StorageReportProto {
+  required string storageUuid = 1 [ deprecated = true ];
+  optional bool failed = 2 [ default = false ];
+  optional uint64 capacity = 3 [ default = 0 ];
+  optional uint64 dfsUsed = 4 [ default = 0 ];
+  optional uint64 remaining = 5 [ default = 0 ];
+  optional uint64 blockPoolUsed = 6 [ default = 0 ];
+  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+}
+
 /**
  * Summary of a file or directory
  */
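DatanodeStorageProto and StorageReportProto are removed from DatanodeProtocol.proto above and re-declared here in hdfs.proto, presumably so that the client-facing DatanodeStorageReportProto can reference StorageReportProto as well as the datanode heartbeat. A sketch with made-up values of building one of these messages from Java (the generated classes live in HdfsProtos; note that storageUuid is required on both messages):

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;

class StorageReportProtoSketch {
  /** Build a StorageReportProto for a hypothetical 100 GB DISK volume. */
  static StorageReportProto sample() {
    final long gb = 1024L * 1024 * 1024;
    return StorageReportProto.newBuilder()
        .setStorageUuid("DS-example")          // deprecated but still required
        .setStorage(DatanodeStorageProto.newBuilder()
            .setStorageUuid("DS-example")
            .setStorageType(StorageTypeProto.DISK))
        .setCapacity(100 * gb)
        .setDfsUsed(10 * gb)
        .setRemaining(90 * gb)
        .build();
  }
}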
@ -21,19 +21,26 @@
|
|||||||
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
|
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
|
|
||||||
import java.net.InetSocketAddress;
|
import java.io.IOException;
|
||||||
import java.util.ArrayList;
|
import java.util.Arrays;
|
||||||
|
import java.util.Comparator;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This test ensures the all types of data node report work correctly.
|
* This test ensures the all types of data node report work correctly.
|
||||||
*/
|
*/
|
||||||
public class TestDatanodeReport {
|
public class TestDatanodeReport {
|
||||||
|
static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
|
||||||
final static private Configuration conf = new HdfsConfiguration();
|
final static private Configuration conf = new HdfsConfiguration();
|
||||||
final static private int NUM_OF_DATANODES = 4;
|
final static private int NUM_OF_DATANODES = 4;
|
||||||
|
|
||||||
@@ -50,20 +57,18 @@ public void testDatanodeReport() throws Exception {
     try {
       //wait until the cluster is up
       cluster.waitActive();
-
-      InetSocketAddress addr = new InetSocketAddress("localhost",
-          cluster.getNameNodePort());
-      DFSClient client = new DFSClient(addr, conf);
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      final List<DataNode> datanodes = cluster.getDataNodes();
+      final DFSClient client = cluster.getFileSystem().dfs;
 
-      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
-                   NUM_OF_DATANODES);
-      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
-                   NUM_OF_DATANODES);
-      assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
+      assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
 
       // bring down one datanode
-      ArrayList<DataNode> datanodes = cluster.getDataNodes();
-      datanodes.remove(datanodes.size()-1).shutdown();
+      final DataNode last = datanodes.get(datanodes.size() - 1);
+      LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
+      last.shutdown();
 
       DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
       while (nodeInfo.length != 1) {
@@ -74,22 +79,59 @@ public void testDatanodeReport() throws Exception {
         nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
       }
 
-      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
-                   NUM_OF_DATANODES-1);
-      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
-                   NUM_OF_DATANODES);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
+      assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
+      assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
       Thread.sleep(5000);
       assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
-    }finally {
+    } finally {
       cluster.shutdown();
     }
   }
 
-  public static void main(String[] args) throws Exception {
-    new TestDatanodeReport().testDatanodeReport();
-  }
+  final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
+    @Override
+    public int compare(StorageReport left, StorageReport right) {
+      return left.getStorage().getStorageID().compareTo(
+          right.getStorage().getStorageID());
+    }
+  };
+
+  static void assertReports(int numDatanodes, DatanodeReportType type,
+      DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
+    final DatanodeInfo[] infos = client.datanodeReport(type);
+    assertEquals(numDatanodes, infos.length);
+    final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
+    assertEquals(numDatanodes, reports.length);
+
+    for(int i = 0; i < infos.length; i++) {
+      assertEquals(infos[i], reports[i].getDatanodeInfo());
+
+      final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
+      if (bpid != null) {
+        //check storage
+        final StorageReport[] computed = reports[i].getStorageReports();
+        Arrays.sort(computed, CMP);
+        final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
+        Arrays.sort(expected, CMP);
+
+        assertEquals(expected.length, computed.length);
+        for(int j = 0; j < expected.length; j++) {
+          assertEquals(expected[j].getStorage().getStorageID(),
+              computed[j].getStorage().getStorageID());
+        }
+      }
+    }
+  }
+
+  static DataNode findDatanode(String id, List<DataNode> datanodes) {
+    for(DataNode d : datanodes) {
+      if (d.getDatanodeUuid().equals(id)) {
+        return d;
+      }
+    }
+    throw new IllegalStateException("Datnode " + id + " not in datanode list: "
+        + datanodes);
+  }
 }
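The assertReports helper above exercises the new call end-to-end inside a MiniDFSCluster. Against a running cluster the same information is reachable from an ordinary client; below is a minimal sketch, where the namenode URI is a placeholder and obtaining the DFSClient through DistributedFileSystem#getClient() is an assumption (the test reaches it through the package-private dfs field instead). The report and storage accessors are the ones exercised by the test above.

// Sketch: print each live datanode and the IDs of its storages.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageReportLister {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI; point this at a real namenode.
    DistributedFileSystem fs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    DFSClient client = fs.getClient();  // assumed accessor

    for (DatanodeStorageReport r
        : client.getDatanodeStorageReport(DatanodeReportType.LIVE)) {
      System.out.println(r.getDatanodeInfo());
      for (StorageReport s : r.getStorageReports()) {
        System.out.println("  storage " + s.getStorage().getStorageID());
      }
    }
    fs.close();
  }
}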
@@ -31,25 +31,25 @@
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -67,9 +67,18 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
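The TestPBHelper import changes above track DatanodeStorageProto's move from DatanodeProtocolProtos to HdfsProtos; the conversion logic itself is not shown in this hunk. Below is a round-trip sketch in the style of the other TestPBHelper convert tests, assuming PBHelper exposes convert() overloads between DatanodeStorage and DatanodeStorageProto (those overloads, the test class name, and the storage ID value are assumptions, not part of this diff).

// Hypothetical round-trip check for DatanodeStorage <-> DatanodeStorageProto.
import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.junit.Test;

public class TestDatanodeStorageConversion {
  @Test
  public void testConvertDatanodeStorage() {
    DatanodeStorage dns = new DatanodeStorage(
        "DS-hypothetical-id", DatanodeStorage.State.NORMAL, StorageType.DISK);
    DatanodeStorageProto proto = PBHelper.convert(dns);  // assumed overload
    DatanodeStorage back = PBHelper.convert(proto);      // assumed overload
    assertEquals(dns.getStorageID(), back.getStorageID());
    assertEquals(dns.getState(), back.getState());
    assertEquals(dns.getStorageType(), back.getStorageType());
  }
}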