From 0ebab3a88a5f172a1180f4e88a91cf6194b273ca Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Tue, 22 Oct 2013 02:22:17 +0000
Subject: [PATCH] HDFS-5377. Heartbeats from Datanode should include one
 storage report per storage directory

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1534464 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES_HDFS-2832.txt            |  3 +++
 .../hdfs/server/datanode/BPServiceActor.java     | 16 +++++++-------
 .../server/datanode/fsdataset/FsDatasetSpi.java  |  5 +++++
 .../datanode/fsdataset/impl/FsDatasetImpl.java   | 21 +++++++++++++++++++
 .../hdfs/server/datanode/SimulatedFSDataset.java |  6 ++++++
 5 files changed, 42 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index 3b975f242b..ff318f291f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -36,3 +36,6 @@ IMPROVEMENTS:
 
     HDFS-4988. Datanode must support all the volumes as individual storages.
     (Arpit Agarwal)
+
+    HDFS-5377. Heartbeats from Datanode should include one storage report
+    per storage directory (Arpit Agarwal)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 62b9b7a7a8..32e32966f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -449,17 +449,15 @@ DatanodeCommand blockReport() throws IOException {
 
   HeartbeatResponse sendHeartBeat() throws IOException {
+    StorageReport[] reports =
+        dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending heartbeat from service actor: " + this);
+      LOG.debug("Sending heartbeat with " + reports.length +
+                " storage reports from service actor: " + this);
     }
-    // reports number of failed volumes
-    StorageReport[] report = { new StorageReport(bpRegistration.getDatanodeUuid(),
-        false,
-        dn.getFSDataset().getCapacity(),
-        dn.getFSDataset().getDfsUsed(),
-        dn.getFSDataset().getRemaining(),
-        dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId())) };
-    return bpNamenode.sendHeartbeat(bpRegistration, report,
+
+    return bpNamenode.sendHeartbeat(bpRegistration,
+        reports,
         dn.getXmitsInProgress(),
         dn.getXceiverCount(),
         dn.getFSDataset().getNumFailedVolumes());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index d9f7655dd6..f4c49bc04e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -86,6 +87,10 @@ public RollingLogs createRollingLogs(String bpid, String prefix
   /** @return a list of volumes. */
   public List<V> getVolumes();
 
+  /** @return one or more storage reports for attached volumes. */
+  public StorageReport[] getStorageReports(String bpid)
+      throws IOException;
+
   /** @return the volume that contains a replica of the block. */
   public V getVolume(ExtendedBlock b);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 2d84e6ac84..85238e459b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -78,6 +78,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
@@ -109,6 +110,26 @@ public List<FsVolumeImpl> getVolumes() {
     return volumes.volumes;
   }
 
+  @Override // FsDatasetSpi
+  public StorageReport[] getStorageReports(String bpid)
+      throws IOException {
+    StorageReport[] reports;
+    synchronized (statsLock) {
+      reports = new StorageReport[volumes.volumes.size()];
+      int i = 0;
+      for (FsVolumeImpl volume : volumes.volumes) {
+        reports[i++] = new StorageReport(volume.getStorageID(),
+                                         false,
+                                         volume.getCapacity(),
+                                         volume.getDfsUsed(),
+                                         volume.getAvailable(),
+                                         volume.getBlockPoolUsed(bpid));
+      }
+    }
+
+    return reports;
+  }
+
   @Override
   public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
     final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index c0a9481c24..05033d6ff5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
@@ -987,6 +988,11 @@ public List<FsVolumeSpi> getVolumes() {
     throw new UnsupportedOperationException();
   }
 
+  @Override
+  public StorageReport[] getStorageReports(String bpid) {
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   public List<FinalizedReplica> getFinalizedBlocks(String bpid) {
     throw new UnsupportedOperationException();
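
For illustration, a minimal, self-contained Java sketch of the pattern this patch introduces: one storage report per attached volume instead of a single report aggregated over the whole DataNode. The Volume and Report classes below are hypothetical stand-ins for FsVolumeImpl and StorageReport, not the real HDFS types; only the constructor argument order (storage ID, failed flag, capacity, DFS used, remaining, block-pool used) mirrors the call added in FsDatasetImpl#getStorageReports.

import java.util.ArrayList;
import java.util.List;

/** Hypothetical stand-in for FsVolumeImpl, reduced to the fields used here. */
class Volume {
  final String storageID;
  final long capacity, dfsUsed, available, blockPoolUsed;

  Volume(String storageID, long capacity, long dfsUsed, long available,
      long blockPoolUsed) {
    this.storageID = storageID;
    this.capacity = capacity;
    this.dfsUsed = dfsUsed;
    this.available = available;
    this.blockPoolUsed = blockPoolUsed;
  }
}

/** Hypothetical stand-in for StorageReport; field order mirrors the patch. */
class Report {
  final String storageID;
  final boolean failed;
  final long capacity, dfsUsed, remaining, blockPoolUsed;

  Report(String storageID, boolean failed, long capacity, long dfsUsed,
      long remaining, long blockPoolUsed) {
    this.storageID = storageID;
    this.failed = failed;
    this.capacity = capacity;
    this.dfsUsed = dfsUsed;
    this.remaining = remaining;
    this.blockPoolUsed = blockPoolUsed;
  }
}

public class PerVolumeReports {
  /** Builds one report per volume, like the loop added to FsDatasetImpl. */
  static Report[] buildReports(List<Volume> volumes) {
    Report[] reports = new Report[volumes.size()];
    int i = 0;
    for (Volume v : volumes) {
      // 'false' mirrors the patch: the failed flag is hard-coded for now.
      reports[i++] = new Report(v.storageID, false, v.capacity, v.dfsUsed,
          v.available, v.blockPoolUsed);
    }
    return reports;
  }

  public static void main(String[] args) {
    List<Volume> volumes = new ArrayList<Volume>();
    volumes.add(new Volume("DS-vol1", 100L << 30, 40L << 30, 60L << 30, 10L << 30));
    volumes.add(new Volume("DS-vol2", 200L << 30, 50L << 30, 150L << 30, 20L << 30));
    for (Report r : buildReports(volumes)) {
      System.out.println(r.storageID + ": capacity=" + r.capacity
          + " remaining=" + r.remaining + " bpUsed=" + r.blockPoolUsed);
    }
  }
}

Unlike this sketch, the real getStorageReports holds the dataset's statsLock while filling the array, so the per-volume numbers in a single heartbeat form a consistent snapshot rather than values read at slightly different times.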