HDFS-5377. Heartbeats from Datanode should include one storage report per storage directory
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1534464 13f79535-47bb-0310-9956-ffa450edef68
parent 40fec97bef
commit 0ebab3a88a
@@ -36,3 +36,6 @@ IMPROVEMENTS:
 
     HDFS-4988. Datanode must support all the volumes as individual storages.
     (Arpit Agarwal)
+
+    HDFS-5377. Heartbeats from Datanode should include one storage report
+    per storage directory (Arpit Agarwal)
@@ -449,17 +449,15 @@ DatanodeCommand blockReport() throws IOException {
 
   HeartbeatResponse sendHeartBeat() throws IOException {
+    StorageReport[] reports =
+        dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending heartbeat from service actor: " + this);
+      LOG.debug("Sending heartbeat with " + reports.length +
+                " storage reports from service actor: " + this);
     }
-    // reports number of failed volumes
-    StorageReport[] report = { new StorageReport(bpRegistration.getDatanodeUuid(),
-        false,
-        dn.getFSDataset().getCapacity(),
-        dn.getFSDataset().getDfsUsed(),
-        dn.getFSDataset().getRemaining(),
-        dn.getFSDataset().getBlockPoolUsed(bpos.getBlockPoolId())) };
-    return bpNamenode.sendHeartbeat(bpRegistration, report,
+
+    return bpNamenode.sendHeartbeat(bpRegistration,
+        reports,
         dn.getXmitsInProgress(),
         dn.getXceiverCount(),
         dn.getFSDataset().getNumFailedVolumes());
   }
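
The hunk above changes the heartbeat payload from a single DataNode-wide StorageReport to one report per storage directory, built by the new getStorageReports call on the dataset. A self-contained sketch of what a receiver can now do with per-storage entries; the Report stand-in and remainingByStorage helper are illustrative, not Hadoop's StorageReport API (only the constructor argument order mirrors this commit):

import java.util.HashMap;
import java.util.Map;

// Illustrative stand-in; field order mirrors the StorageReport
// constructor arguments used elsewhere in this commit.
class Report {
  final String storageId;
  final long capacity, dfsUsed, remaining;

  Report(String storageId, long capacity, long dfsUsed, long remaining) {
    this.storageId = storageId;
    this.capacity = capacity;
    this.dfsUsed = dfsUsed;
    this.remaining = remaining;
  }
}

class HeartbeatSketch {
  // With one report per storage, the receiver can key state by storage ID
  // instead of holding a single aggregated total per DataNode.
  static Map<String, Long> remainingByStorage(Report[] reports) {
    Map<String, Long> remaining = new HashMap<>();
    for (Report r : reports) {
      remaining.put(r.storageId, r.remaining);
    }
    return remaining;
  }
}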
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -86,6 +87,10 @@ public RollingLogs createRollingLogs(String bpid, String prefix
   /** @return a list of volumes. */
   public List<V> getVolumes();
 
+  /** @return one or more storage reports for attached volumes. */
+  public StorageReport[] getStorageReports(String bpid)
+      throws IOException;
+
   /** @return the volume that contains a replica of the block. */
   public V getVolume(ExtendedBlock b);
 
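
Every implementation of this interface now has to provide getStorageReports(bpid); the two implementations touched later in this diff are the real volume-backed dataset and a stub. A hedged usage sketch — checkReports is a hypothetical helper and its arguments are assumed inputs, not part of the patch:

// Hypothetical helper; 'dataset' and 'blockPoolId' are assumed inputs.
static void checkReports(FsDatasetSpi<?> dataset, String blockPoolId)
    throws IOException {
  StorageReport[] reports = dataset.getStorageReports(blockPoolId);
  // With this patch, the volume-backed dataset returns one report
  // per attached volume.
  assert reports.length == dataset.getVolumes().size();
}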
@@ -78,6 +78,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
@@ -109,6 +110,26 @@ public List<FsVolumeImpl> getVolumes() {
     return volumes.volumes;
   }
 
+  @Override // FsDatasetSpi
+  public StorageReport[] getStorageReports(String bpid)
+      throws IOException {
+    StorageReport[] reports;
+    synchronized (statsLock) {
+      reports = new StorageReport[volumes.volumes.size()];
+      int i = 0;
+      for (FsVolumeImpl volume : volumes.volumes) {
+        reports[i++] = new StorageReport(volume.getStorageID(),
+                                         false,
+                                         volume.getCapacity(),
+                                         volume.getDfsUsed(),
+                                         volume.getAvailable(),
+                                         volume.getBlockPoolUsed(bpid));
+      }
+    }
+
+    return reports;
+  }
+
   @Override
   public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
     final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
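
The implementation above holds statsLock while walking the volume list, so the per-volume numbers form a consistent snapshot within one heartbeat. An equivalent idiom, shown only as a sketch (same calls as the committed loop, but collected through an ArrayList to avoid manual index bookkeeping):

// Sketch only; behavior matches the loop in the hunk above.
List<StorageReport> reports = new ArrayList<>(volumes.volumes.size());
synchronized (statsLock) {
  for (FsVolumeImpl volume : volumes.volumes) {
    reports.add(new StorageReport(volume.getStorageID(), false,
        volume.getCapacity(), volume.getDfsUsed(),
        volume.getAvailable(), volume.getBlockPoolUsed(bpid)));
  }
}
return reports.toArray(new StorageReport[0]);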
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
@@ -987,6 +988,11 @@ public List<FsVolumeSpi> getVolumes() {
     throw new UnsupportedOperationException();
   }
 
+  @Override
+  public StorageReport[] getStorageReports(String bpid) {
+    throw new UnsupportedOperationException();
+  }
+
   @Override
   public List<Block> getFinalizedBlocks(String bpid) {
     throw new UnsupportedOperationException();
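
This stub dataset does not model per-volume storage, so its override simply throws, matching the surrounding unsupported operations. If a test double instead needed heartbeats to succeed, one hedged alternative (not what this patch does) is to return an empty array:

// Alternative stub sketch, not the committed behavior: zero reports lets
// callers iterate over nothing instead of hitting an exception.
@Override
public StorageReport[] getStorageReports(String bpid) {
  return new StorageReport[0];
}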