HDFS-12371. BlockVerificationFailures and BlocksVerified show up as 0 in Datanode JMX. Contributed by Hanisha Koneru.
commit 6bf921a5c3
parent 10d7493587
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java

@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
@@ -81,6 +82,8 @@ void setConf(Conf conf) {
    */
   private final DataNode datanode;
 
+  private final DataNodeMetrics metrics;
+
   /**
    * A reference to the volume that we're scanning.
    */
@@ -299,6 +302,7 @@ public void handle(ExtendedBlock block, IOException e) {
   VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
     this.conf = conf;
     this.datanode = datanode;
+    this.metrics = datanode.getMetrics();
     this.ref = ref;
     this.volume = ref.getVolume();
     ScanResultHandler handler;
@@ -443,12 +447,14 @@ private long scanBlock(ExtendedBlock cblock, long bytesPerSec) {
       throttler.setBandwidth(bytesPerSec);
       long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
       resultHandler.handle(block, null);
+      metrics.incrBlocksVerified();
       return bytesRead;
     } catch (IOException e) {
       resultHandler.handle(block, e);
     } finally {
       IOUtils.cleanup(null, blockSender);
     }
+    metrics.incrBlockVerificationFailures();
     return -1;
   }
 
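With the counters wired into VolumeScanner, the BlocksVerified and BlockVerificationFailures values published through DataNodeMetrics should increment as blocks are scanned instead of staying at 0. Below is a minimal sketch of how the counters could be read back in a test, assuming Hadoop's MetricsAsserts test helper and a DataNode whose volume scanner has already processed some blocks; the VolumeScanCounters class and readScanCounters method are illustrative names, not part of this patch.

import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

/** Hypothetical helper for reading the scan counters off a running DataNode. */
public class VolumeScanCounters {

  /** Snapshot of the two counters this patch starts incrementing. */
  static long[] readScanCounters(DataNode dn) {
    // DataNodeMetrics registers as a named metrics source; MetricsAsserts
    // looks that source up from the default metrics system.
    MetricsRecordBuilder rb = getMetrics(dn.getMetrics().name());
    // Counter names as they surface in DataNode JMX per the commit subject.
    long verified = getLongCounter("BlocksVerified", rb);
    long failures = getLongCounter("BlockVerificationFailures", rb);
    return new long[] { verified, failures };
  }
}

In a real test this would sit inside a harness such as a MiniDFSCluster with the block scanner enabled and a wait for the scanner thread to run; that setup is omitted here.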