HDFS-11517. Expose slow disks via DataNode JMX. Contributed by Hanisha Koneru
commit 7f8e928400 (parent 4a8e304502)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1822,6 +1822,10 @@ public DataNodeMetrics getMetrics() {
     return metrics;
   }
 
+  public DataNodeDiskMetrics getDiskMetrics() {
+    return diskMetrics;
+  }
+
   public DataNodePeerMetrics getPeerMetrics() {
     return peerMetrics;
   }
@@ -3520,4 +3524,14 @@ public String getSendPacketDownstreamAvgInfo() {
     return peerMetrics != null ?
         peerMetrics.dumpSendPacketDownstreamAvgInfoAsJson() : null;
   }
+
+  @Override // DataNodeMXBean
+  public String getSlowDisks() {
+    if (diskMetrics == null) {
+      // Disk stats not enabled
+      return null;
+    }
+    Set<String> slowDisks = diskMetrics.getDiskOutliersStats().keySet();
+    return JSON.toString(slowDisks);
+  }
 }
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java

@@ -132,4 +132,11 @@ public interface DataNodeMXBean {
    * </p>
    */
   String getSendPacketDownstreamAvgInfo();
+
+  /**
+   * Gets the slow disks in the Datanode.
+   *
+   * @return list of slow disks
+   */
+  String getSlowDisks();
 }
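Note: because getSlowDisks() is published through DataNodeMXBean, the attribute can be read by any generic JMX client, not only the in-process test further down. A minimal sketch of a remote reader follows; the RMI service URL (host and port) is a hypothetical example and assumes JMX remoting has been enabled on the DataNode JVM, which this change does not configure.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class SlowDisksReader {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint: assumes the DataNode JVM was started with
    // com.sun.management.jmxremote listening on port 9999.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://datanode-host:9999/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      ObjectName name =
          new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
      // Same attribute name the MXBean derives from getSlowDisks().
      String slowDisks = (String) mbs.getAttribute(name, "SlowDisks");
      // null means disk stats are not enabled on the DataNode.
      System.out.println(slowDisks);
    }
  }
}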
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -52,7 +54,8 @@ public class DataNodeDiskMetrics {
   private volatile boolean shouldRun;
   private OutlierDetector slowDiskDetector;
   private Daemon slowDiskDetectionDaemon;
-  private volatile Map<String, Map<DiskOutlierDetectionOp, Double>> diskOutliersStats;
+  private volatile Map<String, Map<DiskOutlierDetectionOp, Double>>
+      diskOutliersStats = Maps.newHashMap();
 
   public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs) {
     this.dn = dn;
@@ -178,4 +181,12 @@ public void shutdownAndWait() {
       LOG.error("Disk Outlier Detection daemon did not shutdown", e);
     }
   }
+
+  /**
+   * Use only for testing.
+   */
+  @VisibleForTesting
+  public void addSlowDiskForTesting(String slowDiskPath) {
+    diskOutliersStats.put(slowDiskPath, ImmutableMap.of());
+  }
 }
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java

@@ -35,6 +35,7 @@
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -104,8 +105,12 @@ public void testDataNodeMXBean() throws Exception {
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
         "BPServiceActorInfo");
       Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
+      String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
+      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
     } finally {
-      if (cluster != null) {cluster.shutdown();}
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
@@ -209,4 +214,30 @@ private int getTotalNumBlocks(MBeanServer mbs, ObjectName mxbeanName)
     }
     return totalBlocks;
   }
+
+  @Test
+  public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setDouble(DFSConfigKeys
+        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      Assert.assertEquals(datanodes.size(), 1);
+      DataNode datanode = datanodes.get(0);
+      String slowDiskPath = "test/data1/slowVolume";
+      datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=DataNode,name=DataNodeInfo");
+
+      String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
+      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
+      Assert.assertTrue(slowDisks.contains(slowDiskPath));
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
 }
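Note: the value published under the "SlowDisks" attribute is simply the JSON-encoded key set of diskOutliersStats, i.e. a JSON list of the slow volume base paths, or null when disk stats are not enabled. A small sketch of decoding it on the consumer side is below; using Jackson here is an assumption about the consumer's classpath, not part of this change.

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class SlowDisksParser {
  private static final ObjectMapper MAPPER = new ObjectMapper();

  /** Decode the SlowDisks JMX attribute back into a set of volume paths. */
  public static Set<String> parse(String slowDisksJson) throws Exception {
    if (slowDisksJson == null) {
      // getSlowDisks() returns null when disk stats are not enabled.
      return new HashSet<>();
    }
    List<String> disks =
        MAPPER.readValue(slowDisksJson, new TypeReference<List<String>>() {});
    return new HashSet<>(disks);
  }
}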