HDFS-17282. Reconfig 'SlowIoWarningThreshold' parameters for datanode. (#6338). Contributed by huangzhaobo99
Reviewed-by: Haiyang Hu <haiyang.hu@shopee.com>
Reviewed-by: Tao Li <tomscut@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
parent 562c42c86a
commit 1498a8685d
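This change makes dfs.datanode.slow.io.warning.threshold.ms reconfigurable on a running DataNode, so the slow-I/O warning threshold can be tuned without a restart. As a hedged operator-side sketch (not part of this commit): update the property in the DataNode's hdfs-site.xml, then drive the standard `hdfs dfsadmin -reconfig datanode <host:ipc_port> start` flow, shown here programmatically through DFSAdmin. The host and port below are placeholders.

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ReconfigureSlowIoThreshold {
      public static void main(String[] args) throws Exception {
        // Placeholder DataNode IPC address; substitute a real <host:ipc_port>.
        String dn = "dn1.example.com:9867";
        DFSAdmin admin = new DFSAdmin(new HdfsConfiguration());
        // Start an async reconfiguration; the DataNode re-reads its config and
        // applies changed reconfigurable properties such as
        // dfs.datanode.slow.io.warning.threshold.ms.
        ToolRunner.run(admin, new String[] {"-reconfig", "datanode", dn, "start"});
        // Check progress and result of the reconfiguration task.
        ToolRunner.run(admin, new String[] {"-reconfig", "datanode", dn, "status"});
      }
    }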
@@ -37,6 +37,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PMEM_CACHE_RECOVERY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PROCESS_COMMANDS_THRESHOLD_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
@@ -114,7 +115,7 @@ public class DNConf {
   final long ibrInterval;
   volatile long initialBlockReportDelayMs;
   volatile long cacheReportInterval;
-  final long datanodeSlowIoWarningThresholdMs;
+  private volatile long datanodeSlowIoWarningThresholdMs;

   final String minimumNameNodeVersion;
   final String encryptionAlgorithm;
@@ -522,4 +523,10 @@ public void setOutliersReportIntervalMs(String reportIntervalMs) {
         DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
         DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
   }
+
+  public void setDatanodeSlowIoWarningThresholdMs(long threshold) {
+    Preconditions.checkArgument(threshold > 0,
+        DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY + " should be greater than 0");
+    datanodeSlowIoWarningThresholdMs = threshold;
+  }
 }
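A note on the DNConf change above: the field moves from final to private volatile so that a value set by the reconfiguration thread is immediately visible to I/O threads that consult it, without extra locking. Below is a minimal standalone sketch of that read/write pattern; the class and method names are hypothetical and not from this patch (the real consumers go through DNConf#getSlowIoWarningThresholdMs(), which the test further down exercises).

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /** Hypothetical sketch of how a DataNode I/O path can use the volatile threshold. */
    class SlowIoWarning {
      private static final Logger LOG = LoggerFactory.getLogger(SlowIoWarning.class);

      // Mirrors DNConf#datanodeSlowIoWarningThresholdMs: volatile so a reconfigured
      // value is seen by I/O threads without synchronization; 300 ms is the default.
      private volatile long thresholdMs = 300;

      void setThresholdMs(long thresholdMs) {   // called by the reconfiguration path
        this.thresholdMs = thresholdMs;
      }

      void warnIfSlow(String op, long durationMs) {   // called from I/O paths
        if (durationMs > thresholdMs) {
          LOG.warn("Slow {} took {} ms (threshold={} ms)", op, durationMs, thresholdMs);
        }
      }
    }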
@@ -80,6 +80,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
@@ -371,7 +373,8 @@ public class DataNode extends ReconfigurableBase
           DFS_DISK_BALANCER_PLAN_VALID_INTERVAL,
           DFS_DATANODE_DATA_TRANSFER_BANDWIDTHPERSEC_KEY,
           DFS_DATANODE_DATA_WRITE_BANDWIDTHPERSEC_KEY,
-          DFS_DATANODE_DATA_READ_BANDWIDTHPERSEC_KEY));
+          DFS_DATANODE_DATA_READ_BANDWIDTHPERSEC_KEY,
+          DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY));

   public static final String METRICS_LOG_NAME = "DataNodeMetricsLog";

@@ -735,6 +738,8 @@ public String reconfigurePropertyImpl(String property, String newVal)
       case DFS_DISK_BALANCER_ENABLED:
       case DFS_DISK_BALANCER_PLAN_VALID_INTERVAL:
         return reconfDiskBalancerParameters(property, newVal);
+      case DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY:
+        return reconfSlowIoWarningThresholdParameters(property, newVal);
       default:
         break;
     }
@@ -1056,6 +1061,24 @@ private String reconfDiskBalancerParameters(String property, String newVal)
     }
   }

+  private String reconfSlowIoWarningThresholdParameters(String property, String newVal)
+      throws ReconfigurationException {
+    String result;
+    try {
+      LOG.info("Reconfiguring {} to {}", property, newVal);
+      Preconditions.checkNotNull(dnConf, "DNConf has not been initialized.");
+      long slowIoWarningThreshold = (newVal == null ?
+          DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT :
+          Long.parseLong(newVal));
+      result = Long.toString(slowIoWarningThreshold);
+      dnConf.setDatanodeSlowIoWarningThresholdMs(slowIoWarningThreshold);
+      LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
+      return result;
+    } catch (IllegalArgumentException e) {
+      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+    }
+  }
+
   /**
    * Get a list of the keys of the re-configurable properties in configuration.
    */
@@ -49,6 +49,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_ENABLED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DISK_BALANCER_PLAN_VALID_INTERVAL;
@@ -916,4 +918,34 @@ public void testDiskBalancerParameters() throws Exception {
       assertEquals(60000, dn.getDiskBalancer().getPlanValidityIntervalInConfig());
     }
   }
+
+  @Test
+  public void testSlowIoWarningThresholdReconfiguration() throws Exception {
+    int slowIoWarningThreshold = 500;
+    for (int i = 0; i < NUM_DATA_NODE; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+
+      // Verify DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY.
+      // Try invalid values.
+      LambdaTestUtils.intercept(ReconfigurationException.class,
+          "Could not change property dfs.datanode.slow.io.warning.threshold.ms from "
+              + "'300' to 'text'",
+          () -> dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, "text"));
+      LambdaTestUtils.intercept(ReconfigurationException.class,
+          "Could not change property dfs.datanode.slow.io.warning.threshold.ms from "
+              + "'300' to '-1'",
+          () -> dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, "-1"));
+
+      // Set value is 500.
+      dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
+          String.valueOf(slowIoWarningThreshold));
+      assertEquals(slowIoWarningThreshold, dn.getDnConf().getSlowIoWarningThresholdMs());
+
+      // Set default value.
+      dn.reconfigureProperty(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, null);
+      assertEquals(DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT,
+          dn.getDnConf().getSlowIoWarningThresholdMs());
+    }
+  }
+
 }
@@ -347,7 +347,7 @@ public void testDataNodeGetReconfigurableProperties() throws IOException, Interr
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("datanode", address, outs, errs);
-    assertEquals(25, outs.size());
+    assertEquals(26, outs.size());
     assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1));
   }
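The assertion above moves from 25 to 26 because the DataNode now exposes one more reconfigurable property. Operators can list the current set with `hdfs dfsadmin -reconfig datanode <host:ipc_port> properties`, which should now include dfs.datanode.slow.io.warning.threshold.ms.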