HDFS-16399. Reconfig cache report parameters for datanode (#3841)

Author: litao, 2022-01-19 14:10:39 +08:00 (committed by GitHub)
Parent: fcb1076699
Commit: e355646330
4 changed files with 78 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java

@@ -113,7 +113,7 @@ public class DNConf {
   final long outliersReportIntervalMs;
   final long ibrInterval;
   final long initialBlockReportDelayMs;
-  final long cacheReportInterval;
+  volatile long cacheReportInterval;
   final long datanodeSlowIoWarningThresholdMs;
   final String minimumNameNodeVersion;
@@ -484,4 +484,14 @@ void setBlockReportInterval(long intervalMs) {
   public long getBlockReportInterval() {
     return blockReportInterval;
   }
+
+  void setCacheReportInterval(long intervalMs) {
+    Preconditions.checkArgument(intervalMs > 0,
+        "dfs.cachereport.intervalMsec should be larger than 0");
+    cacheReportInterval = intervalMs;
+  }
+
+  public long getCacheReportInterval() {
+    return cacheReportInterval;
+  }
 }
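The DNConf change has two parts worth noting: the field drops final and becomes volatile, so a value written by the reconfiguration thread is visible to the cache-report thread without extra locking, and the new setter validates its input with Guava's Preconditions. A minimal standalone sketch of that contract (the class name is illustrative, not Hadoop source):

    import com.google.common.base.Preconditions;

    // Illustrative stand-in for the DNConf change: `volatile` publishes a new
    // interval safely across threads; the setter fails fast on bad input.
    class CacheReportConfig {
      private volatile long cacheReportInterval;

      void setCacheReportInterval(long intervalMs) {
        // Throws IllegalArgumentException for any non-positive interval.
        Preconditions.checkArgument(intervalMs > 0,
            "dfs.cachereport.intervalMsec should be larger than 0");
        this.cacheReportInterval = intervalMs;
      }

      long getCacheReportInterval() {
        return cacheReportInterval;
      }
    }

The IllegalArgumentException raised here is what the DataNode-side hook in the next file catches and rewraps as a ReconfigurationException.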

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -20,6 +20,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ALLOW_SAME_DISK_TIERING;
@@ -306,7 +308,8 @@ public class DataNode extends ReconfigurableBase
       DFS_DATANODE_DATA_DIR_KEY,
       DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
       DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
-      DFS_DATANODE_MAX_RECEIVER_THREADS_KEY));
+      DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+      DFS_CACHEREPORT_INTERVAL_MSEC_KEY));
 
   public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
@@ -652,6 +655,8 @@ public String reconfigurePropertyImpl(String property, String newVal)
       }
     case DFS_DATANODE_MAX_RECEIVER_THREADS_KEY:
       return reconfDataXceiverParameters(property, newVal);
+    case DFS_CACHEREPORT_INTERVAL_MSEC_KEY:
+      return reconfCacheReportParameters(property, newVal);
     default:
       break;
     }
@@ -676,6 +681,23 @@ private String reconfDataXceiverParameters(String property, String newVal)
     }
   }
 
+  private String reconfCacheReportParameters(String property, String newVal)
+      throws ReconfigurationException {
+    String result;
+    try {
+      LOG.info("Reconfiguring {} to {}", property, newVal);
+      Preconditions.checkNotNull(dnConf, "DNConf has not been initialized.");
+      long reportInterval = (newVal == null ? DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT :
+          Long.parseLong(newVal));
+      result = Long.toString(reportInterval);
+      dnConf.setCacheReportInterval(reportInterval);
+      LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
+      return result;
+    } catch (IllegalArgumentException e) {
+      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+    }
+  }
+
   /**
    * Get a list of the keys of the re-configurable properties in configuration.
    */
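Two details of reconfCacheReportParameters are easy to miss. First, a null newVal (what the reconfiguration framework passes when the property has been removed from the configuration) reverts the interval to DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT. Second, NumberFormatException is a subclass of IllegalArgumentException, so an unparseable string from Long.parseLong and a non-positive value rejected by DNConf#setCacheReportInterval both land in the same catch block. A minimal sketch of the parse-or-default step, assuming the stock 10-second default (the class and constant names are illustrative):

    // Illustrative parse-or-default step mirroring the reconfiguration hook.
    public class ParseOrDefault {
      // Assumed to match DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT.
      static final long DEFAULT_INTERVAL_MS = 10 * 1000L;

      static long parseInterval(String newVal) {
        // null means "revert to default"; a string like "text" throws
        // NumberFormatException, which extends IllegalArgumentException.
        return newVal == null ? DEFAULT_INTERVAL_MS : Long.parseLong(newVal);
      }

      public static void main(String[] args) {
        System.out.println(parseInterval(null));     // 10000
        System.out.println(parseInterval("300000")); // 300000
      }
    }

At runtime an administrator reaches this path with hdfs dfsadmin -reconfig datanode <host:ipc_port> start; no datanode restart is needed.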

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java

@@ -20,6 +20,8 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT;
@@ -406,4 +408,45 @@ public void testDataXceiverReconfiguration()
           dn.getConf().get(DFS_DATANODE_MAX_RECEIVER_THREADS_KEY));
     }
   }
+
+  @Test
+  public void testCacheReportReconfiguration()
+      throws ReconfigurationException {
+    int cacheReportInterval = 300 * 1000;
+    for (int i = 0; i < NUM_DATA_NODE; i++) {
+      DataNode dn = cluster.getDataNodes().get(i);
+
+      // Try invalid values.
+      try {
+        dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, "text");
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting NumberFormatException",
+            expected.getCause() instanceof NumberFormatException);
+      }
+      try {
+        dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, String.valueOf(-1));
+        fail("ReconfigurationException expected");
+      } catch (ReconfigurationException expected) {
+        assertTrue("expecting IllegalArgumentException",
+            expected.getCause() instanceof IllegalArgumentException);
+      }
+
+      // Change properties.
+      dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
+          String.valueOf(cacheReportInterval));
+
+      // Verify change.
+      assertEquals(String.format("%s has wrong value", DFS_CACHEREPORT_INTERVAL_MSEC_KEY),
+          cacheReportInterval, dn.getDnConf().getCacheReportInterval());
+
+      // Revert to default.
+      dn.reconfigureProperty(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, null);
+      assertEquals(String.format("%s has wrong value", DFS_CACHEREPORT_INTERVAL_MSEC_KEY),
+          DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT, dn.getDnConf().getCacheReportInterval());
+
+      assertNull(String.format("expect %s is not configured", DFS_CACHEREPORT_INTERVAL_MSEC_KEY),
+          dn.getConf().get(DFS_CACHEREPORT_INTERVAL_MSEC_KEY));
+    }
+  }
 }
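The test leans on a NUM_DATA_NODE-node MiniDFSCluster created in the class's setup, which is outside this diff. A minimal sketch of such a fixture (the node count and builder settings are assumptions, not the test's actual setup):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Minimal fixture sketch: a small MiniDFSCluster whose datanodes the
    // reconfiguration tests iterate over.
    public class MiniClusterFixture {
      static final int NUM_DATA_NODE = 2;
      static MiniDFSCluster cluster;

      static void setUp() throws Exception {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(NUM_DATA_NODE)
            .build();
        cluster.waitActive(); // block until every datanode has registered
      }

      static void tearDown() {
        if (cluster != null) {
          cluster.shutdown();
        }
      }
    }

Note the asymmetry in the invalid-value assertions: "text" must surface a NumberFormatException cause, while "-1" parses cleanly and instead trips the Preconditions check in DNConf, so only the broader IllegalArgumentException is asserted.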

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -338,7 +338,7 @@ public void testDataNodeGetReconfigurableProperties() throws IOException {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("datanode", address, outs, errs);
-    assertEquals(5, outs.size());
+    assertEquals(6, outs.size());
     assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1));
   }