HDFS-9872. HDFS bytes-default configurations should accept multiple size units. Contributed by Yiqun Lin.

Inigo Goiri 2018-10-19 10:38:04 -07:00
parent 8b64fbab1a
commit 88cce32551
10 changed files with 38 additions and 21 deletions
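The change itself is mechanical: each byte-valued setting below moves from Configuration.getLong to Configuration.getLongBytes, which accepts a binary size suffix (k, m, g, t, p, e; case insensitive) as well as a plain number, so existing configurations keep working. A minimal sketch of the difference, using a hypothetical key name:

import org.apache.hadoop.conf.Configuration;

public class GetLongBytesDemo {
  public static void main(String[] args) {
    // Standalone Configuration (no default resources); the key is hypothetical.
    Configuration conf = new Configuration(false);

    conf.set("my.example.size", "128m");

    // getLongBytes parses the "m" suffix as mebibytes: 128 * 1024 * 1024.
    System.out.println(conf.getLongBytes("my.example.size", 0)); // 134217728

    // Plain byte counts still parse, so old configs are unaffected.
    conf.set("my.example.size", "134217728");
    System.out.println(conf.getLongBytes("my.example.size", 0)); // 134217728

    // getLong would throw NumberFormatException on "128m", which is why
    // the keys touched by this commit needed the switch.
  }
}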


@@ -371,7 +371,7 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
         (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
             null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
     Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
-        null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
+        null : conf.getLongBytes(DFS_CLIENT_CACHE_READAHEAD, 0);
     this.serverDefaultsValidityPeriod =
         conf.getLong(DFS_CLIENT_SERVER_DEFAULTS_VALIDITY_PERIOD_MS_KEY,
             DFS_CLIENT_SERVER_DEFAULTS_VALIDITY_PERIOD_MS_DEFAULT);


@@ -248,7 +248,7 @@ public DNConf(final Configurable dn) {
         DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
         DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
-    this.maxLockedMemory = getConf().getLong(
+    this.maxLockedMemory = getConf().getLongBytes(
         DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);


@@ -76,7 +76,7 @@ private void initLocks() {
   @Override
   public void setConf(Configuration conf) {
-    balancedSpaceThreshold = conf.getLong(
+    balancedSpaceThreshold = conf.getLongBytes(
         DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
         DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT);
     balancedPreferencePercent = conf.getFloat(


@@ -97,7 +97,7 @@ DF getUsage() {
   long getReservedFromConf(String key, long defaultValue) {
     return conf.getLong(key + "." + StringUtils.toLowerCase(
-        storageType.toString()), conf.getLong(key, defaultValue));
+        storageType.toString()), conf.getLongBytes(key, defaultValue));
   }

   /**
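As a usage sketch of the lookup above: the storage-type-specific key (e.g. dfs.datanode.du.reserved.ram_disk) is consulted first, with the generic key as its default, and per this hunk only the generic lookup parses unit suffixes. The standalone mirror below is illustrative, not HDFS source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;

public class ReservedSpaceLookup {
  // Illustrative re-statement of getReservedFromConf, with the storage
  // type passed as a plain String.
  static long reservedFor(Configuration conf, String key,
      String storageType, long defaultValue) {
    // The generic key honors unit suffixes after this commit...
    long generic = conf.getLongBytes(key, defaultValue);
    // ...and serves as the default for the storage-type-specific key,
    // which this hunk still reads as a plain long.
    return conf.getLong(key + "." + StringUtils.toLowerCase(storageType),
        generic);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.datanode.du.reserved", "10g"); // suffix now accepted
    System.out.println(
        reservedFor(conf, "dfs.datanode.du.reserved", "RAM_DISK", 0));
    // Prints 10737418240: no ram_disk-specific key is set, so the
    // generic 10 GiB value applies.
  }
}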


@@ -291,7 +291,7 @@ public enum DirOp {
         DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,
         DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT);
     LOG.info("XAttrs enabled? " + xattrsEnabled);
-    this.xattrMaxSize = conf.getInt(
+    this.xattrMaxSize = (int) conf.getLongBytes(
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
     Preconditions.checkArgument(xattrMaxSize > 0,
@@ -327,7 +327,7 @@ public enum DirOp {
         DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT);
     // filesystem limits
-    this.maxComponentLength = conf.getInt(
+    this.maxComponentLength = (int) conf.getLongBytes(
         DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
         DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
     this.maxDirItems = conf.getInt(


@@ -825,7 +825,8 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
     this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY,
         DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
-    this.minBlockSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY,
+    this.minBlockSize = conf.getLongBytes(
+        DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT);
     this.maxBlocksPerFile = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY,
         DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT);


@@ -220,9 +220,9 @@ public static void setFileNameHeaders(HttpServletResponse response,
    * @return a data transfer throttler
    */
   public static DataTransferThrottler getThrottler(Configuration conf) {
-    long transferBandwidth =
-        conf.getLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY,
-            DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_DEFAULT);
+    long transferBandwidth = conf.getLongBytes(
+        DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY,
+        DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_DEFAULT);
     DataTransferThrottler throttler = null;
     if (transferBandwidth > 0) {
       throttler = new DataTransferThrottler(transferBandwidth);
@@ -233,7 +233,7 @@ public static DataTransferThrottler getThrottler(Configuration conf) {
   private static DataTransferThrottler getThrottlerForBootstrapStandby(
       Configuration conf) {
     long transferBandwidth =
-        conf.getLong(
+        conf.getLongBytes(
             DFSConfigKeys.DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_KEY,
             DFSConfigKeys.DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_DEFAULT);
     DataTransferThrottler throttler = null;


@@ -111,7 +111,7 @@ public NameNodeResourceChecker(Configuration conf) throws IOException {
     this.conf = conf;
     volumes = new HashMap<String, CheckedVolume>();
-    duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
+    duReserved = conf.getLongBytes(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
         DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);
     Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf


@@ -275,7 +275,7 @@ private static void uploadImage(URL url, Configuration conf,
     connection.setDoOutput(true);
-    int chunkSize = conf.getInt(
+    int chunkSize = (int) conf.getLongBytes(
         DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY,
         DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT);
     if (imageFile.length() > chunkSize) {


@@ -348,7 +348,8 @@
     corresponding storage types ([ssd]/[disk]/[archive]/[ram_disk]) for cluster with heterogeneous storage.
     For example, reserved space for RAM_DISK storage can be configured using property
     'dfs.datanode.du.reserved.ram_disk'. If specific storage type reservation is not configured
-    then dfs.datanode.du.reserved will be used.
+    then dfs.datanode.du.reserved will be used. Support multiple size unit suffix(case insensitive),
+    as described in dfs.blocksize.
     Note: In case of using tune2fs to set reserved-blocks-percentage, or other filesystem tools,
     then you can possibly run into out of disk errors because hadoop will not check those
     external tool configurations.
@@ -390,7 +391,9 @@
   <name>dfs.namenode.fs-limits.max-component-length</name>
   <value>255</value>
   <description>Defines the maximum number of bytes in UTF-8 encoding in each
-    component of a path. A value of 0 will disable the check.</description>
+    component of a path. A value of 0 will disable the check. Support
+    multiple size unit suffix(case insensitive), as described in dfs.blocksize.
+  </description>
 </property>

 <property>
@@ -406,8 +409,9 @@
   <value>1048576</value>
   <description>Minimum block size in bytes, enforced by the Namenode at create
     time. This prevents the accidental creation of files with tiny block
-    sizes (and thus many blocks), which can degrade
-    performance.</description>
+    sizes (and thus many blocks), which can degrade performance. Support multiple
+    size unit suffix(case insensitive), as described in dfs.blocksize.
+  </description>
 </property>

 <property>
@@ -945,7 +949,8 @@
   <value>104857600</value>
   <description>
     The amount of space to reserve/require for a NameNode storage directory
-    in bytes. The default is 100MB.
+    in bytes. The default is 100MB. Support multiple size unit
+    suffix(case insensitive), as described in dfs.blocksize.
   </description>
 </property>
@@ -1331,6 +1336,8 @@
     A default value of 0 indicates that throttling is disabled.
     The maximum bandwidth used for bootstrapping standby namenode is
     configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.
+    Support multiple size unit suffix(case insensitive), as described
+    in dfs.blocksize.
   </description>
 </property>
@@ -1344,6 +1351,8 @@
     value should be used in most cases, to ensure timely HA operations.
     The maximum bandwidth used for regular image transfers is configured
     with dfs.image.transfer.bandwidthPerSec.
+    Support multiple size unit suffix(case insensitive), as described in
+    dfs.blocksize.
   </description>
 </property>
@@ -1354,6 +1363,8 @@
     Chunksize in bytes to upload the checkpoint.
     Chunked streaming is used to avoid internal buffering of contents
     of image file of huge size.
+    Support multiple size unit suffix(case insensitive), as described
+    in dfs.blocksize.
   </description>
 </property>
@@ -2293,7 +2304,8 @@
     bytes of free disk space before they are considered imbalanced. If the free
     space of all the volumes are within this range of each other, the volumes
     will be considered balanced and block assignments will be done on a pure
-    round robin basis.
+    round robin basis. Support multiple size unit suffix(case insensitive), as
+    described in dfs.blocksize.
   </description>
 </property>
@@ -2366,7 +2378,8 @@
     read ahead in the block file using posix_fadvise, potentially decreasing
     I/O wait times. Unlike dfs.datanode.readahead.bytes, this is a client-side
     setting rather than a setting for the entire datanode. If present, this
-    setting will override the DataNode default.
+    setting will override the DataNode default. Support multiple size unit
+    suffix(case insensitive), as described in dfs.blocksize.

     When using local reads, this setting determines how much readahead we do in
     BlockReaderLocal.
@@ -2510,7 +2523,8 @@
     The amount of memory in bytes to use for caching of block replicas in
     memory on the datanode. The datanode's maximum locked memory soft ulimit
     (RLIMIT_MEMLOCK) must be set to at least this value, else the datanode
-    will abort on startup.
+    will abort on startup. Support multiple size unit suffix(case insensitive),
+    as described in dfs.blocksize.

     By default, this parameter is set to 0, which disables in-memory caching.
@@ -2835,6 +2849,8 @@
     The maximum combined size of the name and value of an extended attribute
     in bytes. It should be larger than 0, and less than or equal to maximum
     size hard limit which is 32768.
+    Support multiple size unit suffix(case insensitive), as described in
+    dfs.blocksize.
   </description>
 </property>
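Taken together, the hdfs-default.xml updates say the same thing for each key: the value may carry a case-insensitive size-unit suffix, as described in dfs.blocksize. A short sketch of equivalent spellings (values are illustrative):

import org.apache.hadoop.conf.Configuration;

public class SuffixedDefaultsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);

    // "1m" and "1048576" are now equivalent for the minimum block size.
    conf.set("dfs.namenode.fs-limits.min-block-size", "1m");
    System.out.println(conf.getLongBytes(
        "dfs.namenode.fs-limits.min-block-size", 1048576)); // 1048576

    // Suffixes are case insensitive, as the updated descriptions note.
    conf.set("dfs.image.transfer.bandwidthPerSec", "50M");
    System.out.println(conf.getLongBytes(
        "dfs.image.transfer.bandwidthPerSec", 0)); // 52428800

    // int-valued settings such as the image transfer chunk size are read
    // as long and narrowed with a cast, matching the Java hunks above.
    conf.set("dfs.image.transfer.chunksize", "64k");
    int chunkSize =
        (int) conf.getLongBytes("dfs.image.transfer.chunksize", 65536);
    System.out.println(chunkSize); // 65536
  }
}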