HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1227165 13f79535-47bb-0310-9956-ffa450edef68
Author: Harsh J
Date:   2012-01-04 14:15:15 +00:00
Parent: 2f19c59f1d
Commit: 075122690c

9 changed files with 19 additions and 9 deletions
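The change rests on Configuration.getLongBytes(), which understands human-readable byte sizes, replacing Configuration.getLong(), which only accepts plain numbers. As a minimal sketch of the behavior this commit enables (the demo class below is not part of the commit), setting dfs.blocksize to a suffixed value and reading it back through getLongBytes() yields the size in bytes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Hypothetical demo class, not part of this commit.
public class BlockSizeSuffixDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // After this commit, dfs.blocksize may carry a size-indicating suffix.
    conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "128m");

    // getLongBytes() resolves the suffix and returns the size in bytes.
    long blockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    System.out.println(blockSize); // 134217728

    // The getLong() calls replaced in the hunks below would instead fail to
    // parse "128m", since they expect a plain numeric string.
  }
}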


@@ -628,8 +628,11 @@
     <tr>
       <td>conf/hdfs-site.xml</td>
       <td>dfs.blocksize</td>
-      <td>134217728</td>
-      <td>HDFS blocksize of 128MB for large file-systems.</td>
+      <td>128m</td>
+      <td>
+        HDFS blocksize of 128 MB for large file-systems. Sizes can be provided
+        in size-prefixed values (10k, 128m, 1g, etc.) or simply in bytes (134217728 for 128 MB, etc.).
+      </td>
     </tr>
     <tr>
       <td>conf/hdfs-site.xml</td>


@@ -121,6 +121,8 @@ Trunk (unreleased changes)
     HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
 
+    HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)


@@ -180,7 +180,7 @@ static class Conf {
       /** dfs.write.packet.size is an internal config variable */
       writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
           DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-      defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
+      defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
           DFS_BLOCK_SIZE_DEFAULT);
       defaultReplication = (short) conf.getInt(
           DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);


@@ -119,7 +119,7 @@ synchronized void release() {
         conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
                     DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
-    this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
 
     //set up parameter for cluster balancing


@@ -528,7 +528,7 @@ private void setConfigurationParameters(Configuration conf)
         fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
 
     this.serverDefaults = new FsServerDefaults(
-        conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
         conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
         (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),


@@ -529,7 +529,7 @@ public void setTimes(final Path p, final long mtime, final long atime
 
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }


@@ -55,6 +55,6 @@ public String getName() {
   /** @return the value or, if it is null, return the default from conf. */
   public long getValue(final Configuration conf) {
     return getValue() != null? getValue()
-        : conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
   }
 }


@@ -329,7 +329,12 @@ creations/deletions), or "all".</description>
 <property>
   <name>dfs.blocksize</name>
   <value>67108864</value>
-  <description>The default block size for new files.</description>
+  <description>
+      The default block size for new files, in bytes.
+      You can use the following suffix (case insensitive):
+      k(kilo), m(mega), g(giga), t(tera), p(peta), e(exa) to specify the size (such as 128k, 512m, 1g, etc.),
+      Or provide complete size in bytes (such as 134217728 for 128 MB).
+  </description>
 </property>
 
 <property>
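For illustration only (not part of the commit): the suffixes listed in the description above are binary prefixes, so k stands for 1024 bytes, m for 1024 * 1024, and so on, which is consistent with 128m mapping to 134217728 in the documentation hunk earlier. Under that assumption, the following hypothetical sketch shows three equivalent spellings of the 64 MB default value:

import org.apache.hadoop.conf.Configuration;

// Hypothetical demo class, not part of this commit.
public class BlockSizeSpellings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Three equivalent spellings of 64 MB: plain bytes, kilobytes, megabytes.
    // With binary prefixes, 64m = 65536k = 67108864 bytes.
    for (String value : new String[] {"67108864", "65536k", "64m"}) {
      conf.set("dfs.blocksize", value);
      System.out.println(value + " -> "
          + conf.getLongBytes("dfs.blocksize", 0L));
    }
  }
}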


@@ -51,7 +51,7 @@ public void testBlockSizeParam() {
     final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
     Assert.assertEquals(null, p.getValue());
     Assert.assertEquals(
-        conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
             DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
         p.getValue(conf));