HDFS-9902. Support different values of dfs.datanode.du.reserved per storage type. (Contributed by Brahma Reddy Battula)

Arpit Agarwal 2016-05-03 16:52:43 -07:00
parent ed54f5f1ff
commit 6d77d6eab7
3 changed files with 44 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -58,6 +58,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.CloseableReferenceCount;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;
import org.codehaus.jackson.annotate.JsonProperty;
@@ -118,9 +119,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
Configuration conf, StorageType storageType) throws IOException {
this.dataset = dataset;
this.storageID = storageID;
-    this.reserved = conf.getLong(
+    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
+        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
this.reservedForReplicas = new AtomicLong(0L);
this.currentDir = currentDir;
File parent = currentDir.getParentFile();
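The nested conf.getLong() calls implement the fallback: the storage-type-specific key is read first, and its default value is itself the result of looking up the generic key. A minimal sketch of the equivalent lookup (the local variable names typedKey and reserved are illustrative, not part of the patch):

// Per-storage-type key, e.g. "dfs.datanode.du.reserved.ram_disk".
String typedKey = DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
    + "." + StringUtils.toLowerCase(storageType.toString());
// Inner getLong(): generic key if present, else the hard-coded default.
// Outer getLong(): per-type key if present, else the inner result.
long reserved = conf.getLong(typedKey,
    conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));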

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -321,6 +321,11 @@
<name>dfs.datanode.du.reserved</name>
<value>0</value>
<description>Reserved space in bytes per volume. Always leave this much space free for non-DFS use.
Storage-type-specific reservation is also supported. The property name can be suffixed with a
corresponding storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with heterogeneous storage.
For example, reserved space for RAM_DISK storage can be configured using the property
'dfs.datanode.du.reserved.ram_disk'. If a specific storage type reservation is not configured,
then dfs.datanode.du.reserved will be used.
</description>
</property>
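On a cluster with heterogeneous storage, a per-type key can sit alongside the generic one in hdfs-site.xml; for example (the byte values below are illustrative, not recommendations):

<property>
  <name>dfs.datanode.du.reserved</name>
  <!-- 10 GB fallback for storage types without their own setting -->
  <value>10737418240</value>
</property>
<property>
  <name>dfs.datanode.du.reserved.ram_disk</name>
  <!-- 1 GB reserved on RAM_DISK volumes -->
  <value>1073741824</value>
</property>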

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

@@ -27,6 +27,7 @@
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.Test;
@@ -36,7 +37,7 @@
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeoutException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
@@ -143,4 +144,37 @@ public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
volumeList.addVolume(ref);
assertNull(ref.getVolume());
}
@Test
public void testDfsReservedForDifferentStorageTypes() throws IOException {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, 100L);
File volDir = new File(baseDir, "volume-0");
volDir.mkdirs();
// When a storage-type-specific reservation is not configured, the volume
// should fall back to dfs.datanode.du.reserved.
FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
StorageType.RAM_DISK);
assertEquals("", 100L, volume.getReserved());
// When storage-type-specific reservations are configured, they take precedence.
conf.setLong(
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+ StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 1L);
conf.setLong(
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+ StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
conf, StorageType.RAM_DISK);
assertEquals("", 1L, volume1.getReserved());
FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
conf, StorageType.SSD);
assertEquals("", 2L, volume2.getReserved());
FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
conf, StorageType.DISK);
assertEquals("", 100L, volume3.getReserved());
FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
conf, StorageType.DEFAULT);
assertEquals("", 100L, volume4.getReserved());
}
}