diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
index 68069d7256..0948801a59 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageType.java
@@ -33,10 +33,11 @@
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public enum StorageType {
-  DISK(false),
+  // sorted by the speed of the storage types, from fast to slow
+  RAM_DISK(true),
   SSD(false),
-  ARCHIVE(false),
-  RAM_DISK(true);
+  DISK(false),
+  ARCHIVE(false);
 
   private final boolean isTransient;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9822575877..79e7820e87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -920,6 +920,8 @@ Release 2.7.1 - UNRELEASED
     HDFS-8521. Add VisibleForTesting annotation to
     BlockPoolSlice#selectReplicaToDelete. (cmccabe)
 
+    HDFS-8361. Choose SSD over DISK in block placement. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index ea69f976a6..0d59deda37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -26,6 +26,7 @@
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -40,7 +41,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -1152,6 +1153,50 @@ public void testChooseTargetWithTopology() throws Exception {
     Assert.assertEquals(3, targets.length);
   }
 
+  @Test
+  public void testChooseSsdOverDisk() throws Exception {
+    BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
+        new StorageType[]{StorageType.SSD, StorageType.DISK,
+            StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
+
+    final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
+    final String[] hosts = {"host1", "host2", "host3"};
+    final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
+
+    final DatanodeStorageInfo[] diskStorages
+        = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
+    final DatanodeDescriptor[] dataNodes
+        = DFSTestUtil.toDatanodeDescriptor(diskStorages);
+    for(int i = 0; i < dataNodes.length; i++) {
+      BlockManagerTestUtil.updateStorage(dataNodes[i],
+          new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
+              StorageType.SSD));
+    }
+
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        new File(baseDir, "name").getPath());
+    DFSTestUtil.formatNameNode(conf);
+    NameNode namenode = new NameNode(conf);
+
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
+    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
+    for (DatanodeDescriptor datanode : dataNodes) {
+      cluster.add(datanode);
+    }
+
+    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
+        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
+        new HashSet<Node>(), 0, policy);
+    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
+    Assert.assertEquals(2, targets.length);
+    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
+    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+  }
+
   @Test
   public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
     //HDFS8219
@@ -1233,4 +1278,32 @@ public void testGetAllStoragePoliciesFromFs() throws IOException {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testStorageType() {
+    final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
+
+    // put the storage types in reversed order
+    map.put(StorageType.ARCHIVE, 1);
+    map.put(StorageType.DISK, 1);
+    map.put(StorageType.SSD, 1);
+    map.put(StorageType.RAM_DISK, 1);
+
+    {
+      final Iterator<StorageType> i = map.keySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next());
+      Assert.assertEquals(StorageType.SSD, i.next());
+      Assert.assertEquals(StorageType.DISK, i.next());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next());
+    }
+
+    {
+      final Iterator<Map.Entry<StorageType, Integer>> i
+          = map.entrySet().iterator();
+      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.SSD, i.next().getKey());
+      Assert.assertEquals(StorageType.DISK, i.next().getKey());
+      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+    }
+  }
 }
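Reviewer note (not part of the patch): a minimal standalone sketch of why reordering the
enum constants is enough to change placement preference. EnumMap and EnumSet iterate in
the enum's declaration order regardless of insertion order, so any code walking a
per-StorageType map or set now sees RAM_DISK and SSD before DISK and ARCHIVE. The class
name StorageTypeOrderDemo is illustrative only and assumes hadoop-common (for
org.apache.hadoop.fs.StorageType) on the classpath.

    import java.util.EnumMap;
    import java.util.EnumSet;

    import org.apache.hadoop.fs.StorageType;

    public class StorageTypeOrderDemo {
      public static void main(String[] args) {
        final EnumMap<StorageType, Integer> counts = new EnumMap<>(StorageType.class);
        // Insert slowest-first on purpose; iteration still follows declaration order.
        counts.put(StorageType.ARCHIVE, 1);
        counts.put(StorageType.DISK, 1);
        counts.put(StorageType.SSD, 1);
        counts.put(StorageType.RAM_DISK, 1);

        // With the reordered enum this prints RAM_DISK, SSD, DISK, ARCHIVE.
        for (StorageType t : counts.keySet()) {
          System.out.println(t);
        }

        // EnumSet behaves the same way: [RAM_DISK, SSD, DISK, ARCHIVE].
        System.out.println(EnumSet.allOf(StorageType.class));
      }
    }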