diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index f5f29abd2e..b9cf5fb8f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -20,12 +20,14 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
+import java.util.Collection;
 import java.util.EnumSet;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -413,6 +415,39 @@ public void setStoragePolicy(final Path src, final String policyName)
     dfs.setStoragePolicy(src, policyName);
   }
 
+  /**
+   * Unset the storage policy set for a given file or directory.
+   *
+   * @param src file or directory path.
+   * @throws IOException
+   */
+  public void unsetStoragePolicy(final Path src) throws IOException {
+    dfs.unsetStoragePolicy(src);
+  }
+
+  /**
+   * Query the effective storage policy for the given file or directory.
+   *
+   * @param src file or directory path.
+   * @return storage policy for the given file or directory.
+   * @throws IOException
+   */
+  public BlockStoragePolicySpi getStoragePolicy(final Path src)
+      throws IOException {
+    return dfs.getStoragePolicy(src);
+  }
+
+  /**
+   * Retrieve all the storage policies supported by the HDFS file system.
+   *
+   * @return all storage policies supported by the HDFS file system.
+   * @throws IOException
+   */
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    return dfs.getAllStoragePolicies();
+  }
+
   /**
    * Set the source path to the specified erasure coding policy.
    *
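Taken together, the new methods expose the full storage-policy lifecycle (set, get, unset, enumerate) through HdfsAdmin. A minimal usage sketch follows; it is not part of the patch, assumes a running cluster reachable via fs.defaultFS, and the /data/archive path and StoragePolicyExample class name are hypothetical:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class StoragePolicyExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at a running HDFS cluster.
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

    Path dir = new Path("/data/archive"); // hypothetical path

    // Pin the directory to the built-in COLD policy.
    admin.setStoragePolicy(dir, "COLD");

    // Read back the effective policy for the path.
    BlockStoragePolicySpi policy = admin.getStoragePolicy(dir);
    System.out.println("Effective policy: " + policy.getName());

    // Remove the explicit policy; the path inherits from its
    // ancestors again (HOT if nothing is set anywhere above it).
    admin.unsetStoragePolicy(dir);

    // Enumerate every policy the cluster supports.
    for (BlockStoragePolicySpi p : admin.getAllStoragePolicies()) {
      System.out.println(p.getName());
    }
  }
}

Note that getStoragePolicy reports the effective policy, so after unsetStoragePolicy it falls back to the inherited or default (HOT) policy rather than failing; the test below asserts exactly this behavior.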
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
index 0f5bdf5c21..717d79e3ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
@@ -23,24 +23,35 @@
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Sets;
+
 public class TestHdfsAdmin {
 
   private static final Path TEST_PATH = new Path("/test");
+  private static final short REPL = 1;
+  private static final int SIZE = 128;
   private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
-  
+
   @Before
   public void setUpCluster() throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
   }
 
   @After
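The setup change above is a prerequisite for the new test rather than incidental cleanup: testHdfsAdminStoragePolicies writes a real file via DFSTestUtil.createFile, which cannot succeed on a cluster with zero DataNodes, and cluster.waitActive() blocks until the NameNode and DataNodes are serving before any test runs. A generic sketch of that lifecycle, assuming nothing beyond the MiniDFSCluster API already used in this file (the MiniClusterSketch class name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A write-capable test needs at least one live DataNode;
    // two comfortably satisfy a replication factor of 1.
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive(); // wait until NameNode and DataNodes are serving
      FileSystem fs = cluster.getFileSystem();
      System.out.println(fs.getUri());
      // ... exercise the API under test against fs ...
    } finally {
      cluster.shutdown(); // always tear the cluster down
    }
  }
}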
@@ -94,4 +105,71 @@ public void testHdfsAdminSetQuota() throws Exception {
   public void testHdfsAdminWithBadUri() throws IOException, URISyntaxException {
     new HdfsAdmin(new URI("file:///bad-scheme"), conf);
   }
+
+  /**
+   * Test that we can set, get, and unset storage policies via {@link HdfsAdmin}.
+   */
+  @Test
+  public void testHdfsAdminStoragePolicies() throws Exception {
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    FileSystem fs = FileSystem.get(conf);
+    final Path foo = new Path("/foo");
+    final Path bar = new Path(foo, "bar");
+    final Path wow = new Path(bar, "wow");
+    DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
+
+    final BlockStoragePolicySuite suite = BlockStoragePolicySuite
+        .createDefaultSuite();
+    final BlockStoragePolicy warm = suite.getPolicy("WARM");
+    final BlockStoragePolicy cold = suite.getPolicy("COLD");
+    final BlockStoragePolicy hot = suite.getPolicy("HOT");
+
+    /*
+     * test: set storage policy
+     */
+    hdfsAdmin.setStoragePolicy(foo, warm.getName());
+    hdfsAdmin.setStoragePolicy(bar, cold.getName());
+    hdfsAdmin.setStoragePolicy(wow, hot.getName());
+
+    /*
+     * test: get storage policy after set
+     */
+    assertEquals(hdfsAdmin.getStoragePolicy(foo), warm);
+    assertEquals(hdfsAdmin.getStoragePolicy(bar), cold);
+    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
+
+    /*
+     * test: unset storage policy
+     */
+    hdfsAdmin.unsetStoragePolicy(foo);
+    hdfsAdmin.unsetStoragePolicy(bar);
+    hdfsAdmin.unsetStoragePolicy(wow);
+
+    /*
+     * test: get storage policy after unset. HOT by default.
+     */
+    assertEquals(hdfsAdmin.getStoragePolicy(foo), hot);
+    assertEquals(hdfsAdmin.getStoragePolicy(bar), hot);
+    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
+
+    /*
+     * test: get all storage policies
+     */
+    // Get policies via HdfsAdmin
+    Set<String> policyNamesSet1 = new HashSet<>();
+    for (BlockStoragePolicySpi policy : hdfsAdmin.getAllStoragePolicies()) {
+      policyNamesSet1.add(policy.getName());
+    }
+
+    // Get policies via BlockStoragePolicySuite
+    Set<String> policyNamesSet2 = new HashSet<>();
+    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
+      policyNamesSet2.add(policy.getName());
+    }
+    // Ensure that we got the same set of policies in both cases.
+    Assert.assertTrue(
+        Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
+    Assert.assertTrue(
+        Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
+  }
 }
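A note on the closing assertions: the two one-way Sets.difference checks jointly assert that the two name sets are equal, so an equivalent and arguably more direct formulation, offered only as a suggestion rather than what the patch uses, would be:

Assert.assertEquals(policyNamesSet1, policyNamesSet2);

Either form verifies that HdfsAdmin#getAllStoragePolicies reports exactly the set of policies defined by the default BlockStoragePolicySuite.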